Dataset columns (type and observed range):

  repo_name         string, length 5 to 92
  path              string, length 4 to 221
  copies            string, 19 distinct values
  size              string, length 4 to 6
  content           string, length 766 to 896k
  license           string, 15 distinct values
  hash              int64, -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean         float64, 6.51 to 99.9
  line_max          int64, 32 to 997
  alpha_frac        float64, 0.25 to 0.96
  autogenerated     bool, 1 class
  ratio             float64, 1.5 to 13.6
  config_test       bool, 2 classes
  has_no_keywords   bool, 2 classes
  few_assignments   bool, 1 class
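The boolean and numeric quality columns above (autogenerated, config_test, has_no_keywords, few_assignments, ratio) are the natural hooks for filtering this corpus before use. Below is a minimal sketch of such a filter, assuming each row is available as a plain dict keyed by the schema columns; the dict-based access and the cutoffs on ratio are assumptions for illustration, not part of the dataset itself.

# Minimal filtering sketch. Assumes each row is a dict keyed by the schema
# columns above (repo_name, path, content, autogenerated, config_test,
# has_no_keywords, few_assignments, ratio, ...). How the rows are loaded is
# left open; the bounds on `ratio` are illustrative, not taken from the data.

def keep_row(row):
    """Keep rows that look like hand-written, keyword-bearing source files."""
    if row["autogenerated"] or row["config_test"]:
        return False
    if row["has_no_keywords"] or row["few_assignments"]:
        return False
    # `ratio` spans 1.5 to 13.6 in the schema; bound it to drop outliers.
    return 1.5 <= row["ratio"] <= 13.6

def iter_clean_files(rows):
    """Yield (repo_name, path, content) for every row passing the filter."""
    for row in rows:
        if keep_row(row):
            yield row["repo_name"], row["path"], row["content"]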
aleksandar-mitrevski/robot_simulation
fault_injector/fault_injector_node.py
1
6522
#!/usr/bin/env python import sys, tty, termios import rospy from fault_injector.msg import InjectFault class Commands(object): InjectFault = 1 RepairSensor = 2 RepairAllSensors = 3 Quit = 4 Unknown = 5 class FaultTypes(object): Permanent = 1 Transient = 2 Unknown = 3 class FaultInjectorNode(object): '''Defines a node that sends commands for injecting sensor faults. The current version allows injecting both permanent and transient faults. Author -- Aleksandar Mitrevski ''' def __init__(self): #stores names of sensor frames representing sensors with injected faults self.faulty_sensor_frames = list() #a list of sensor frames that will be 'repaired', i.e. faults #will stop being injected to the respective sensors self.sensor_frames_to_remove = list() self.fault_message_publisher = rospy.Publisher('inject_fault', InjectFault, queue_size=10) shutdown = False self.print_instructions() while not shutdown: if len(self.sensor_frames_to_remove) > 0: self.repair_transient_faults() character = self.read_character() command = self.read_command(character) if command == Commands.InjectFault: print 'Press:\np for injecting a permanent fault\nt for injecting a transient fault\n' character = self.read_character() fault_type = self.read_fault_type(character) self.manage_sensor(command, fault_type) elif command == Commands.RepairSensor: self.manage_sensor(command) elif command == Commands.RepairAllSensors: self.repair_all_sensors() elif command == Commands.Quit: self.repair_all_sensors() rospy.sleep(0.5) shutdown = True print 'Faulty sensors: ', self.faulty_sensor_frames rospy.sleep(0.5) #~ def inject_fault(self, request): #~ if request.frame_id in self.faulty_sensor_frames: #~ return InjectFaultResponse(True) #~ return InjectFaultResponse(False) def print_instructions(self): print 'Use the following keys:\ni for injecting a fault\nr for "repairing" the sensor (removing the fault)\na for repairing all sensors\nq to quit\n' def read_character(self): '''Code used from http://stackoverflow.com/questions/510357/python-read-a-single-character-from-the-user ''' fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch def read_command(self, character): '''Returns an appropriate 'Commands' value based on the value of the input character. ''' if character == 'i': return Commands.InjectFault elif character == 'r': return Commands.RepairSensor elif character == 'a': return Commands.RepairAllSensors elif character == 'q': return Commands.Quit print 'unknown command ', character return Commands.Unknown def read_fault_type(self, character): '''Returns an appropriate 'FaultTypes' based on the value of the input character. ''' if character == 'p': return FaultTypes.Permanent elif character == 't': return FaultTypes.Transient print 'unknown fault type; injecting permanent fault' return FaultTypes.Permanent def manage_sensor(self, command, fault_type=None): '''Publishes 'InjectFault' messages for injecting faults to sensors or repairing sensor faults based on 'command' and 'fault_type'. The sensor that should be repeared/to which a fault will be injected is provided by the user from the command line. Keyword arguments: command -- A 'Commands' value. fault_type -- A 'FaultTypes' value (default None, which means that we are repairing sensors). 
''' sensor_frame = raw_input('Please enter the name of a sensor frame\n') if command == Commands.InjectFault: if sensor_frame not in self.faulty_sensor_frames: fault_msg = InjectFault() fault_msg.frame_id = sensor_frame fault_msg.inject_fault = True self.fault_message_publisher.publish(fault_msg) self.faulty_sensor_frames.append(sensor_frame) if fault_type == FaultTypes.Transient: self.sensor_frames_to_remove.append(sensor_frame) else: print 'Faults are already being injected to this sensor' if command == Commands.RepairSensor: if sensor_frame in self.faulty_sensor_frames: fault_msg = InjectFault() fault_msg.frame_id = sensor_frame fault_msg.inject_fault = False self.fault_message_publisher.publish(fault_msg) self.faulty_sensor_frames.remove(sensor_frame) else: print 'Faults have not been injected to this sensor; ignoring command' def repair_transient_faults(self): '''Repairs all sensors that are currently in 'self.sensor_frames_to_remove' by publishing an appropriate 'InjectFault' message. ''' for sensor_frame in self.sensor_frames_to_remove: fault_msg = InjectFault() fault_msg.frame_id = sensor_frame fault_msg.inject_fault = False self.fault_message_publisher.publish(fault_msg) self.faulty_sensor_frames.remove(sensor_frame) self.sensor_frames_to_remove[:] = [] print 'Faulty sensors: ', self.faulty_sensor_frames def repair_all_sensors(self): '''Repairs all sensors by sending appropriate 'InjectFault' commands. ''' for sensor_frame in self.faulty_sensor_frames: fault_msg = InjectFault() fault_msg.frame_id = sensor_frame fault_msg.inject_fault = False self.fault_message_publisher.publish(fault_msg) self.faulty_sensor_frames[:] = [] if __name__ == '__main__': rospy.init_node('fault_injector') try: FaultInjectorNode() except rospy.ROSInterruptException: pass
mit
3,365,828,375,489,635,000
37.140351
157
0.616069
false
4.221359
false
false
false
infobip/infobip-api-python-client
infobip/api/model/omni/logs/OmniLog.py
1
5654
# -*- coding: utf-8 -*- """This is a generated class and is not intended for modification! """ from datetime import datetime from infobip.util.models import DefaultObject, serializable from infobip.api.model.omni.Price import Price from infobip.api.model.omni.Status import Status from infobip.api.model.omni.OmniChannel import OmniChannel class OmniLog(DefaultObject): @property @serializable(name="bulkId", type=unicode) def bulk_id(self): """ Property is of type: unicode """ return self.get_field_value("bulk_id") @bulk_id.setter def bulk_id(self, bulk_id): """ Property is of type: unicode """ self.set_field_value("bulk_id", bulk_id) def set_bulk_id(self, bulk_id): self.bulk_id = bulk_id return self @property @serializable(name="messageId", type=unicode) def message_id(self): """ Property is of type: unicode """ return self.get_field_value("message_id") @message_id.setter def message_id(self, message_id): """ Property is of type: unicode """ self.set_field_value("message_id", message_id) def set_message_id(self, message_id): self.message_id = message_id return self @property @serializable(name="to", type=unicode) def to(self): """ Property is of type: unicode """ return self.get_field_value("to") @to.setter def to(self, to): """ Property is of type: unicode """ self.set_field_value("to", to) def set_to(self, to): self.to = to return self @property @serializable(name="from", type=unicode) def from_(self): """ Property is of type: unicode """ return self.get_field_value("from_") @from_.setter def from_(self, from_): """ Property is of type: unicode """ self.set_field_value("from_", from_) def set_from_(self, from_): self.from_ = from_ return self @property @serializable(name="text", type=unicode) def text(self): """ Property is of type: unicode """ return self.get_field_value("text") @text.setter def text(self, text): """ Property is of type: unicode """ self.set_field_value("text", text) def set_text(self, text): self.text = text return self @property @serializable(name="sentAt", type=datetime) def sent_at(self): """ Property is of type: datetime """ return self.get_field_value("sent_at") @sent_at.setter def sent_at(self, sent_at): """ Property is of type: datetime """ self.set_field_value("sent_at", sent_at) def set_sent_at(self, sent_at): self.sent_at = sent_at return self @property @serializable(name="doneAt", type=datetime) def done_at(self): """ Property is of type: datetime """ return self.get_field_value("done_at") @done_at.setter def done_at(self, done_at): """ Property is of type: datetime """ self.set_field_value("done_at", done_at) def set_done_at(self, done_at): self.done_at = done_at return self @property @serializable(name="messageCount", type=int) def message_count(self): """ Property is of type: int """ return self.get_field_value("message_count") @message_count.setter def message_count(self, message_count): """ Property is of type: int """ self.set_field_value("message_count", message_count) def set_message_count(self, message_count): self.message_count = message_count return self @property @serializable(name="mccMnc", type=unicode) def mcc_mnc(self): """ Property is of type: unicode """ return self.get_field_value("mcc_mnc") @mcc_mnc.setter def mcc_mnc(self, mcc_mnc): """ Property is of type: unicode """ self.set_field_value("mcc_mnc", mcc_mnc) def set_mcc_mnc(self, mcc_mnc): self.mcc_mnc = mcc_mnc return self @property @serializable(name="price", type=Price) def price(self): """ Property is of type: Price """ return self.get_field_value("price") @price.setter def 
price(self, price): """ Property is of type: Price """ self.set_field_value("price", price) def set_price(self, price): self.price = price return self @property @serializable(name="status", type=Status) def status(self): """ Property is of type: Status """ return self.get_field_value("status") @status.setter def status(self, status): """ Property is of type: Status """ self.set_field_value("status", status) def set_status(self, status): self.status = status return self @property @serializable(name="channel", type=OmniChannel) def channel(self): """ Property is of type: OmniChannel """ return self.get_field_value("channel") @channel.setter def channel(self, channel): """ Property is of type: OmniChannel """ self.set_field_value("channel", channel) def set_channel(self, channel): self.channel = channel return self
apache-2.0
-4,938,123,351,956,362,000
22.5625
66
0.556951
false
3.799731
false
false
false
Amarchuk/2FInstability
core/n1167.py
1
24998
__author__ = 'amarch' # -*- coding: utf-8 -*- import shutil from core.main import * def correctGasData(r_g1, v_g1, dv_g1): '''Функция, куда убраны все операции подгонки с данными по газу.''' r_g = r_g1 v_g = v_g1 dv_g = dv_g1 #Если необходимо выпрямить апроксимацию на краю - можно добавить несколько последних точек, #это должно помочь сгладить. Или обрезать по upperBord. # upperBord = 200 # r_g, v_g, dv_g = zip(*(filter(lambda x: x[0] < upperBord, zip(r_g, v_g, dv_g)))) # r_g = list(r_g) # v_g = list(v_g) # dv_g = list(dv_g) # multiplate = 5 # addition_points = 2 # r_points = heapq.nlargest(addition_points, r_g) # v_points = [] # dv_points = [] # for po in r_points: # v_points.append(v_g[r_g.index(po)]) # dv_points.append(dv_g[r_g.index(po)]) # r_g = r_g + [i[0] + scale * i[1] for i in zip(r_points * multiplate, range(1, multiplate * addition_points + 1))] # v_g = v_g + v_points * multiplate # dv_g = dv_g + dv_points * multiplate # correction = lstqBend(r_g, v_g) correction = 4952/math.sin(36*math.pi/180) v_g = map(lambda x: abs(x - correction), v_g) r_g, v_g, dv_g = map(list, zip(*sorted(zip(r_g, v_g, dv_g)))) # add_points = 5 # r_points = [32] # v_points = [285] # dv_points = [1] # # r_g = r_g + [i[0] + scale * i[1] for i in zip(r_points * add_points, range(1, add_points + 1))] # v_g = v_g + v_points * add_points # dv_g = dv_g + dv_points * add_points # # add_points = 54 # r_points = [46] # v_points = [268] # dv_points = [1] # # r_g = r_g + [i[0] + scale * i[1] for i in zip(r_points * add_points, range(1, add_points + 1))] # v_g = v_g + v_points * add_points # dv_g = dv_g + dv_points * add_points return r_g, v_g, dv_g def correctStarData(r_ma1, v_ma1, dv_ma1): '''Корректировка данных по звездам.''' r_ma = r_ma1 v_ma = v_ma1 dv_ma = dv_ma1 #Если необходимо выпрямить апроксимацию на краю - можно добавить несколько последних точек, #это должно помочь сгладить. Или обрезать по upperBord. # upperBord = 3000 # r_ma, v_ma = zip(*(filter(lambda x: x[0] < upperBord, zip(r_ma, v_ma)))) # r_ma = list(r_ma) # v_ma = list(v_ma) # # multiplate = 5 # addition_points = 3 # r_points = heapq.nlargest(addition_points, r_ma) # v_points = [] # dv_points = [] # for po in r_points: # v_points.append(v_ma[r_ma.index(po)]) # dv_points.append(dv_ma[r_ma.index(po)]) # r_ma = r_ma + [i[0] + scale * i[1] for i in zip(r_points * multiplate, range(1, multiplate * addition_points + 1))] # v_ma = v_ma + v_points * multiplate # dv_ma = dv_ma + dv_points * multiplate add_points = 50 r_points = [36] v_points = [340] dv_points = [2] r_ma = r_ma + [i[0] + scale * i[1] for i in zip(r_points * add_points, range(1, add_points + 1))] v_ma = v_ma + v_points * add_points dv_ma = dv_ma + dv_points * add_points return r_ma, v_ma, dv_ma def correctSigmaLosMaj(r_ma1, sig_los_ma1, dsig_los_ma1): '''Корректируем данные по дисперсии скоростей вдоль главной оси. ''' # Если не сошлось - надо исправить начальное приближение гауссианы ниже: x0 = array([0, 100, 5, 100]) # на случай если данные из разных источников в одном файле r_ma, sig_los_ma, dsig_los_ma = map(list, zip(*sorted(zip(r_ma1, sig_los_ma1, dsig_los_ma1)))) # Можно обрезать в случае плохих краев # r_ma = r_ma[1:-1] # sig_los_ma = sig_los_ma[1:-1] # dsig_los_ma = dsig_los_ma[1:-1] # #Если необходимо выпрямить апроксимацию на краю - можно добавить несколько последних точек, # #это должно помочь сгладить. 
# # multiplate = 10 # addition_points = 1 # r_points = heapq.nlargest(addition_points, r_ma) # sig_points = [] # dsig_points = [] # for po in r_points: # sig_points.append(sig_los_ma[r_ma.index(po)]) # dsig_points.append(dsig_los_ma[r_ma.index(po)]) # r_ma = r_ma + [i[0] + scale * i[1] for i in # zip(r_points * multiplate, arange(1, 3 * (multiplate * addition_points) + 1, 3))] # sig_los_ma = sig_los_ma + sig_points * multiplate # dsig_los_ma = dsig_los_ma + dsig_points * multiplate add_points = 90 r_points = [7] v_points = [238] dv_points = [1] # Экспоненциальные точки r_ma = r_ma + [i[0] + i[1] for i in zip(r_points * add_points, arange(1, add_points + 1, 1))] sig_los_ma = sig_los_ma + [220 * math.exp(-x / 43.0) for x in [i[0] + i[1] for i in zip(r_points * add_points, arange(1, add_points + 1, 1))]] dsig_los_ma = dsig_los_ma + dv_points * add_points return r_ma, sig_los_ma, dsig_los_ma, x0 def correctSigmaLosMin(r_ma1, sig_los_ma1, dsig_los_ma1): '''Корректируем данные по дисперсии скоростей вдоль главной оси. ''' r_ma, sig_los_ma, dsig_los_ma = map(list, zip(*sorted(zip(r_ma1, sig_los_ma1, dsig_los_ma1)))) # Можно обрезать в случае плохих краев # r_ma = r_ma[1:-1] # sig_los_ma = sig_los_ma[1:-1] # dsig_los_ma = dsig_los_ma[1:-1] # Если не сошлось - надо исправить начальное приближение гауссианы ниже: x0 = array([0, 10, 5, 10]) #Если необходимо выпрямить апроксимацию на краю - можно добавить несколько последних точек, #это должно помочь сгладить. # multiplate = 10 # addition_points = 1 # r_points = heapq.nlargest(addition_points, r_ma) # sig_points = [] # dsig_points = [] # for po in r_points: # sig_points.append(sig_los_ma[r_ma.index(po)]) # dsig_points.append(dsig_los_ma[r_ma.index(po)]) # r_ma = r_ma + [i[0] + scale * i[1] for i in # zip(r_points * multiplate, arange(1, 5 * (multiplate * addition_points) + 1, 5))] # sig_los_ma = sig_los_ma + sig_points * multiplate # dsig_los_ma = dsig_los_ma + dsig_points * multiplate return r_ma, sig_los_ma, dsig_los_ma, x0 startTime = time.time() if __name__ == "__main__": plt.rcParams.update({'font.size': 16}) path = '/home/amarch/Documents/RotationCurves/Diploma/TwoFluidInstAllDataFromSotn17Feb/Sample/RC/U2487_N1167' name = 'U2487_N1167' incl = 36 scale = 1 resolution = 330 #pc/arcsec h_disc = 24.2 # R-band M_R = 11.69 M_B = 13.40 mu0_c_R = 20.12 r_eff_bulge = 6.7 pol_degree_star = 15 pol_degree_gas = 8 sig_pol_deg = 10 sig_pol_deg_mi = 8 Rmin = 29 Rmax = 75 gas_corr_by_incl = False M_to_L = mass_to_light(M_B - M_R) di = 2 monte_carlo_realizations = 1 peculiarities = [59,61] maxDisc = 4 sig_wings = r_eff_bulge # откуда крылья для дисперсий фитировать use_minor = False # используется ли дисперсия по малой оси if not os.path.exists(path+'/EQUAL_BELL/'): os.makedirs(path+'/EQUAL_BELL/') else: for f in os.listdir(path+'/EQUAL_BELL/'): os.remove(path+'/EQUAL_BELL/'+f) shutil.copy2(path+'/v_stars_ma.dat', path+'/EQUAL_BELL/v_stars_ma.dat') shutil.copy2(path+'/v_gas_ma.dat', path+'/EQUAL_BELL/v_gas_ma.dat') shutil.copy2(path+'/gas_density.dat', path+'/EQUAL_BELL/gas_density.dat') if os.path.exists(path+'/v_stars_mi.dat'): shutil.copy2(path+'/v_stars_mi.dat', path+'/EQUAL_BELL/v_stars_mi.dat') #EQUAL и Белл mainf(PATH=path+'/EQUAL_BELL', NAME=name, INCL=incl, SCALE=scale, RESOLUTION=resolution, H_DISC=h_disc, MR=M_R, MB=M_B, MU0=mu0_c_R, R_EFF_B=r_eff_bulge, DEG_STAR=pol_degree_star, DEG_GAS=pol_degree_gas, SIG_MA_DEG=sig_pol_deg, SIG_MI_DEG=sig_pol_deg_mi, RMIN=Rmin, RMAX=Rmax, GAS_CORR=gas_corr_by_incl, M_TO_L=M_to_L, DI=di, 
MONTE_CARLO=monte_carlo_realizations, CORRECTION_GAS=correctGasData, CORRECTION_STAR=correctStarData, CORRECTION_SIG_MA=correctSigmaLosMaj, CORRECTION_SIG_MI=correctSigmaLosMin, SURF_DENS_STAR=surfaceDensityStarR, METHOD='EQUAL', PECULIARITIES=peculiarities, SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=1) renameFilesByMethod(path+'/EQUAL_BELL/', 'EQUAL_BELL') if not os.path.exists(path+'/HALF_MAX/'): os.makedirs(path+'/HALF_MAX/') else: for f in os.listdir(path+'/HALF_MAX/'): os.remove(path+'/HALF_MAX/'+f) shutil.copy2(path+'/v_stars_ma.dat', path+'/HALF_MAX/v_stars_ma.dat') shutil.copy2(path+'/v_gas_ma.dat', path+'/HALF_MAX/v_gas_ma.dat') shutil.copy2(path+'/gas_density.dat', path+'/HALF_MAX/gas_density.dat') if os.path.exists(path+'/v_stars_mi.dat'): shutil.copy2(path+'/v_stars_mi.dat', path+'/HALF_MAX/v_stars_mi.dat') #HALF и Макс. диск mainf(PATH=path+'/HALF_MAX', NAME=name, INCL=incl, SCALE=scale, RESOLUTION=resolution, H_DISC=h_disc, MR=M_R, MB=M_B, MU0=mu0_c_R, R_EFF_B=r_eff_bulge, DEG_STAR=pol_degree_star, DEG_GAS=pol_degree_gas, SIG_MA_DEG=sig_pol_deg, SIG_MI_DEG=sig_pol_deg_mi, RMIN=Rmin, RMAX=Rmax, GAS_CORR=gas_corr_by_incl, M_TO_L=maxDisc, DI=di, MONTE_CARLO=monte_carlo_realizations, CORRECTION_GAS=correctGasData, CORRECTION_STAR=correctStarData, CORRECTION_SIG_MA=correctSigmaLosMaj, CORRECTION_SIG_MI=correctSigmaLosMin, SURF_DENS_STAR=surfaceDensityStarR, METHOD='HALF', PECULIARITIES=peculiarities, SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=2) renameFilesByMethod(path+'/HALF_MAX/', 'HALF_MAX') if not os.path.exists(path+'/HALF_BELL/'): os.makedirs(path+'/HALF_BELL/') else: for f in os.listdir(path+'/HALF_BELL/'): os.remove(path+'/HALF_BELL/'+f) shutil.copy2(path+'/v_stars_ma.dat', path+'/HALF_BELL/v_stars_ma.dat') shutil.copy2(path+'/v_gas_ma.dat', path+'/HALF_BELL/v_gas_ma.dat') shutil.copy2(path+'/gas_density.dat', path+'/HALF_BELL/gas_density.dat') if os.path.exists(path+'/v_stars_mi.dat'): shutil.copy2(path+'/v_stars_mi.dat', path+'/HALF_BELL/v_stars_mi.dat') #HALF и Белл mainf(PATH=path+'/HALF_BELL', NAME=name, INCL=incl, SCALE=scale, RESOLUTION=resolution, H_DISC=h_disc, MR=M_R, MB=M_B, MU0=mu0_c_R, R_EFF_B=r_eff_bulge, DEG_STAR=pol_degree_star, DEG_GAS=pol_degree_gas, SIG_MA_DEG=sig_pol_deg, SIG_MI_DEG=sig_pol_deg_mi, RMIN=Rmin, RMAX=Rmax, GAS_CORR=gas_corr_by_incl, M_TO_L=M_to_L, DI=di, MONTE_CARLO=monte_carlo_realizations, CORRECTION_GAS=correctGasData, CORRECTION_STAR=correctStarData, CORRECTION_SIG_MA=correctSigmaLosMaj, CORRECTION_SIG_MI=correctSigmaLosMin, SURF_DENS_STAR=surfaceDensityStarR, METHOD='HALF', PECULIARITIES=peculiarities, SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=3) renameFilesByMethod(path+'/HALF_BELL/', 'HALF_BELL') if not os.path.exists(path+'/EQUAL_MAX/'): os.makedirs(path+'/EQUAL_MAX/') else: for f in os.listdir(path+'/EQUAL_MAX/'): os.remove(path+'/EQUAL_MAX/'+f) shutil.copy2(path+'/v_stars_ma.dat', path+'/EQUAL_MAX/v_stars_ma.dat') shutil.copy2(path+'/v_gas_ma.dat', path+'/EQUAL_MAX/v_gas_ma.dat') shutil.copy2(path+'/gas_density.dat', path+'/EQUAL_MAX/gas_density.dat') if os.path.exists(path+'/v_stars_mi.dat'): shutil.copy2(path+'/v_stars_mi.dat', path+'/EQUAL_MAX/v_stars_mi.dat') #EQUAL и Макс диск mainf(PATH=path+'/EQUAL_MAX', NAME=name, INCL=incl, SCALE=scale, RESOLUTION=resolution, H_DISC=h_disc, MR=M_R, MB=M_B, MU0=mu0_c_R, R_EFF_B=r_eff_bulge, DEG_STAR=pol_degree_star, DEG_GAS=pol_degree_gas, SIG_MA_DEG=sig_pol_deg, SIG_MI_DEG=sig_pol_deg_mi, RMIN=Rmin, RMAX=Rmax, GAS_CORR=gas_corr_by_incl, M_TO_L=maxDisc, DI=di, 
MONTE_CARLO=monte_carlo_realizations, CORRECTION_GAS=correctGasData, CORRECTION_STAR=correctStarData, CORRECTION_SIG_MA=correctSigmaLosMaj, CORRECTION_SIG_MI=correctSigmaLosMin, SURF_DENS_STAR=surfaceDensityStarR, METHOD='EQUAL', PECULIARITIES=peculiarities, SIG_WINGS = sig_wings, USE_MINOR = use_minor, RUN=4) renameFilesByMethod(path+'/EQUAL_MAX/', 'EQUAL_MAX') # #Логгирование в файл # sys.stdout = Tee(path + "/log_" + name + '.txt', 'w') # # # Работа с фотометрией в I полосе. # poly_star, poly_gas, star_data, gas_data = bendStarRC(correctGasData, correctStarData, path, incl, 0.0, False, # pol_degree_star, pol_degree_gas, name, # scale, gas_corr_by_incl, False) # h_disc *= scale # R1, R2 = correctDistanceInterval(path, scale) # R1 = 10 # R2 = 75 # evaluateSigLosWingsExpScale(path, r_eff_bulge) # sigLosGaussParams, sigMajData = fitGaussSigLosMaj(correctSigmaLosMaj, path, scale, incl) # sigLosPolyParams = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, False, min(Rmax, R2)) # sigLosSinhParams = fitSechSigLosMaj(correctSigmaLosMaj, path, scale, incl) ## sigLosGaussParamsMi, sigMiData = fitGaussSigLosMin(correctSigmaLosMin, path, scale, incl) ## sigLosPolyParamsMi = fitPolySigLosMin(correctSigmaLosMin, path, scale, incl, sig_pol_deg_mi, False, min(Rmax, R2)) # eval_SigPhi_to_sigR(poly_star, R1, R2, (R2 - R1) / 1000.0, path) # evalEpyciclicFreq(poly_gas, arange(R1 + 2, R2, 0.1), path, resolution, h_disc) # #M_to_L = mass_to_light_Iband(M_B - M_R) # print '#!!!!!!!!!!!!# Mass-to-light ratio in I band (M/L) = ', M_to_L # plotSurfDens(M_to_L, h_disc, mu0_c_R, 0, Rmax, 0.1, path, surfaceDensityStarR) # gas_sf_data = surfaceDensityGas(path) # # r_surfd_gas = gas_sf_data[0] # r_star_and_gas = list(arange(Rmin, Rmax, 0.1)) + r_surfd_gas # r_star_and_gas.sort() # # r_star_and_gas = filter(lambda x: ((x <= min(Rmax, R2)) & (x >= max(Rmin, R1))), r_star_and_gas) # # r_surfd_gas = filter(lambda x: ((x <= min(Rmax, R2)) & (x >= max(Rmin, R1, r_eff_bulge))), r_surfd_gas) # r_star_and_gas = filter(lambda x: x > r_eff_bulge, r_star_and_gas) # r_surfd_gas = filter(lambda x: x > r_eff_bulge, r_surfd_gas) # # ratioSVEfromSigma(r_star_and_gas, h_disc, path, poly_star, sigLosPolyParams, sigLosPolyParamsMi, 100, incl) # SVEfunction = simpleSVEfromSigma # # SVEfunction = simpleSVEwhenPhiEqualsZ # sig_R2, sig_Phi2, sig_Z2 = SVEfunction(r_star_and_gas, h_disc, path, poly_star, sigMajData, # sigLosPolyParams, 0.5, 71, incl) # # # h_kin, sigR2 = asymmetricDriftEvaluation(r_star_and_gas, h_disc, path, poly_star, poly_gas, 91) # # sigZ2, sigPhi2 = velosityEllipsoid(h_disc,r_star_and_gas, sigR2, path, incl, sigLosPolyParams, poly_star) # # # # Решаем гравнеустойчивость для точек, где есть данные по газовой плотности # star_density = [surfaceDensityStarI(M_to_L, h_disc, R, mu0_c_I) for R in r_surfd_gas] # gas_density = [gas_sf_data[1][gas_sf_data[0].index(R)] for R in r_surfd_gas] # sigma_corr_gas = [math.sqrt(sig_R2[r_star_and_gas.index(R)]) for R in r_surfd_gas] # Qeffs = findTwoFluidQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, resolution, 60.0) # hydroQeffs = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, # resolution, 60.0) # hzGas = [zGas(R[1], R[2], resolution) / 2 for R in zip(r_surfd_gas, star_density, gas_density)] # sigmaZgas = [math.sqrt(sig_Z2[r_star_and_gas.index(R)]) for R in r_surfd_gas] # hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_surfd_gas, star_density, gas_density, sigmaZgas)] # 
plotVerticalScale(star_density, gas_density, resolution, sigmaZgas, r_surfd_gas, path) # discQeffs = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, # resolution, hzStar, hzGas, 60.0) # Qeffs1F = findOneFluidQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, resolution, # 60.0) # # # Смотрим, как отразится уменьшение толщины диска в два раза. # hzStar = [hzs / 2 for hzs in hzStar] # discQeffs_3 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, # resolution, hzStar, hzGas, 60.0) # # Смотрим, какие результаты в случае однородно толстого диска 0.2h # # hzStar = [0.1 * h_disc] * r_surfd_gas.__len__() # hzStar = [0.5] * r_surfd_gas.__len__() # discQeffs_4 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas, gas_density, star_density, sigma_corr_gas, path, # resolution, hzStar, hzGas, 60.0) # # # # То же для другого угла - чтобы понять зависимость от угла # incl = incl + di # # poly_star1, poly_gas1, star_data1, gas_data1 = bendStarRC(correctGasData, correctStarData, path, incl, 0.0, False, # pol_degree_star, pol_degree_gas, name, scale, gas_corr_by_incl, False) # sigLosPolyParams1 = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, False, min(Rmax, R2)) # eval_SigPhi_to_sigR(poly_star1, R1, R2, 0.1, path) # evalEpyciclicFreq(poly_gas1, arange(R1 + 2, R2, 0.1), path, resolution, h_disc) # sig_R2_1, sig_Phi2_1, sig_Z2_1 = SVEfunction(r_star_and_gas, h_disc, path, poly_star, sigMajData, # sigLosPolyParams, 0.5, 71, incl) # sigma_corr_gas_1 = [math.sqrt(sig_R2_1[r_star_and_gas.index(R)]) for R in r_surfd_gas] # Qeffs_1 = findTwoFluidQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path, resolution, # 60.0) # hydroQeffs_1 = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path, # resolution, 60.0) # sigmaZgas = [math.sqrt(sig_Z2_1[r_star_and_gas.index(R)]) for R in r_surfd_gas] # hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_surfd_gas, star_density, gas_density, sigmaZgas)] # discQeffs_1 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path, # resolution, hzStar, hzGas, 60.0) # Qeffs1F_1 = findOneFluidQeffs(r_surfd_gas, poly_gas1, gas_density, star_density, sigma_corr_gas_1, path, resolution, # 60.0) # # # То же для другого угла # incl = incl - 2 * di # # poly_star2, poly_gas2, star_data2, gas_data2 = bendStarRC(correctGasData, correctStarData, path, incl, 0.0, False, # pol_degree_star, pol_degree_gas, name, # scale, gas_corr_by_incl, False) # sigLosPolyParams2 = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, False, min(Rmax, R2)) # eval_SigPhi_to_sigR(poly_star2, R1, R2, 0.1, path) # evalEpyciclicFreq(poly_gas2, arange(R1 + 2, R2, 0.1), path, resolution, h_disc) # sig_R2_2, sig_Phi2_2, sig_Z2_2 = SVEfunction(r_star_and_gas, h_disc, path, poly_star, sigMajData, # sigLosPolyParams, 0.5, 71, incl) # sigma_corr_gas_2 = [math.sqrt(sig_R2_2[r_star_and_gas.index(R)]) for R in r_surfd_gas] # Qeffs_2 = findTwoFluidQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path, resolution, # 60.0) # hydroQeffs_2 = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path, # resolution, 60.0) # sigmaZgas = [math.sqrt(sig_Z2_2[r_star_and_gas.index(R)]) for R in r_surfd_gas] # hzStar = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in zip(r_surfd_gas, 
star_density, gas_density, sigmaZgas)] # discQeffs_2 = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path, # resolution, hzStar, hzGas, 60.0) # Qeffs1F_2 = findOneFluidQeffs(r_surfd_gas, poly_gas2, gas_density, star_density, sigma_corr_gas_2, path, resolution, # 60.0) # # # Монте-Карло реализации в количестве monte_carlo_realizations штук. # # incl = incl + di # sigR2_list = [sig_R2] # sigZ2_list = [sig_Z2] # sigPhi2_list = [sig_Phi2] # Qeffs_list = [zip(*Qeffs)[2]] # hydroQeffs_list = [zip(*hydroQeffs)[2]] # discQeffs_list = [zip(*discQeffs)[2]] # Qeffs1F_list = [Qeffs1F] # MC_iter = 1 # # while MC_iter < monte_carlo_realizations: # MC_iter += 1 # print '#!!!!!!!!!!!!# Monte-Carlo iterration number ', MC_iter # poly_star_mc, poly_gas_mc, star_data_mc, gas_data_mc = bendStarRC(correctGasData, correctStarData, path, incl, # 0.0, False, pol_degree_star, pol_degree_gas, name, scale, gas_corr_by_incl, True) # sigLosPolyParams_mc = fitPolySigLosMaj(correctSigmaLosMaj, path, scale, incl, sig_pol_deg, True, min(Rmax, R2)) # eval_SigPhi_to_sigR(poly_star_mc, R1, R2, 0.1, path) # evalEpyciclicFreq(poly_gas_mc, arange(R1 + 2, R2, 0.1), path, resolution, h_disc) # sig_R2_mc, sig_Phi2_mc, sig_Z2_mc = SVEfunction(r_star_and_gas, h_disc, path, poly_star, sigMajData, # sigLosPolyParams, 0.5, 71, incl) # sigma_corr_gas_mc = [math.sqrt(sig_R2_mc[r_star_and_gas.index(R)]) for R in r_surfd_gas] # Qeffs_mc = findTwoFluidQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc, path, # resolution, 60.0) # hydroQeffs_mc = findTwoFluidHydroQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc, # path, # resolution, 60.0) # sigmaZgas_mc = [math.sqrt(sig_Z2_mc[r_star_and_gas.index(R)]) for R in r_surfd_gas] # hzStar_mc = [zStar(R[1], R[2], resolution, R[3]) / 2 for R in # zip(r_surfd_gas, star_density, gas_density, sigmaZgas_mc)] # discQeffs_mc = findTwoFluidWithDiscQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc, # path, # resolution, hzStar_mc, hzGas, 60.0) # Qeffs1F_mc = findOneFluidQeffs(r_surfd_gas, poly_gas_mc, gas_density, star_density, sigma_corr_gas_mc, path, # resolution, # 60.0) # sigR2_list.append(sig_R2_mc) # sigZ2_list.append(sig_Z2_mc) # sigPhi2_list.append(sig_Phi2_mc) # Qeffs_list.append(zip(*Qeffs_mc)[2]) # hydroQeffs_list.append(zip(*hydroQeffs_mc)[2]) # discQeffs_list.append(zip(*discQeffs_mc)[2]) # Qeffs1F_list.append(Qeffs1F_mc) # # plotFinalPics(path, poly_star, poly_gas, di, star_data, gas_data, incl, resolution, h_disc, r_eff_bulge, # sigMajData, sigLosGaussParams, sigLosPolyParams, sigLosSinhParams, r_surfd_gas, # zip(Qeffs1F, Qeffs1F_1, Qeffs1F_2) + Qeffs1F_list, # zip(zip(*hydroQeffs)[2], zip(*hydroQeffs_1)[2], zip(*hydroQeffs_2)[2]) + hydroQeffs_list, # zip(zip(*Qeffs)[2], zip(*Qeffs_1)[2], zip(*Qeffs_2)[2]) + Qeffs_list, # zip(zip(*discQeffs)[2], zip(*discQeffs_1)[2], zip(*discQeffs_2)[2], zip(*discQeffs_3)[2], zip(*discQeffs_4)[2]) # + discQeffs_list, # r_star_and_gas, # zip(sig_R2, sig_R2_1, sig_R2_2) + sigR2_list, # zip(sig_Phi2, sig_Phi2_1, sig_Phi2_2) + sigPhi2_list, # zip(sig_Z2, sig_Z2_1, sig_Z2_2) + sigZ2_list, # hzStar) # plt.show() finishTime = time.time() print '#!!!!!!!!!!!!# Time total: ', (finishTime - startTime), 's' print '#!!!!!!!!!!!!# THE END'
gpl-3.0
8,201,307,194,499,260,000
41.394643
124
0.609241
false
2.272083
false
false
false
lahwaacz/wiki-scripts
ws/db/selects/__init__.py
1
8229
#!/usr/bin/env python3 from collections import OrderedDict import sqlalchemy as sa from .namespaces import * from .interwiki import * from .lists.recentchanges import * from .lists.logevents import * from .lists.allpages import * from .lists.protectedtitles import * from .lists.allrevisions import * from .lists.alldeletedrevisions import * from .lists.allusers import * from .props.info import * from .props.pageprops import * from .props.revisions import * from .props.deletedrevisions import * from .props.templates import * from .props.transcludedin import * from .props.links import * from .props.linkshere import * from .props.images import * from .props.categories import * from .props.langlinks import * from .props.iwlinks import * from .props.extlinks import * from .props.redirects import * from .props.sections import * __classes_lists = { "recentchanges": RecentChanges, "logevents": LogEvents, "allpages": AllPages, "protectedtitles": ProtectedTitles, "allrevisions": AllRevisions, "alldeletedrevisions": AllDeletedRevisions, "allusers": AllUsers, } # TODO: generator=allpages works, check the others __classes_generators = { "recentchanges": RecentChanges, "allpages": AllPages, "protectedtitles": ProtectedTitles, "allrevisions": AllRevisions, "alldeletedrevisions": AllDeletedRevisions, } # MediaWiki's prop=revisions supports 3 modes: # 1. multiple pages, but only the latest revision # 2. single page, but all revisions # 3. specifying revids # Fuck it, let's have separate "latestrevisions" for mode 1... __classes_props = { "info": Info, "pageprops": PageProps, "latestrevisions": Revisions, # custom module "revisions": Revisions, "deletedrevisions": DeletedRevisions, "templates": Templates, "transcludedin": TranscludedIn, "links": Links, "linkshere": LinksHere, "images": Images, "categories": Categories, "langlinks": LanguageLinks, "iwlinks": InterwikiLinks, "extlinks": ExternalLinks, "redirects": Redirects, "sections": Sections, # custom module } def list(db, params): assert "list" in params list = params.pop("list") if list not in __classes_lists: raise NotImplementedError("Module list={} is not implemented yet.".format(list)) s = __classes_lists[list](db) # TODO: make sure that all parameters are used (i.e. 
when all modules take their parameters, params_copy should be empty) list_params = s.filter_params(params) s.set_defaults(list_params) s.sanitize_params(list_params) query = s.get_select(list_params) # TODO: some lists like allrevisions should group the results per page like MediaWiki result = s.execute_sql(query) for row in result: yield s.db_to_api(row) result.close() def get_pageset(db, titles=None, pageids=None): """ :param list titles: list of :py:class:`ws.parser_helpers.title.Title` objects :param list pageids: list of :py:obj:`int` objects """ assert titles is not None or pageids is not None assert titles is None or pageids is None # join to get the namespace prefix page = db.page nss = db.namespace_starname tail = page.outerjoin(nss, page.c.page_namespace == nss.c.nss_id) s = sa.select([page.c.page_id, page.c.page_namespace, page.c.page_title, nss.c.nss_name]) if titles is not None: ns_title_pairs = [(t.namespacenumber, t.dbtitle()) for t in titles] s = s.where(sa.tuple_(page.c.page_namespace, page.c.page_title).in_(ns_title_pairs)) s = s.order_by(page.c.page_namespace.asc(), page.c.page_title.asc()) ex = sa.select([page.c.page_namespace, page.c.page_title]) ex = ex.where(sa.tuple_(page.c.page_namespace, page.c.page_title).in_(ns_title_pairs)) elif pageids is not None: s = s.where(page.c.page_id.in_(pageids)) s = s.order_by(page.c.page_id.asc()) ex = sa.select([page.c.page_id]) ex = ex.where(page.c.page_id.in_(pageids)) return tail, s, ex def query_pageset(db, params): params_copy = params.copy() # TODO: for the lack of better structure, we abuse the AllPages class for execution of titles= and pageids= queries s = AllPages(db) assert "titles" in params or "pageids" in params or "generator" in params if "titles" in params: titles = params_copy.pop("titles") if isinstance(titles, str): titles = {titles} assert isinstance(titles, set) titles = [db.Title(t) for t in titles] tail, pageset, ex = get_pageset(db, titles=titles) elif "pageids" in params: pageids = params_copy.pop("pageids") if isinstance(pageids, int): pageids = {pageids} assert isinstance(pageids, set) tail, pageset, ex = get_pageset(db, pageids=pageids) elif "generator" in params: generator = params_copy.pop("generator") if generator not in __classes_generators: raise NotImplementedError("Module generator={} is not implemented yet.".format(generator)) s = __classes_generators[generator](db) # TODO: make sure that all parameters are used (i.e. 
when all modules take their parameters, params_copy should be empty) generator_params = s.filter_params(params_copy, generator=True) s.set_defaults(generator_params) s.sanitize_params(generator_params) pageset, tail = s.get_pageset(generator_params) # report missing pages (does not make sense for generators) if "generator" not in params: existing_pages = set() result = s.execute_sql(ex) for row in result: if "titles" in params: existing_pages.add((row.page_namespace, row.page_title)) elif "pageids" in params: existing_pages.add(row.page_id) if "titles" in params: for t in titles: if (t.namespacenumber, t.dbtitle()) not in existing_pages: yield {"missing": "", "ns": t.namespacenumber, "title": t.dbtitle()} elif "pageids" in params: for p in pageids: if p not in existing_pages: yield {"missing": "", "pageid": p} # fetch the pageset into an intermediate list # TODO: query-continuation is probably needed for better efficiency query = pageset.select_from(tail) pages = OrderedDict() # for indexed access, like in MediaWiki result = s.execute_sql(query) for row in result: entry = s.db_to_api(row) pages[entry["pageid"]] = entry result.close() if "prop" in params: prop = params_copy.pop("prop") if isinstance(prop, str): prop = {prop} assert isinstance(prop, set) for p in prop: if p not in __classes_props: raise NotImplementedError("Module prop={} is not implemented yet.".format(p)) _s = __classes_props[p](db) if p == "latestrevisions": prop_tail = _s.join_with_pageset(tail, enum_rev_mode=False) else: prop_tail = _s.join_with_pageset(tail) prop_params = _s.filter_params(params_copy) _s.set_defaults(prop_params) prop_select, prop_tail = _s.get_select_prop(pageset, prop_tail, prop_params) query = prop_select.select_from(prop_tail) result = _s.execute_sql(query) for row in result: page = pages[row["page_id"]] _s.db_to_api_subentry(page, row) result.close() yield from pages.values() def query(db, params=None, **kwargs): if params is None: params = kwargs elif not isinstance(params, dict): raise ValueError("params must be dict or None") elif kwargs and params: raise ValueError("specifying 'params' and 'kwargs' at the same time is not supported") if "list" in params: return list(db, params) elif "titles" in params or "pageids" in params or "generator" in params: return query_pageset(db, params) raise NotImplementedError("Unknown query: no recognizable parameter ({}).".format(params))
gpl-3.0
-5,223,906,679,752,857,000
35.573333
129
0.645036
false
3.642762
false
false
false
chubbymaggie/angr
angr/storage/memory.py
1
38942
#!/usr/bin/env python import logging l = logging.getLogger("angr.storage.memory") import claripy from ..state_plugins.plugin import SimStatePlugin from ..engines.vex.ccall import _get_flags stn_map = { 'st%d' % n: n for n in xrange(8) } tag_map = { 'tag%d' % n: n for n in xrange(8) } class AddressWrapper(object): """ AddressWrapper is used in SimAbstractMemory, which provides extra meta information for an address (or a ValueSet object) that is normalized from an integer/BVV/StridedInterval. """ def __init__(self, region, region_base_addr, address, is_on_stack, function_address): """ Constructor for the class AddressWrapper. :param strregion: Name of the memory regions it belongs to. :param int region_base_addr: Base address of the memory region :param address: An address (not a ValueSet object). :param bool is_on_stack: Whether this address is on a stack region or not. :param int function_address: Related function address (if any). """ self.region = region self.region_base_addr = region_base_addr self.address = address self.is_on_stack = is_on_stack self.function_address = function_address def __hash__(self): return hash((self.region, self.address)) def __eq__(self, other): return self.region == other.region and self.address == other.address def __repr__(self): return "<%s> %s" % (self.region, hex(self.address)) def to_valueset(self, state): """ Convert to a ValueSet instance :param state: A state :return: The converted ValueSet instance """ return state.se.VS(state.arch.bits, self.region, self.region_base_addr, self.address) class RegionDescriptor(object): """ Descriptor for a memory region ID. """ def __init__(self, region_id, base_address, related_function_address=None): self.region_id = region_id self.base_address = base_address self.related_function_address = related_function_address def __repr__(self): return "<%s - %#x>" % ( self.region_id, self.related_function_address if self.related_function_address is not None else 0 ) class RegionMap(object): """ Mostly used in SimAbstractMemory, RegionMap stores a series of mappings between concrete memory address ranges and memory regions, like stack frames and heap regions. """ def __init__(self, is_stack): """ Constructor :param is_stack: Whether this is a region map for stack frames or not. Different strategies apply for stack regions. 
""" self.is_stack = is_stack # An AVLTree, which maps stack addresses to region IDs self._address_to_region_id = AVLTree() # A dict, which maps region IDs to memory address ranges self._region_id_to_address = { } # # Properties # def __repr__(self): return "RegionMap<%s>" % ( "S" if self.is_stack else "H" ) @property def is_empty(self): return len(self._address_to_region_id) == 0 @property def stack_base(self): if not self.is_stack: raise SimRegionMapError('Calling "stack_base" on a non-stack region map.') return self._address_to_region_id.max_key() @property def region_ids(self): return self._region_id_to_address.keys() # # Public methods # def copy(self): r = RegionMap(is_stack=self.is_stack) # A shallow copy should be enough, since we never modify any RegionDescriptor object in-place if len(self._address_to_region_id) > 0: # TODO: There is a bug in bintrees 2.0.2 that prevents us from copying a non-empty AVLTree object # TODO: Consider submit a pull request r._address_to_region_id = self._address_to_region_id.copy() r._region_id_to_address = self._region_id_to_address.copy() return r def map(self, absolute_address, region_id, related_function_address=None): """ Add a mapping between an absolute address and a region ID. If this is a stack region map, all stack regions beyond (lower than) this newly added regions will be discarded. :param absolute_address: An absolute memory address. :param region_id: ID of the memory region. :param related_function_address: A related function address, mostly used for stack regions. """ if self.is_stack: # Sanity check if not region_id.startswith('stack_'): raise SimRegionMapError('Received a non-stack memory ID "%d" in a stack region map' % region_id) # Remove all stack regions that are lower than the one to add while True: try: addr = self._address_to_region_id.floor_key(absolute_address) descriptor = self._address_to_region_id[addr] # Remove this mapping del self._address_to_region_id[addr] # Remove this region ID from the other mapping del self._region_id_to_address[descriptor.region_id] except KeyError: break else: if absolute_address in self._address_to_region_id: descriptor = self._address_to_region_id[absolute_address] # Remove this mapping del self._address_to_region_id[absolute_address] del self._region_id_to_address[descriptor.region_id] # Add this new region mapping desc = RegionDescriptor( region_id, absolute_address, related_function_address=related_function_address ) self._address_to_region_id[absolute_address] = desc self._region_id_to_address[region_id] = desc def unmap_by_address(self, absolute_address): """ Removes a mapping based on its absolute address. :param absolute_address: An absolute address """ desc = self._address_to_region_id[absolute_address] del self._address_to_region_id[absolute_address] del self._region_id_to_address[desc.region_id] def absolutize(self, region_id, relative_address): """ Convert a relative address in some memory region to an absolute address. :param region_id: The memory region ID :param relative_address: The relative memory offset in that memory region :return: An absolute address if converted, or an exception is raised when region id does not exist. 
""" if region_id == 'global': # The global region always bases 0 return relative_address if region_id not in self._region_id_to_address: raise SimRegionMapError('Non-existent region ID "%s"' % region_id) base_address = self._region_id_to_address[region_id].base_address return base_address + relative_address def relativize(self, absolute_address, target_region_id=None): """ Convert an absolute address to the memory offset in a memory region. Note that if an address belongs to heap region is passed in to a stack region map, it will be converted to an offset included in the closest stack frame, and vice versa for passing a stack address to a heap region. Therefore you should only pass in address that belongs to the same category (stack or non-stack) of this region map. :param absolute_address: An absolute memory address :return: A tuple of the closest region ID, the relative offset, and the related function address. """ if target_region_id is None: if self.is_stack: # Get the base address of the stack frame it belongs to base_address = self._address_to_region_id.ceiling_key(absolute_address) else: try: base_address = self._address_to_region_id.floor_key(absolute_address) except KeyError: # Not found. It belongs to the global region then. return 'global', absolute_address, None descriptor = self._address_to_region_id[base_address] else: if target_region_id == 'global': # Just return the absolute address return 'global', absolute_address, None if target_region_id not in self._region_id_to_address: raise SimRegionMapError('Trying to relativize to a non-existent region "%s"' % target_region_id) descriptor = self._region_id_to_address[target_region_id] base_address = descriptor.base_address return descriptor.region_id, absolute_address - base_address, descriptor.related_function_address class MemoryStoreRequest(object): """ A MemoryStoreRequest is used internally by SimMemory to track memory request data. """ def __init__(self, addr, data=None, size=None, condition=None, endness=None): self.addr = addr self.data = data self.size = size self.condition = condition self.endness = endness # was this store done? self.completed = False # stuff that's determined during handling self.actual_addresses = None self.constraints = [ ] self.fallback_values = None self.symbolic_sized_values = None self.conditional_values = None self.simplified_values = None self.stored_values = None def _adjust_condition(self, state): self.condition = state._adjust_condition(self.condition) class SimMemory(SimStatePlugin): """ Represents the memory space of the process. """ def __init__(self, endness=None, abstract_backer=None, stack_region_map=None, generic_region_map=None): SimStatePlugin.__init__(self) self.id = None self.endness = "Iend_BE" if endness is None else endness # Boolean or None. Indicates whether this memory is internally used inside SimAbstractMemory self._abstract_backer = abstract_backer # # These are some performance-critical thresholds # # The maximum range of a normal write operation. If an address range is greater than this number, # SimMemory will simply concretize it to a single value. Note that this is only relevant when # the "symbolic" concretization strategy is enabled for writes. self._write_address_range = 128 self._write_address_range_approx = 128 # The maximum range of a symbolic read address. If an address range is greater than this number, # SimMemory will simply concretize it. 
self._read_address_range = 1024 self._read_address_range_approx = 1024 # The maximum size of a symbolic-sized operation. If a size maximum is greater than this number, # SimMemory will constrain it to this number. If the size minimum is greater than this # number, a SimMemoryLimitError is thrown. self._maximum_symbolic_size = 8 * 1024 self._maximum_symbolic_size_approx = 4*1024 # Same, but for concrete writes self._maximum_concrete_size = 0x1000000 # Save those arguments first. Since self.state is empty at this moment, we delay the initialization of region # maps until set_state() is called. self._temp_stack_region_map = stack_region_map self._temp_generic_region_map = generic_region_map self._stack_region_map = None self._generic_region_map = None @property def category(self): """ Return the category of this SimMemory instance. It can be one of the three following categories: reg, mem, or file. """ if self.id in ('reg', 'mem'): return self.id elif self._abstract_backer: return 'mem' elif self.id.startswith('file'): return 'file' else: raise SimMemoryError('Unknown SimMemory category for memory_id "%s"' % self.id) def set_state(self, state): """ Call the set_state method in SimStatePlugin class, and then perform the delayed initialization. :param state: The SimState instance """ SimStatePlugin.set_state(self, state) # Delayed initialization stack_region_map, generic_region_map = self._temp_stack_region_map, self._temp_generic_region_map if stack_region_map or generic_region_map: # Inherited from its parent self._stack_region_map = stack_region_map.copy() self._generic_region_map = generic_region_map.copy() else: if not self._abstract_backer and o.REGION_MAPPING in self.state.options: # Only the top-level SimMemory instance can have region maps. self._stack_region_map = RegionMap(True) self._generic_region_map = RegionMap(False) else: self._stack_region_map = None self._generic_region_map = None def _resolve_location_name(self, name, is_write=False): if self.category == 'reg': if self.state.arch.name in ('X86', 'AMD64'): if name in stn_map: return (((stn_map[name] + self.load('ftop')) & 7) << 3) + self.state.arch.registers['fpu_regs'][0], 8 elif name in tag_map: return ((tag_map[name] + self.load('ftop')) & 7) + self.state.arch.registers['fpu_tags'][0], 1 elif name in ('flags', 'eflags', 'rflags'): # we tweak the state to convert the vex condition registers into the flags register if not is_write: # this work doesn't need to be done if we're just gonna overwrite it self.store('cc_dep1', _get_flags(self.state)[0]) # TODO: can constraints be added by this? 
self.store('cc_op', 0) # OP_COPY return self.state.arch.registers['cc_dep1'][0], self.state.arch.bytes if self.state.arch.name in ('ARMEL', 'ARMHF', 'ARM', 'AARCH64'): if name == 'flags': if not is_write: self.store('cc_dep1', _get_flags(self.state)[0]) self.store('cc_op', 0) return self.state.arch.registers['cc_dep1'][0], self.state.arch.bytes return self.state.arch.registers[name] elif name[0] == '*': return self.state.registers.load(name[1:]), None else: raise SimMemoryError("Trying to address memory with a register name.") def _convert_to_ast(self, data_e, size_e=None): """ Make an AST out of concrete @data_e """ if type(data_e) is str: # Convert the string into a BVV, *regardless of endness* bits = len(data_e) * 8 data_e = self.state.se.BVV(data_e, bits) elif type(data_e) in (int, long): data_e = self.state.se.BVV(data_e, size_e*8 if size_e is not None else self.state.arch.bits) else: data_e = data_e.raw_to_bv() return data_e def set_stack_address_mapping(self, absolute_address, region_id, related_function_address=None): """ Create a new mapping between an absolute address (which is the base address of a specific stack frame) and a region ID. :param absolute_address: The absolute memory address. :param region_id: The region ID. :param related_function_address: Related function address. """ if self._stack_region_map is None: raise SimMemoryError('Stack region map is not initialized.') self._stack_region_map.map(absolute_address, region_id, related_function_address=related_function_address) def unset_stack_address_mapping(self, absolute_address): """ Remove a stack mapping. :param absolute_address: An absolute memory address, which is the base address of the stack frame to destroy. """ if self._stack_region_map is None: raise SimMemoryError('Stack region map is not initialized.') self._stack_region_map.unmap_by_address(absolute_address) def stack_id(self, function_address): """ Return a memory region ID for a function. If the default region ID exists in the region mapping, an integer will appended to the region name. In this way we can handle recursive function calls, or a function that appears more than once in the call frame. This also means that `stack_id()` should only be called when creating a new stack frame for a function. You are not supposed to call this function every time you want to map a function address to a stack ID. :param int function_address: Address of the function. :return: ID of the new memory region. :rtype: str """ region_id = 'stack_0x%x' % function_address # deduplication region_ids = self._stack_region_map.region_ids if region_id not in region_ids: return region_id else: for i in xrange(0, 2000): new_region_id = region_id + '_%d' % i if new_region_id not in region_ids: return new_region_id raise SimMemoryError('Cannot allocate region ID for function %#08x - recursion too deep' % function_address) def store(self, addr, data, size=None, condition=None, add_constraints=None, endness=None, action=None, inspect=True, priv=None, disable_actions=False): """ Stores content into memory. :param addr: A claripy expression representing the address to store at. :param data: The data to store (claripy expression or something convertable to a claripy expression). :param size: A claripy expression representing the size of the data to store. The following parameters are optional. :param condition: A claripy expression representing a condition if the store is conditional. :param add_constraints: Add constraints resulting from the merge (default: True). 
:param endness: The endianness for the data. :param action: A SimActionData to fill out with the final written value and constraints. :param bool inspect: Whether this store should trigger SimInspect breakpoints or not. :param bool disable_actions: Whether this store should avoid creating SimActions or not. When set to False, state options are respected. """ if priv is not None: self.state.scratch.push_priv(priv) addr_e = _raw_ast(addr) data_e = _raw_ast(data) size_e = _raw_ast(size) condition_e = _raw_ast(condition) add_constraints = True if add_constraints is None else add_constraints if isinstance(addr, str): named_addr, named_size = self._resolve_location_name(addr, is_write=True) addr = named_addr addr_e = addr if size is None: size = named_size size_e = size # store everything as a BV data_e = self._convert_to_ast(data_e, size_e if isinstance(size_e, (int, long)) else None) if type(size_e) in (int, long): size_e = self.state.se.BVV(size_e, self.state.arch.bits) if inspect is True: if self.category == 'reg': self.state._inspect( 'reg_write', BP_BEFORE, reg_write_offset=addr_e, reg_write_length=size_e, reg_write_expr=data_e) addr_e = self.state._inspect_getattr('reg_write_offset', addr_e) size_e = self.state._inspect_getattr('reg_write_length', size_e) data_e = self.state._inspect_getattr('reg_write_expr', data_e) elif self.category == 'mem': self.state._inspect( 'mem_write', BP_BEFORE, mem_write_address=addr_e, mem_write_length=size_e, mem_write_expr=data_e, ) addr_e = self.state._inspect_getattr('mem_write_address', addr_e) size_e = self.state._inspect_getattr('mem_write_length', size_e) data_e = self.state._inspect_getattr('mem_write_expr', data_e) # if the condition is false, bail if condition_e is not None and self.state.se.is_false(condition_e): if priv is not None: self.state.scratch.pop_priv() return if ( o.UNDER_CONSTRAINED_SYMEXEC in self.state.options and isinstance(addr_e, claripy.ast.Base) and addr_e.uninitialized ): self._constrain_underconstrained_index(addr_e) request = MemoryStoreRequest(addr_e, data=data_e, size=size_e, condition=condition_e, endness=endness) try: self._store(request) except SimSegfaultError as e: e.original_addr = addr_e raise if inspect is True: if self.category == 'reg': self.state._inspect('reg_write', BP_AFTER) if self.category == 'mem': self.state._inspect('mem_write', BP_AFTER) add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints) if add_constraints and len(request.constraints) > 0: self.state.add_constraints(*request.constraints) if not disable_actions: if request.completed and o.AUTO_REFS in self.state.options and action is None and not self._abstract_backer: ref_size = size * 8 if size is not None else data_e.size() region_type = self.category if region_type == 'file': # Special handling for files to keep compatibility # We may use some refactoring later region_type = self.id action = SimActionData(self.state, region_type, 'write', addr=addr_e, data=data_e, size=ref_size, condition=condition ) self.state.history.add_action(action) if request.completed and action is not None: action.actual_addrs = request.actual_addresses action.actual_value = action._make_object(request.stored_values[0]) # TODO if len(request.constraints) > 0: action.added_constraints = action._make_object(self.state.se.And(*request.constraints)) else: action.added_constraints = action._make_object(self.state.se.true) if priv is not None: self.state.scratch.pop_priv() def _store(self, request): raise NotImplementedError() 
def store_cases(self, addr, contents, conditions, fallback=None, add_constraints=None, endness=None, action=None): """ Stores content into memory, conditional by case. :param addr: A claripy expression representing the address to store at. :param contents: A list of bitvectors, not necessarily of the same size. Use None to denote an empty write. :param conditions: A list of conditions. Must be equal in length to contents. The following parameters are optional. :param fallback: A claripy expression representing what the write should resolve to if all conditions evaluate to false (default: whatever was there before). :param add_constraints: Add constraints resulting from the merge (default: True) :param endness: The endianness for contents as well as fallback. :param action: A SimActionData to fill out with the final written value and constraints. :type action: SimActionData """ if fallback is None and all(c is None for c in contents): l.debug("Avoiding an empty write.") return addr_e = _raw_ast(addr) contents_e = _raw_ast(contents) conditions_e = _raw_ast(conditions) fallback_e = _raw_ast(fallback) max_bits = max(c.length for c in contents_e if isinstance(c, claripy.ast.Bits)) \ if fallback is None else fallback.length # if fallback is not provided by user, load it from memory # remember to specify the endianness! fallback_e = self.load(addr, max_bits/8, add_constraints=add_constraints, endness=endness) \ if fallback_e is None else fallback_e req = self._store_cases(addr_e, contents_e, conditions_e, fallback_e, endness=endness) add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints) if add_constraints: self.state.add_constraints(*req.constraints) if req.completed and o.AUTO_REFS in self.state.options and action is None: region_type = self.category if region_type == 'file': # Special handling for files to keep compatibility # We may use some refactoring later region_type = self.id action = SimActionData(self.state, region_type, 'write', addr=addr_e, data=req.stored_values[-1], size=max_bits, condition=self.state.se.Or(*conditions), fallback=fallback ) self.state.history.add_action(action) if req.completed and action is not None: action.actual_addrs = req.actual_addresses action.actual_value = action._make_object(req.stored_values[-1]) action.added_constraints = action._make_object(self.state.se.And(*req.constraints) if len(req.constraints) > 0 else self.state.se.true) def _store_cases(self, addr, contents, conditions, fallback, endness=None): extended_contents = [ ] for c in contents: if c is None: c = fallback else: need_bits = fallback.length - c.length if need_bits > 0: c = c.concat(fallback[need_bits-1:0]) extended_contents.append(c) case_constraints = { } for c,g in zip(extended_contents, conditions): if c not in case_constraints: case_constraints[c] = [ ] case_constraints[c].append(g) unique_contents = [ ] unique_constraints = [ ] for c,g in case_constraints.items(): unique_contents.append(c) unique_constraints.append(self.state.se.Or(*g)) if len(unique_contents) == 1 and unique_contents[0] is fallback: req = MemoryStoreRequest(addr, data=fallback, endness=endness) return self._store(req) else: simplified_contents = [ ] simplified_constraints = [ ] for c,g in zip(unique_contents, unique_constraints): simplified_contents.append(self.state.se.simplify(c)) simplified_constraints.append(self.state.se.simplify(g)) cases = zip(simplified_constraints, simplified_contents) #cases = zip(unique_constraints, unique_contents) ite = 
self.state.se.simplify(self.state.se.ite_cases(cases, fallback)) req = MemoryStoreRequest(addr, data=ite, endness=endness) return self._store(req) def load(self, addr, size=None, condition=None, fallback=None, add_constraints=None, action=None, endness=None, inspect=True, disable_actions=False, ret_on_segv=False): """ Loads size bytes from dst. :param dst: The address to load from. :param size: The size (in bytes) of the load. :param condition: A claripy expression representing a condition for a conditional load. :param fallback: A fallback value if the condition ends up being False. :param add_constraints: Add constraints resulting from the merge (default: True). :param action: A SimActionData to fill out with the constraints. :param endness: The endness to load with. :param bool inspect: Whether this store should trigger SimInspect breakpoints or not. :param bool disable_actions: Whether this store should avoid creating SimActions or not. When set to False, state options are respected. :param bool ret_on_segv: Whether returns the memory that is already loaded before a segmentation fault is triggered. The default is False. There are a few possible return values. If no condition or fallback are passed in, then the return is the bytes at the address, in the form of a claripy expression. For example: <A BVV(0x41, 32)> On the other hand, if a condition and fallback are provided, the value is conditional: <A If(condition, BVV(0x41, 32), fallback)> """ add_constraints = True if add_constraints is None else add_constraints addr_e = _raw_ast(addr) size_e = _raw_ast(size) condition_e = _raw_ast(condition) fallback_e = _raw_ast(fallback) if isinstance(addr, str): named_addr, named_size = self._resolve_location_name(addr) addr = named_addr addr_e = addr if size is None: size = named_size size_e = size if size is None: size = self.state.arch.bits / 8 size_e = size if inspect is True: if self.category == 'reg': self.state._inspect('reg_read', BP_BEFORE, reg_read_offset=addr_e, reg_read_length=size_e) addr_e = self.state._inspect_getattr("reg_read_offset", addr_e) size_e = self.state._inspect_getattr("reg_read_length", size_e) elif self.category == 'mem': self.state._inspect('mem_read', BP_BEFORE, mem_read_address=addr_e, mem_read_length=size_e) addr_e = self.state._inspect_getattr("mem_read_address", addr_e) size_e = self.state._inspect_getattr("mem_read_length", size_e) if ( o.UNDER_CONSTRAINED_SYMEXEC in self.state.options and isinstance(addr_e, claripy.ast.Base) and addr_e.uninitialized ): self._constrain_underconstrained_index(addr_e) try: a,r,c = self._load(addr_e, size_e, condition=condition_e, fallback=fallback_e, inspect=inspect, events=not disable_actions, ret_on_segv=ret_on_segv) except SimSegfaultError as e: e.original_addr = addr_e raise add_constraints = self.state._inspect_getattr('address_concretization_add_constraints', add_constraints) if add_constraints and c: self.state.add_constraints(*c) if (self.category == 'mem' and o.SIMPLIFY_MEMORY_READS in self.state.options) or \ (self.category == 'reg' and o.SIMPLIFY_REGISTER_READS in self.state.options): # pylint:disable=too-many-boolean-expressions l.debug("simplifying %s read...", self.category) r = self.state.simplify(r) if not self._abstract_backer and \ o.UNINITIALIZED_ACCESS_AWARENESS in self.state.options and \ self.state.uninitialized_access_handler is not None and \ (r.op == 'Reverse' or r.op == 'I') and \ hasattr(r._model_vsa, 'uninitialized') and \ r._model_vsa.uninitialized: normalized_addresses = self.normalize_address(addr) 
if len(normalized_addresses) > 0 and type(normalized_addresses[0]) is AddressWrapper: normalized_addresses = [ (aw.region, aw.address) for aw in normalized_addresses ] self.state.uninitialized_access_handler(self.category, normalized_addresses, size, r, self.state.scratch.bbl_addr, self.state.scratch.stmt_idx) # the endianess endness = self.endness if endness is None else endness if endness == "Iend_LE": r = r.reversed if inspect is True: if self.category == 'mem': self.state._inspect('mem_read', BP_AFTER, mem_read_expr=r) r = self.state._inspect_getattr("mem_read_expr", r) elif self.category == 'reg': self.state._inspect('reg_read', BP_AFTER, reg_read_expr=r) r = self.state._inspect_getattr("reg_read_expr", r) if not disable_actions: if o.AST_DEPS in self.state.options and self.category == 'reg': r = SimActionObject(r, reg_deps=frozenset((addr,))) if o.AUTO_REFS in self.state.options and action is None: ref_size = size * 8 if size is not None else r.size() region_type = self.category if region_type == 'file': # Special handling for files to keep compatibility # We may use some refactoring later region_type = self.id action = SimActionData(self.state, region_type, 'read', addr=addr, data=r, size=ref_size, condition=condition, fallback=fallback) self.state.history.add_action(action) if action is not None: action.actual_addrs = a action.added_constraints = action._make_object(self.state.se.And(*c) if len(c) > 0 else self.state.se.true) return r def _constrain_underconstrained_index(self, addr_e): if not self.state.uc_manager.is_bounded(addr_e) or self.state.se.max_int(addr_e) - self.state.se.min_int( addr_e) >= self._read_address_range: # in under-constrained symbolic execution, we'll assign a new memory region for this address mem_region = self.state.uc_manager.assign(addr_e) # ... but only if it's not already been constrained to something! if self.state.se.solution(addr_e, mem_region): self.state.add_constraints(addr_e == mem_region) l.debug('Under-constrained symbolic execution: assigned a new memory region @ %s to %s', mem_region, addr_e) def normalize_address(self, addr, is_write=False): #pylint:disable=no-self-use,unused-argument """ Normalize `addr` for use in static analysis (with the abstract memory model). In non-abstract mode, simply returns the address in a single-element list. """ return [ addr ] def _load(self, addr, size, condition=None, fallback=None, inspect=True, events=True, ret_on_segv=False): raise NotImplementedError() def find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1): """ Returns the address of bytes equal to 'what', starting from 'start'. Note that, if you don't specify a default value, this search could cause the state to go unsat if no possible matching byte exists. :param addr: The start address. :param what: What to search for; :param max_search: Search at most this many bytes. :param max_symbolic_bytes: Search through at most this many symbolic bytes. :param default: The default value, if what you're looking for wasn't found. :returns: An expression representing the address of the matching byte. 
""" addr = _raw_ast(addr) what = _raw_ast(what) default = _raw_ast(default) if isinstance(what, str): # Convert it to a BVV what = claripy.BVV(what, len(what) * 8) r,c,m = self._find(addr, what, max_search=max_search, max_symbolic_bytes=max_symbolic_bytes, default=default, step=step) if o.AST_DEPS in self.state.options and self.category == 'reg': r = SimActionObject(r, reg_deps=frozenset((addr,))) return r,c,m def _find(self, addr, what, max_search=None, max_symbolic_bytes=None, default=None, step=1): raise NotImplementedError() def copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None, inspect=True, disable_actions=False): """ Copies data within a memory. :param dst: A claripy expression representing the address of the destination :param src: A claripy expression representing the address of the source The following parameters are optional. :param src_memory: Copy data from this SimMemory instead of self :param src_memory: Copy data to this SimMemory instead of self :param size: A claripy expression representing the size of the copy :param condition: A claripy expression representing a condition, if the write should be conditional. If this is determined to be false, the size of the copy will be 0. """ dst = _raw_ast(dst) src = _raw_ast(src) size = _raw_ast(size) condition = _raw_ast(condition) return self._copy_contents(dst, src, size, condition=condition, src_memory=src_memory, dst_memory=dst_memory, inspect=inspect, disable_actions=disable_actions) def _copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None, inspect=True, disable_actions=False): raise NotImplementedError() from bintrees import AVLTree from .. import sim_options as o from ..state_plugins.sim_action import SimActionData from ..state_plugins.sim_action_object import SimActionObject, _raw_ast from ..errors import SimMemoryError, SimRegionMapError, SimSegfaultError from ..state_plugins.inspect import BP_BEFORE, BP_AFTER
bsd-2-clause
6,487,748,199,254,424,000
43.302617
155
0.600637
false
4.173848
false
false
false
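The stack_id() docstring above explains that region IDs are deduplicated by appending an integer suffix when a function appears more than once in the call frame (e.g. recursion). A minimal standalone sketch of that naming scheme, independent of the angr classes above (the helper name and the address are illustrative):

def pick_stack_region_id(function_address, existing_ids):
    # Mirror of the logic in stack_id(): try the base name first, then '_<i>' suffixes.
    base = 'stack_0x%x' % function_address
    if base not in existing_ids:
        return base
    for i in range(0, 2000):
        candidate = '%s_%d' % (base, i)
        if candidate not in existing_ids:
            return candidate
    raise RuntimeError('recursion too deep for %#x' % function_address)

# A function at 0x400080 entered three times gets three distinct region IDs.
ids = set()
for _ in range(3):
    ids.add(pick_stack_region_id(0x400080, ids))
print(sorted(ids))  # ['stack_0x400080', 'stack_0x400080_0', 'stack_0x400080_1']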
TheAlgorithms/Python
project_euler/problem_070/sol1.py
1
3189
""" Project Euler Problem 70: https://projecteuler.net/problem=70 Euler's Totient function, φ(n) [sometimes called the phi function], is used to determine the number of positive numbers less than or equal to n which are relatively prime to n. For example, as 1, 2, 4, 5, 7, and 8, are all less than nine and relatively prime to nine, φ(9)=6. The number 1 is considered to be relatively prime to every positive number, so φ(1)=1. Interestingly, φ(87109)=79180, and it can be seen that 87109 is a permutation of 79180. Find the value of n, 1 < n < 10^7, for which φ(n) is a permutation of n and the ratio n/φ(n) produces a minimum. ----- This is essentially brute force. Calculate all totients up to 10^7 and find the minimum ratio of n/φ(n) that way. To minimize the ratio, we want to minimize n and maximize φ(n) as much as possible, so we can store the minimum fraction's numerator and denominator and calculate new fractions with each totient to compare against. To avoid dividing by zero, I opt to use cross multiplication. References: Finding totients https://en.wikipedia.org/wiki/Euler's_totient_function#Euler's_product_formula """ from typing import List def get_totients(max_one: int) -> List[int]: """ Calculates a list of totients from 0 to max_one exclusive, using the definition of Euler's product formula. >>> get_totients(5) [0, 1, 1, 2, 2] >>> get_totients(10) [0, 1, 1, 2, 2, 4, 2, 6, 4, 6] """ totients = [0] * max_one for i in range(0, max_one): totients[i] = i for i in range(2, max_one): if totients[i] == i: for j in range(i, max_one, i): totients[j] -= totients[j] // i return totients def has_same_digits(num1: int, num2: int) -> bool: """ Return True if num1 and num2 have the same frequency of every digit, False otherwise. digits[] is a frequency table where the index represents the digit from 0-9, and the element stores the number of appearances. Increment the respective index every time you see the digit in num1, and decrement if in num2. At the end, if the numbers have the same digits, every index must contain 0. >>> has_same_digits(123456789, 987654321) True >>> has_same_digits(123, 12) False >>> has_same_digits(1234566, 123456) False """ digits = [0] * 10 while num1 > 0 and num2 > 0: digits[num1 % 10] += 1 digits[num2 % 10] -= 1 num1 //= 10 num2 //= 10 for digit in digits: if digit != 0: return False return True def solution(max: int = 10000000) -> int: """ Finds the value of n from 1 to max such that n/φ(n) produces a minimum. >>> solution(100) 21 >>> solution(10000) 4435 """ min_numerator = 1 # i min_denominator = 0 # φ(i) totients = get_totients(max + 1) for i in range(2, max + 1): t = totients[i] if i * min_denominator < min_numerator * t and has_same_digits(i, t): min_numerator = i min_denominator = t return min_numerator if __name__ == "__main__": print(f"{solution() = }")
mit
-6,360,014,420,547,202,000
25.714286
78
0.633847
false
3.270576
false
false
false
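The docstring above notes that the minimum of n/φ(n) is tracked with cross multiplication instead of division, so the initial min_denominator = 0 never causes a ZeroDivisionError. A tiny standalone illustration of that comparison (the helper name is made up for the example):

def new_minimum(i, phi_i, min_numerator, min_denominator):
    # Is i/phi_i strictly smaller than min_numerator/min_denominator?
    # Cross-multiplied form used in solution(): i * min_den < min_num * phi_i.
    return i * min_denominator < min_numerator * phi_i

print(new_minimum(21, 12, 1, 0))    # True  -> the first candidate beats the 1/0 seed
print(new_minimum(63, 36, 21, 12))  # False -> 63/36 equals 21/12, not strictly smaller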
ukdtom/WebTools.bundle
Contents/Code/jsonExporterV3.py
1
15236
###################################################################################################################### # json Exporter module for WebTools # # Author: dane22, a Plex Community member # ###################################################################################################################### import os import io from consts import DEBUGMODE, JSONTIMESTAMP import datetime import json from shutil import move # Consts used here FILEEXT = '.json' # File ext of export file statusMsg = 'idle' # Response to getStatus # Internal tracker of where we are runningState = 0 # Flag to set if user wants to cancel bAbort = False GET = ['GETSTATUS'] PUT = ['EXPORT'] POST = [] DELETE = [] class jsonExporterV3(object): init_already = False # Make sure init only run once bResultPresent = False # Do we have a result to present # Init of the class @classmethod def init(self): self.MediaChuncks = 40 self.CoreUrl = 'http://127.0.0.1:32400/library/sections/' # Only init once during the lifetime of this if not jsonExporter.init_already: jsonExporter.init_already = True self.populatePrefs() Log.Debug('******* Starting jsonExporter *******') #********** Functions below ****************** # This is the main call @classmethod def EXPORT(self, req, *args): ''' Return the type of the section ''' def getSectionType(section): url = 'http://127.0.0.1:32400/library/sections/' + section + \ '/all?X-Plex-Container-Start=1&X-Plex-Container-Size=0' try: return XML.ElementFromURL(url).xpath('//MediaContainer/@viewGroup')[0] except: return "None" ''' Create a simple entry in the videoDetails tree ''' def makeSimpleEntry(media, videoDetails, el): try: entry = unicode(videoDetails.get(el)) if entry != 'None': media[el] = entry except: pass ''' Create an array based entry, based on the tag attribute ''' def makeArrayEntry(media, videoDetails, el): try: Entries = videoDetails.xpath('//' + el) EntryList = [] for Entry in Entries: try: EntryList.append(unicode(Entry.xpath('@tag')[0])) except: pass media[el] = EntryList except: pass ''' Export the actual .json file, as well as poster and fanart ''' def makeFiles(ratingKey): videoDetails = XML.ElementFromURL( 'http://127.0.0.1:32400/library/metadata/' + ratingKey).xpath('//Video')[0] try: media = {} ''' Now digest the media, and add to the XML ''' # Id # try: # media['guid'] = videoDetails.get('guid') # except: # pass media['About This File'] = 'JSON Export Made with WebTools for Plex' # Simple entries elements = ['guid', 'title', 'originalTitle', 'titleSort', 'type', 'summary', 'duration', 'rating', 'ratingImage', 'contentRating', 'studio', 'year', 'tagline', 'originallyAvailableAt', 'audienceRatingImage', 'audienceRating'] for element in elements: makeSimpleEntry(media, videoDetails, element) arrayElements = ['Genre', 'Collection', 'Director', 'Writer', 'Producer', 'Country', 'Label'] for element in arrayElements: makeArrayEntry(media, videoDetails, element) # Locked fields Locked = [] try: Fields = videoDetails.xpath('//Field') for Field in Fields: try: if Field.xpath('@locked')[0] == '1': Locked.append(unicode(Field.xpath('@name')[0])) except: pass media['Field'] = Locked except: pass # Role aka actor try: Roles = videoDetails.xpath('//Role') orderNo = 1 Actors = [] for Role in Roles: Actor = {} try: Actor['name'] = unicode(Role.xpath('@tag')[0]) except: pass try: Actor['role'] = unicode(Role.xpath('@role')[0]) except: pass try: Actor['order'] = orderNo orderNo += 1 except: pass try: Actor['thumb'] = Role.xpath('@thumb')[0] except: pass Actors.append(Actor) 
media['Role'] = Actors except Exception, e: Log.Exception('Exception in MakeFiles: ' + str(e)) pass # Let's start by grapping relevant files for this movie fileNames = videoDetails.xpath('//Part') for fileName in fileNames: filename = fileName.xpath('@file')[0] filename = String.Unquote( filename).encode('utf8', 'ignore') # Get name of json file plexJSON = os.path.splitext(filename)[0] + FILEEXT Log.Debug('Name and path to plexJSON file is: ' + plexJSON) try: with io.open(plexJSON, 'w', encoding='utf-8') as outfile: outfile.write( unicode(json.dumps(media, indent=4, sort_keys=True))) except Exception, e: Log.Debug('Exception happend during saving %s. Exception was: %s' % ( plexJSON, str(e))) # Make poster posterUrl = 'http://127.0.0.1:32400' + \ videoDetails.get('thumb') targetFile = os.path.splitext(filename)[0] + '-poster.jpg' response = HTTP.Request(posterUrl) with io.open(targetFile, 'wb') as fo: fo.write(response.content) Log.Debug('Poster saved as %s' % targetFile) # Make fanart posterUrl = 'http://127.0.0.1:32400' + \ videoDetails.get('art') targetFile = os.path.splitext(filename)[0] + '-fanart.jpg' response = HTTP.Request(posterUrl) with io.open(targetFile, 'wb') as fo: fo.write(response.content) Log.Debug('FanArt saved as %s' % targetFile) except Exception, e: Log.Exception( 'Exception happend in generating json file: ' + str(e)) ''' Scan a movie section ''' def scanMovieSection(req, sectionNumber): Log.Debug('Starting scanMovieSection') global AmountOfMediasInDatabase global mediasFromDB global statusMsg global runningState try: # Start by getting the last timestamp for a scanning: if sectionNumber in Dict['jsonExportTimeStamps'].keys(): timeStamp = Dict['jsonExportTimeStamps'][sectionNumber] else: # Setting key for section to epoch start Dict['jsonExportTimeStamps'][sectionNumber] = 0 Dict.Save() timeStamp = 0 # Debug mode? 
if JSONTIMESTAMP != 0: timeStamp = JSONTIMESTAMP now = int((datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds()) Log.Debug('Starting scanMovieDb for section %s' % (sectionNumber)) Log.Debug('Only grap medias updated since: ' + datetime.datetime.fromtimestamp( int(timeStamp)).strftime('%Y-%m-%d %H:%M:%S')) runningState = -1 statusMsg = 'Starting to scan database for section %s' % ( sectionNumber) # Start by getting the totals of this section totalSize = XML.ElementFromURL(self.CoreUrl + sectionNumber + '/all?updatedAt>=' + str( timeStamp) + '&X-Plex-Container-Start=1&X-Plex-Container-Size=0').get('totalSize') AmountOfMediasInDatabase = totalSize Log.Debug('Total size of medias are %s' % (totalSize)) if totalSize == '0': # Stamp dict with new timestamp Dict['jsonExportTimeStamps'][sectionNumber] = now Dict.Save() Log.Debug('Nothing to process...Exiting') return iStart = 0 iCount = 0 statusMsg = 'Scanning database item %s of %s : Working' % ( iCount, totalSize) # So let's walk the library while True: # Grap a chunk from the server videos = XML.ElementFromURL(self.CoreUrl + sectionNumber + '/all?updatedAt>=' + str( timeStamp) + '&X-Plex-Container-Start=' + str(iStart) + '&X-Plex-Container-Size=' + str(self.MediaChuncks)).xpath('//Video') # Walk the chunk for video in videos: if bAbort: raise ValueError('Aborted') iCount += 1 makeFiles(video.get('ratingKey')) statusMsg = 'Scanning database: item %s of %s : Working' % ( iCount, totalSize) iStart += self.MediaChuncks if len(videos) == 0: statusMsg = 'Scanning database: %s : Done' % ( totalSize) Log.Debug('***** Done scanning the database *****') runningState = 1 break # Stamp dict with new timestamp Dict['jsonExportTimeStamps'][sectionNumber] = now Dict.Save() return except Exception, e: Log.Exception('Fatal error in scanMovieDb: ' + str(e)) runningState = 99 # End scanMovieDb def scanShowSection(req, sectionNumber): print 'Ged1 scanShowSection' # ********** Main function ************** Log.Debug('json export called') try: section = req.get_argument('section', '_export_missing_') if section == '_export_missing_': req.clear() req.set_status(412) req.finish( "<html><body>Missing section parameter</body></html>") if getSectionType(section) == 'movie': scanMovieSection(req, section) elif getSectionType(section) == 'show': scanShowSection(req, section) else: Log.Debug('Unknown section type for section:' + section + ' type: ' + getSectionType(section)) req.clear() req.set_status(404) req.finish("Unknown sectiontype or sectiion") except Exception, e: Log.Exception('Exception in json export' + str(e)) # Return current status @classmethod def GETSTATUS(self, req, *args): global runningState req.clear() req.set_status(200) if runningState == 0: req.finish('Idle') else: req.finish(statusMsg) ''' Get the relevant function and call it with optinal params ''' @classmethod def getFunction(self, metode, req, *args): self.init() params = req.request.uri[8:].upper().split('/') self.function = None if metode == 'get': for param in params: if param in GET: self.function = param break else: pass elif metode == 'post': for param in params: if param in POST: self.function = param break else: pass elif metode == 'put': for param in params: if param in PUT: self.function = param break else: pass elif metode == 'delete': for param in params: if param in DELETE: self.function = param break else: pass if self.function == None: Log.Debug('Function to call is None') req.clear() req.set_status(404) req.finish('Unknown function call') else: # Check for 
optional argument paramsStr = req.request.uri[req.request.uri.upper().find( self.function) + len(self.function):] # remove starting and ending slash if paramsStr.endswith('/'): paramsStr = paramsStr[:-1] if paramsStr.startswith('/'): paramsStr = paramsStr[1:] # Turn into a list params = paramsStr.split('/') # If empty list, turn into None if params[0] == '': params = None try: Log.Debug('Function to call is: ' + self.function + ' with params: ' + str(params)) if params == None: getattr(self, self.function)(req) else: getattr(self, self.function)(req, params) except Exception, e: Log.Exception('Exception in process of: ' + str(e)) ################### Internal functions ############################# ''' Populate the defaults, if not already there ''' @classmethod def populatePrefs(self): if Dict['jsonExportTimeStamps'] == None: Dict['jsonExportTimeStamps'] = {} Dict.Save() ############################################################################################################## jsonExporter = jsonExporterV3()
mpl-2.0
7,612,324,967,193,343,000
40.857143
148
0.454319
false
4.979085
false
false
false
nanjj/softlayer-python
SoftLayer/CLI/file/detail.py
1
5131
"""Display details for a specified volume.""" # :license: MIT, see LICENSE for more details. import click import SoftLayer from SoftLayer.CLI import environment from SoftLayer.CLI import formatting from SoftLayer import utils @click.command() @click.argument('volume_id') @environment.pass_env def cli(env, volume_id): """Display details for a specified volume.""" file_manager = SoftLayer.FileStorageManager(env.client) file_volume = file_manager.get_file_volume_details(volume_id) file_volume = utils.NestedDict(file_volume) table = formatting.KeyValueTable(['Name', 'Value']) table.align['Name'] = 'r' table.align['Value'] = 'l' storage_type = file_volume['storageType']['keyName'].split('_').pop(0) table.add_row(['ID', file_volume['id']]) table.add_row(['Username', file_volume['username']]) table.add_row(['Type', storage_type]) table.add_row(['Capacity (GB)', "%iGB" % file_volume['capacityGb']]) used_space = int(file_volume['bytesUsed'])\ if file_volume['bytesUsed'] else 0 if used_space < (1 << 10): table.add_row(['Used Space', "%dB" % used_space]) elif used_space < (1 << 20): table.add_row(['Used Space', "%dKB" % (used_space / (1 << 10))]) elif used_space < (1 << 30): table.add_row(['Used Space', "%dMB" % (used_space / (1 << 20))]) else: table.add_row(['Used Space', "%dGB" % (used_space / (1 << 30))]) if file_volume.get('provisionedIops'): table.add_row(['IOPs', int(file_volume['provisionedIops'])]) if file_volume.get('storageTierLevel'): table.add_row([ 'Endurance Tier', file_volume['storageTierLevel'], ]) table.add_row([ 'Data Center', file_volume['serviceResource']['datacenter']['name'], ]) table.add_row([ 'Target IP', file_volume['serviceResourceBackendIpAddress'], ]) if file_volume['fileNetworkMountAddress']: table.add_row([ 'Mount Address', file_volume['fileNetworkMountAddress'], ]) if file_volume['snapshotCapacityGb']: table.add_row([ 'Snapshot Capacity (GB)', file_volume['snapshotCapacityGb'], ]) if 'snapshotSizeBytes' in file_volume['parentVolume']: table.add_row([ 'Snapshot Used (Bytes)', file_volume['parentVolume']['snapshotSizeBytes'], ]) table.add_row(['# of Active Transactions', "%i" % file_volume['activeTransactionCount']]) if file_volume['activeTransactions']: for trans in file_volume['activeTransactions']: if 'transactionStatus' in trans and 'friendlyName' in trans['transactionStatus']: table.add_row(['Ongoing Transaction', trans['transactionStatus']['friendlyName']]) table.add_row(['Replicant Count', "%u" % file_volume.get('replicationPartnerCount', 0)]) if file_volume['replicationPartnerCount'] > 0: # This if/else temporarily handles a bug in which the SL API # returns a string or object for 'replicationStatus'; it seems that # the type is string for File volumes and object for Block volumes if 'message' in file_volume['replicationStatus']: table.add_row(['Replication Status', "%s" % file_volume['replicationStatus']['message']]) else: table.add_row(['Replication Status', "%s" % file_volume['replicationStatus']]) replicant_list = [] for replicant in file_volume['replicationPartners']: replicant_table = formatting.Table(['Replicant ID', replicant['id']]) replicant_table.add_row([ 'Volume Name', utils.lookup(replicant, 'username')]) replicant_table.add_row([ 'Target IP', utils.lookup(replicant, 'serviceResourceBackendIpAddress')]) replicant_table.add_row([ 'Data Center', utils.lookup(replicant, 'serviceResource', 'datacenter', 'name')]) replicant_table.add_row([ 'Schedule', utils.lookup(replicant, 'replicationSchedule', 'type', 'keyname')]) 
replicant_list.append(replicant_table) table.add_row(['Replicant Volumes', replicant_list]) if file_volume.get('originalVolumeSize'): original_volume_info = formatting.Table(['Property', 'Value']) original_volume_info.add_row(['Original Volume Size', file_volume['originalVolumeSize']]) if file_volume.get('originalVolumeName'): original_volume_info.add_row(['Original Volume Name', file_volume['originalVolumeName']]) if file_volume.get('originalSnapshotName'): original_volume_info.add_row(['Original Snapshot Name', file_volume['originalSnapshotName']]) table.add_row(['Original Volume Properties', original_volume_info]) env.fout(table)
mit
-6,818,909,302,590,211,000
39.401575
105
0.592477
false
4.101519
false
false
false
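cli() above picks a display unit for 'Used Space' by comparing the byte count against 1 << 10, 1 << 20 and 1 << 30; the same logic as a tiny free-standing helper (hypothetical name, not part of the SoftLayer CLI):

def format_used_space(used_bytes):
    # Same thresholds and integer formatting as the table rows above.
    if used_bytes < (1 << 10):
        return "%dB" % used_bytes
    if used_bytes < (1 << 20):
        return "%dKB" % (used_bytes / (1 << 10))
    if used_bytes < (1 << 30):
        return "%dMB" % (used_bytes / (1 << 20))
    return "%dGB" % (used_bytes / (1 << 30))

print(format_used_space(512))            # 512B
print(format_used_space(2048))           # 2KB
print(format_used_space(5 * (1 << 30)))  # 5GB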
wood-galaxy/FreeCAD
src/Mod/Path/PathCommands.py
3
3451
# -*- coding: utf-8 -*- # *************************************************************************** # * * # * Copyright (c) 2016 sliptonic <shopinthewoods@gmail.com> * # * * # * This program is free software; you can redistribute it and/or modify * # * it under the terms of the GNU Lesser General Public License (LGPL) * # * as published by the Free Software Foundation; either version 2 of * # * the License, or (at your option) any later version. * # * for detail see the LICENCE text file. * # * * # * This program is distributed in the hope that it will be useful, * # * but WITHOUT ANY WARRANTY; without even the implied warranty of * # * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * # * GNU Library General Public License for more details. * # * * # * You should have received a copy of the GNU Library General Public * # * License along with this program; if not, write to the Free Software * # * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * # * USA * # * * # *************************************************************************** import FreeCAD from PathScripts.PathUtils import loopdetect if FreeCAD.GuiUp: import FreeCADGui from PySide import QtCore from DraftTools import translate else: def translate(ctxt,txt): return txt __title__="FreeCAD Path Commands" __author__ = "sliptonic" __url__ = "http://www.freecadweb.org" class _CommandSelectLoop: "the Arch RemoveShape command definition" def GetResources(self): return {'Pixmap' : 'Path-SelectLoop', 'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_SelectLoop","Finish Selecting Loop"), 'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_SelectLoop","Complete loop selection from two edges")} def IsActive(self): if bool(FreeCADGui.Selection.getSelection()) is False: return False try: sel = FreeCADGui.Selection.getSelectionEx()[0] sub1 = sel.SubElementNames[0] if sub1[0:4] != 'Edge': return False sub2 = sel.SubElementNames[1] if sub2[0:4] != 'Edge': return False return True except: return False def Activated(self): sel = FreeCADGui.Selection.getSelectionEx()[0] obj = sel.Object edge1 = sel.SubObjects[0] edge2 = sel.SubObjects[1] loopwire = loopdetect(obj, edge1, edge2) if loopwire is not None: FreeCADGui.Selection.clearSelection() elist = obj.Shape.Edges for e in elist: for i in loopwire.Edges: if e.hashCode() == i.hashCode(): FreeCADGui.Selection.addSelection(obj, "Edge"+str(elist.index(e)+1)) if FreeCAD.GuiUp: FreeCADGui.addCommand('Path_SelectLoop',_CommandSelectLoop())
lgpl-2.1
-4,393,383,547,174,822,400
42.1375
112
0.496957
false
4.570861
false
false
false
disenone/zsync
test/zsync_client.py
1
2617
# -*- coding: utf-8 -*- import zmq import os import time from threading import Thread from zhelpers import socket_set_hwm, zpipe from zsync_server import CHUNK_SIZE, PIPELINE, ports, ip dst_path = 'sync_files_dst' def client_thread(ctx, port): dealer = ctx.socket(zmq.DEALER) socket_set_hwm(dealer, PIPELINE) tcp = 'tcp://%s:%d' % (ip, port) dealer.connect(tcp) print 'connecting %s \n' % tcp credit = PIPELINE # Up to PIPELINE chunks in transit total = 0 # Total bytes received chunks = 0 # Total chunks received offset = 0 # Offset of next chunk request dealer.send_multipart([b'fetch']) try: fname = dealer.recv() except zmq.ZMQError as e: if e.errno == zmq.ETERM: return # shutting down, quit else: raise outf = open(os.path.join(dst_path, fname), 'w') print 'fetching %s \n' % fname recvd = {} while True: while credit: # ask for next chunk dealer.send_multipart([ b"fetch", b"%i" % offset, b"%i" % CHUNK_SIZE, ]) offset += CHUNK_SIZE credit -= 1 try: msg = dealer.recv_multipart() except zmq.ZMQError as e: if e.errno == zmq.ETERM: return # shutting down, quit else: raise offset_str, chunk = msg chunks += 1 credit += 1 roffset = int(offset_str) if total != roffset: recvd[roffset] = chunk print 'total %d save offset %d' % (total, roffset) else: outf.write(chunk) last_size = len(chunk) total += last_size for roff in sorted(recvd.keys()): if roff == total: chunk = recvd.pop(roff) outf.write(chunk) last_size = len(chunk) total += last_size else: break if last_size < CHUNK_SIZE: break # Last chunk received; exit outf.close() dealer.send_multipart([b'close', b'0', b'0']) print ("%i chunks received, %i bytes" % (chunks, total)) return if __name__ == '__main__': begint = time.time() ctx = zmq.Context() clients = [Thread(target=client_thread, args=(ctx, port,)) for port in ports] [client.start() for client in clients] [client.join() for client in clients] endt = time.time() print 'finish: %ss' % (endt - begint)
mit
-1,719,389,939,645,223,000
25.444444
81
0.512419
false
3.787265
false
false
false
peter765/pineapple
plugins/base/Help.py
1
3564
from util import Events class Plugin(object): def __init__(self, pm): self.pm = pm self.name = "Help" @staticmethod def register_events(): return [Events.Command("help", desc="Usage: help [module]|all, shows help text for a plugin, or a list of " "plugins if no plugin is specified."), Events.Command("hello"), Events.Command("info")] async def handle_command(self, message_object, command, args): if command == "help": if "all" in args[1]: await self.all_help(message_object) elif args[1] is not "": await self.show_help(message_object, args[1].lower()) else: await self.show_help_assigned(message_object) if command == "info": await self.info(message_object) if command == "hello": await self.hello(message_object) async def all_help(self, message_object): hstr = "Complete Command List\n" for name, commands in self.pm.comlist.items(): if len(commands) > 0: hstr += "\n**{0}**\n".format(name[:-3]) for c, d in commands: if d is not "": hstr += "`" + self.pm.botPreferences.commandPrefix + c + "`: \n_" + d + "_\n" else: hstr += "`" + self.pm.botPreferences.commandPrefix + c + "`\n" # Split text into pieces of 1000 chars help_strings = list(map(''.join, zip(*[iter(hstr)] * 1000))) for string in help_strings: await self.pm.client.send_message(message_object.author, string) if not message_object.channel.is_private: await self.pm.client.delete_message(message_object) async def info(self, message_object): await self.pm.clientWrap.send_message(self.name, message_object.channel, '**Pineapple**\nSource code available at: https://github.com/Dynista/pineapple') async def hello(self, message_object): msg = 'Hello {0.author.mention}'.format(message_object) await self.pm.clientWrap.send_message(self.name, message_object.channel, msg) async def show_help(self, message_object, args): try: hstr = "**{0}**:\n".format(args) for c, d in self.pm.comlist[args + ".py"]: hstr = hstr + "`" + self.pm.botPreferences.commandPrefix + c + "`: " + d + "\n" await self.pm.clientWrap.send_message(self.name, message_object.author, hstr) except KeyError: await self.pm.clientWrap.send_message(self.name, message_object.author, ":exclamation: That\'s not a valid plugin name") if not message_object.channel.is_private: await self.pm.client.delete_message(message_object) async def show_help_assigned(self, message_object): x = "Bot Help\n```" for name, commands in self.pm.comlist.items(): if len(commands) > 0: x = x + name[:-3] + " " x += "```\n`" + self.pm.botPreferences.commandPrefix + "help [help_topic]` to evoke a help topic.\n`" + \ self.pm.botPreferences.commandPrefix + "help all` for all commands." await self.pm.clientWrap.send_message(self.name, message_object.author, x) if not message_object.channel.is_private: await self.pm.client.delete_message(message_object)
mit
2,830,629,725,129,842,000
44.692308
126
0.5578
false
3.89083
false
false
false
georgemarselis/homeworkdb
getDisgenetData.py
1
1991
#!/usr/bin/env python3.4 import sys import urllib.request, urllib.error, urllib.parse import pandas import numpy import csv from clint.textui import colored # c1 (diseaseId, name, hpoName, STY, MESH, diseaseClassName, doName, type, OMIM ), query=""" DEFINE c0='/data/gene_disease_summary', c1='/data/diseases', c2='/data/genes', c3='/data/gene_to_associated_diseases', c4='/data/sources' ON 'http://www.disgenet.org/web/DisGeNET' SELECT c1 (diseaseId, OMIM ), c2 (symbol, geneId, uniprotId, description, pantherName ), c0 (score, Npmids, Nsnps ), c3 (Ndiseases) FROM c0 WHERE ( c1 = 'C0030567' AND c4 = 'ALL' AND c0.score > '0.25' ) ORDER BY c0.score DESC""" binary_data = query.encode("utf-8") req = urllib.request.Request("http://www.disgenet.org/oql") res = urllib.request.urlopen(req, binary_data) csvresults = res.read().decode( 'utf-8' ) print( colored.green( csvresults ) ) disgenetDataFile = 'disgenet/disgenet_data.tsv' with open( disgenetDataFile, 'w' ) as file: for row in csvresults: file.write( row ) ## disgenet ########################################### disgenetDataFile = 'disgenet/disgenet_data.tsv' disgenetFieldNames = [ 'c1.diseaseId', 'c1.OMIM', 'c2.symbol', 'c2.geneId', 'c2.uniprotId', 'c2.description', 'c2.pantherName', 'c0.score', 'c0.Npmids', 'c0.Nsnps', 'c3.Ndiseases' ] restkey = 'unknownkey'; restval = 'uknownvalue'; dialect = 'excel-tab'; # read payload ########################################### disgenetCsvfile = open( disgenetDataFile ) disgenetReader = csv.DictReader( disgenetCsvfile, disgenetFieldNames, restkey, restval, dialect ); array = [] kot = 0 # magic to skip the first header row for row in disgenetReader: if kot == 0 : kot = 1 continue if row['c2.symbol'] not in array: array.append( row['c2.symbol'] ) print( "Array of genes to be writen to disk: " + colored.yellow( array ) ) listOfGenes = 'listOfGenes.tsv' with open( listOfGenes, 'w' ) as file: file.write( '\n'.join( array ) )
gpl-3.0
-370,553,118,357,286,200
24.525641
181
0.663988
false
2.510719
false
false
false
NationalSecurityAgency/ghidra
GhidraBuild/IDAPro/Python/7xx/plugins/xml_exporter.py
1
2785
## ### # IP: GHIDRA # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## #--------------------------------------------------------------------- # xmlexp.py - IDA XML Exporter plugin #--------------------------------------------------------------------- """ Plugin for IDA which exports a XML PROGRAM document file from a database. This file must be placed in the IDA plugins directory. The file idaxml.py must be placed in the IDA python directory. """ import ida_auto import ida_idaapi import ida_kernwin import idaxml import idc import sys class XmlExporterPlugin(ida_idaapi.plugin_t): """ XML Exporter plugin class """ flags = 0 comment = "Export database as XML file" help = "Export database as XML <PROGRAM> document" wanted_name = "XML Exporter" wanted_hotkey = "Ctrl-Shift-x" def init(self): """ init function for XML Exporter plugin. Returns: Constant PLUGIN_OK if this IDA version supports the plugin, else returns PLUGIN_SKIP if this IDA is older than the supported baseline version. """ if idaxml.is_ida_version_supported(): return ida_idaapi.PLUGIN_OK else: return ida_idaapi.PLUGIN_SKIP def run(self, arg): """ run function for XML Exporter plugin. Args: arg: Integer, non-zero value enables auto-run feature for IDA batch (no gui) processing mode. Default is 0. """ st = idc.set_ida_state(idc.IDA_STATUS_WORK) xml = idaxml.XmlExporter(arg) try: try: xml.export_xml() except idaxml.Cancelled: ida_kernwin.hide_wait_box() msg = "XML Export cancelled!" print "\n" + msg idc.warning(msg) except: ida_kernwin.hide_wait_box() msg = "***** Exception occurred: XML Exporter failed! *****" print "\n" + msg + "\n", sys.exc_type, sys.exc_value idc.warning(msg) finally: xml.cleanup() ida_auto.set_ida_state(st) def term(self): pass def PLUGIN_ENTRY(): return XmlExporterPlugin()
apache-2.0
-2,619,736,467,100,112,000
29.271739
76
0.571275
false
4.053857
false
false
false
elin-moco/metrics
metrics/etl/tools/pd_transform.py
1
1526
# -*- coding: utf-8 -*- import pandas as pd import numpy as np import re from urlparse import urlparse fx_path_pattern = re.compile('(/firefox/)([0-9a-z\.]+)/(whatsnew|firstrun|releasenotes)') blog_path_pattern = re.compile('(/posts/[0-9]+)(/.*)') def actual_path(url): #print url path = urlparse(url).path while path.endswith('/'): path = path[:-1] fxPathMatch = fx_path_pattern.search(path) if fxPathMatch: path = fxPathMatch.group(1) + fxPathMatch.group(3) blogPathMatch = blog_path_pattern.search(path) if blogPathMatch: path = blogPathMatch.group(1) print path if path == '': path = '/' return path def main(argv = []): df = pd.read_hdf('mocotw.h5', 'fx_download') actualPathSeries = df['previousPagePath'].apply(actual_path) print actualPathSeries df['actualPagePath'] = actualPathSeries df.to_hdf('mocotw.h5', 'fx_download') df_sum = df[['actualPagePath', 'pageviews']].groupby('actualPagePath').sum().sort('pageviews', ascending=False) print df_sum df_sum.to_hdf('mocotw.h5', 'fx_download_sum') df_stack = df.groupby(['actualPagePath', 'date']).sum() df_stack = df_stack.reset_index() df_stack = df_stack[df_stack.actualPagePath.isin(df_sum[:10].index)] df_stack = df_stack.pivot(index='date', columns='actualPagePath', values='pageviews') df_stack = df_stack.fillna(0) df_stack = df_stack.reset_index() print df_stack df_stack.to_hdf('mocotw.h5', 'fx_download_stack')
bsd-3-clause
526,834,806,051,038,400
28.346154
115
0.644168
false
3.120654
false
false
false
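actual_path() above collapses Firefox product pages and blog posts with the two regexes defined at the top of the file; a quick check of what those patterns capture (the sample paths are made up):

import re

fx_path_pattern = re.compile('(/firefox/)([0-9a-z\.]+)/(whatsnew|firstrun|releasenotes)')
blog_path_pattern = re.compile('(/posts/[0-9]+)(/.*)')

m = fx_path_pattern.search('/firefox/38.0.5/whatsnew')
print(m.group(1) + m.group(3))   # /firefox/whatsnew  (version segment dropped)

m = blog_path_pattern.search('/posts/12345/some-title')
print(m.group(1))                # /posts/12345  (slug dropped)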
SystemRage/py-kms
py-kms/pykms_RpcBind.py
1
7955
#!/usr/bin/env python3 import logging import binascii import uuid import pykms_RpcBase from pykms_Dcerpc import MSRPCHeader, MSRPCBindAck from pykms_Structure import Structure from pykms_Format import justify, byterize, enco, deco, pretty_printer #-------------------------------------------------------------------------------------------------------------------------------------------------------- loggersrv = logging.getLogger('logsrv') uuidNDR32 = uuid.UUID('8a885d04-1ceb-11c9-9fe8-08002b104860') uuidNDR64 = uuid.UUID('71710533-beba-4937-8319-b5dbef9ccc36') uuidTime = uuid.UUID('6cb71c2c-9812-4540-0300-000000000000') uuidEmpty = uuid.UUID('00000000-0000-0000-0000-000000000000') class CtxItem(Structure): structure = ( ('ContextID', '<H=0'), ('TransItems', 'B=0'), ('Pad', 'B=0'), ('AbstractSyntaxUUID', '16s=""'), ('AbstractSyntaxVer', '<I=0'), ('TransferSyntaxUUID', '16s=""'), ('TransferSyntaxVer', '<I=0'), ) def ts(self): return uuid.UUID(bytes_le = enco(self['TransferSyntaxUUID'], 'latin-1')) class CtxItemResult(Structure): structure = ( ('Result', '<H=0'), ('Reason', '<H=0'), ('TransferSyntaxUUID', '16s=""'), ('TransferSyntaxVer', '<I=0'), ) def __init__(self, result, reason, tsUUID, tsVer): Structure.__init__(self) self['Result'] = result self['Reason'] = reason self['TransferSyntaxUUID'] = tsUUID.bytes_le self['TransferSyntaxVer'] = tsVer class MSRPCBind(Structure): class CtxItemArray: def __init__(self, data): self.data = data def __len__(self): return len(self.data) def __str__(self): return self.data def __getitem__(self, i): return CtxItem(self.data[(len(CtxItem()) * i):]) _CTX_ITEM_LEN = len(CtxItem()) structure = ( ('max_tfrag', '<H=4280'), ('max_rfrag', '<H=4280'), ('assoc_group', '<L=0'), ('ctx_num', 'B=0'), ('Reserved', 'B=0'), ('Reserved2', '<H=0'), ('_ctx_items', '_-ctx_items', 'self["ctx_num"]*self._CTX_ITEM_LEN'), ('ctx_items', ':', CtxItemArray), ) class handler(pykms_RpcBase.rpcBase): def parseRequest(self): request = MSRPCHeader(self.data) pretty_printer(num_text = 3, where = "srv") request = byterize(request) loggersrv.debug("RPC Bind Request Bytes: \n%s\n" % justify(deco(binascii.b2a_hex(self.data), 'utf-8'))) loggersrv.debug("RPC Bind Request: \n%s\n%s\n" % (justify(request.dump(print_to_stdout = False)), justify(MSRPCBind(request['pduData']).dump(print_to_stdout = False)))) return request def generateResponse(self, request): response = MSRPCBindAck() bind = MSRPCBind(request['pduData']) response['ver_major'] = request['ver_major'] response['ver_minor'] = request['ver_minor'] response['type'] = self.packetType['bindAck'] response['flags'] = self.packetFlags['firstFrag'] | self.packetFlags['lastFrag'] | self.packetFlags['multiplex'] response['representation'] = request['representation'] response['frag_len'] = 36 + bind['ctx_num'] * 24 response['auth_len'] = request['auth_len'] response['call_id'] = request['call_id'] response['max_tfrag'] = bind['max_tfrag'] response['max_rfrag'] = bind['max_rfrag'] response['assoc_group'] = 0x1063bf3f port = str(self.srv_config['port']) response['SecondaryAddrLen'] = len(port) + 1 response['SecondaryAddr'] = port pad = (4 - ((response["SecondaryAddrLen"] + MSRPCBindAck._SIZE) % 4)) % 4 response['Pad'] = '\0' * pad response['ctx_num'] = bind['ctx_num'] preparedResponses = {} preparedResponses[uuidNDR32] = CtxItemResult(0, 0, uuidNDR32, 2) preparedResponses[uuidNDR64] = CtxItemResult(2, 2, uuidEmpty, 0) preparedResponses[uuidTime] = CtxItemResult(3, 3, uuidEmpty, 0) response['ctx_items'] = '' for i in range (0, bind['ctx_num']): 
ts_uuid = bind['ctx_items'][i].ts() resp = preparedResponses[ts_uuid] response['ctx_items'] += str(resp) pretty_printer(num_text = 4, where = "srv") response = byterize(response) loggersrv.debug("RPC Bind Response: \n%s\n" % justify(response.dump(print_to_stdout = False))) loggersrv.debug("RPC Bind Response Bytes: \n%s\n" % justify(deco(binascii.b2a_hex(enco(str(response), 'latin-1')), 'utf-8'))) return response def generateRequest(self): firstCtxItem = CtxItem() firstCtxItem['ContextID'] = 0 firstCtxItem['TransItems'] = 1 firstCtxItem['Pad'] = 0 firstCtxItem['AbstractSyntaxUUID'] = uuid.UUID('51c82175-844e-4750-b0d8-ec255555bc06').bytes_le firstCtxItem['AbstractSyntaxVer'] = 1 firstCtxItem['TransferSyntaxUUID'] = uuidNDR32.bytes_le firstCtxItem['TransferSyntaxVer'] = 2 secondCtxItem = CtxItem() secondCtxItem['ContextID'] = 1 secondCtxItem['TransItems'] = 1 secondCtxItem['Pad'] = 0 secondCtxItem['AbstractSyntaxUUID'] = uuid.UUID('51c82175-844e-4750-b0d8-ec255555bc06').bytes_le secondCtxItem['AbstractSyntaxVer'] = 1 secondCtxItem['TransferSyntaxUUID'] = uuidTime.bytes_le secondCtxItem['TransferSyntaxVer'] = 1 bind = MSRPCBind() bind['max_tfrag'] = 5840 bind['max_rfrag'] = 5840 bind['assoc_group'] = 0 bind['ctx_num'] = 2 bind['ctx_items'] = str(bind.CtxItemArray(str(firstCtxItem) + str(secondCtxItem))) request = MSRPCHeader() request['ver_major'] = 5 request['ver_minor'] = 0 request['type'] = self.packetType['bindReq'] request['flags'] = self.packetFlags['firstFrag'] | self.packetFlags['lastFrag'] | self.packetFlags['multiplex'] request['call_id'] = self.srv_config['call_id'] request['pduData'] = str(bind) pretty_printer(num_text = 0, where = "clt") bind = byterize(bind) request = byterize(request) loggersrv.debug("RPC Bind Request: \n%s\n%s\n" % (justify(request.dump(print_to_stdout = False)), justify(MSRPCBind(request['pduData']).dump(print_to_stdout = False)))) loggersrv.debug("RPC Bind Request Bytes: \n%s\n" % justify(deco(binascii.b2a_hex(enco(str(request), 'latin-1')), 'utf-8'))) return request def parseResponse(self): return response
unlicense
2,967,289,741,253,012,000
44.457143
153
0.488121
false
4.005539
false
false
false
vadim-ivlev/STUDY
coding/drawtree.py
1
1325
# VISUALIZATION ----------------------

import networkx as nx
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
import matplotlib.pyplot as plt


def draw_graph(G):
    plt.rcParams["figure.figsize"] = [10., 5.]
    pos = graphviz_layout(G, prog='dot')
    node_labels = nx.get_node_attributes(G, 'name')
    nx.draw(G, pos, with_labels=True, labels=node_labels, width=2, node_size=1000, node_color="orange", alpha=1.0)
    lbls = nx.get_edge_attributes(G, 'label')
    nx.draw_networkx_edge_labels(G, pos, edge_labels=lbls)
    # nx.draw_networkx_nodes(G,pos,node_size=2000, nodelist=['x'])
    # nx.draw_networkx_edges(G, pos, alpha=0.9, width=6, edge_color="orange", edgelist=[(1, 'Petya')])
    # plt.figure(1)
    plt.show()


import uuid
# import random


def build_graph(g, parent_g_node, t, edge_label=None):
    """Recursively add the binary tree t to graph g; returns the id of the node created for t."""
    if not t:
        return
    node = str(uuid.uuid4())  # unique id for this tree node
    g.add_node(node, name=t.get_value())
    if parent_g_node:
        g.add_edge(parent_g_node, node, label=edge_label)
    left = t.get_left()
    right = t.get_right()
    if left:
        build_graph(g, node, left, 'L')
    if right:
        build_graph(g, node, right, 'R')
    return node


G = nx.DiGraph()
# 't' is the binary tree (with get_value/get_left/get_right) built elsewhere in the script
root = build_graph(G, None, t)
draw_graph(G)
mit
235,742,579,492,465,060
23.537037
102
0.613585
false
2.88671
false
false
false
ojarva/home-info-display
display_control_consumer/run.py
1
1488
from setproctitle import setproctitle
import json
import os
import redis
import subprocess
import time


class DisplayControlConsumer(object):
    def __init__(self, redis_host, redis_port):
        self.redis_instance = redis.StrictRedis(host=redis_host, port=redis_port)
        self.env = {"DISPLAY": ":0"}

    def get_brightness(self):
        p = subprocess.Popen(["xrandr", "--verbose"], env=self.env, stdout=subprocess.PIPE)
        (stdout, _) = p.communicate()
        for line in stdout.split("\n"):
            if "Brightness" in line:
                return float(line.strip().split(": ")[1])

    def set_brightness(self, brightness):
        p = subprocess.Popen(["xrandr", "--q1", "--output", "HDMI-0", "--brightness", brightness], env=self.env)
        p.wait()
        self.redis_instance.publish("home:broadcast:generic", json.dumps({"key": "display_brightness", "content": brightness}))

    def run(self):
        pubsub = self.redis_instance.pubsub(ignore_subscribe_messages=True)
        pubsub.subscribe("display-control-set-brightness")
        for item in pubsub.listen():  # act only when a pubsub message arrives
            self.set_brightness(item["data"])


def main():
    setproctitle("display-control-consumer: run")
    redis_host = os.environ["REDIS_HOST"]
    redis_port = os.environ["REDIS_PORT"]
    dcc = DisplayControlConsumer(redis_host, redis_port)
    dcc.run()


if __name__ == '__main__':
    main()
bsd-3-clause
4,833,940,609,596,993,000
32.818182
127
0.62164
false
3.78626
false
false
false
3dfxsoftware/cbss-addons
account_aged_partner_balance_vw/wizard/wizard_open_move_line.py
1
4342
# -*- encoding: utf-8 -*- ########################################################################### # Module Writen to OpenERP, Open Source Management Solution # # Copyright (c) 2011 Vauxoo - http://www.vauxoo.com/ # All Rights Reserved. # info Vauxoo (info@vauxoo.com) ############################################################################ # Coded by: moylop260 (moylop260@vauxoo.com) ############################################################################ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import pooler import wizard class wizard_open_move_line(wizard.interface): def _open_window(self, cr, uid, data, context={}): if not context: context = {} mod_obj = pooler.get_pool(cr.dbname).get('ir.model.data') act_obj = pooler.get_pool(cr.dbname).get('ir.actions.act_window') aged_partner_balance_vw_obj = pooler.get_pool( cr.dbname).get('account.aged.partner.balance.vw') partner_ids = [aged_partner_balance_vw.partner_id and aged_partner_balance_vw.partner_id.id or False for aged_partner_balance_vw in aged_partner_balance_vw_obj.browse( cr, uid, data['ids'], context=context)] # result = mod_obj._get_id(cr, uid, 'account', # 'action_account_moves_all_a') result = mod_obj._get_id(cr, uid, 'account', 'action_move_line_select') id = mod_obj.read(cr, uid, [result], ['res_id'])[0]['res_id'] result = act_obj.read(cr, uid, [id])[0] # result['context'] = {'partner_id': partner_ids} # result['domain'] = [('partner_id','in',partner_ids), # ('account_id.type','=','receivable')] where_query = [] days_due_start = context.get('days_due_start', False) if not days_due_start is False: where_query.append('days_due >= %d' % (days_due_start)) days_due_end = context.get('days_due_end', False) if not days_due_end is False: where_query.append('days_due <= %d' % (days_due_end)) # where_query_str = (where_query and ' WHERE ' or '') + ' AND '.join( # where_query ) where_query_str = ( where_query and ' AND ' or '') + ' AND '.join(where_query) query = """SELECT l.id as id--, l.partner_id, l.company_id FROM account_move_line l INNER JOIN ( SELECT id, EXTRACT(DAY FROM (now() - COALESCE(lt.date_maturity,lt.date))) AS days_due FROM account_move_line lt ) l2 ON l2.id = l.id INNER JOIN account_account ON account_account.id = l.account_id INNER JOIN res_company ON account_account.company_id = res_company.id INNER JOIN account_move ON account_move.id = l.move_id WHERE account_account.active AND (account_account.type IN ('receivable')) AND (l.reconcile_id IS NULL) AND account_move.state = 'posted' AND l.reconcile_id is null --and l.currency_id is null """+where_query_str cr.execute(query) res = cr.fetchall() move_ids = [r[0] for r in res] result['domain'] = [('partner_id', 'in', partner_ids), ( 'id', 'in', move_ids)] return result states = { 'init': { 'actions': [], 'result': {'type': 'action', 'action': _open_window, 'state': 'end'} } } 
wizard_open_move_line('wizard.open.move.line') # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
gpl-2.0
8,739,498,346,271,963,000
45.191489
175
0.546292
false
3.756055
false
false
false
firebase/firebase-admin-python
tests/test_http_client.py
1
5782
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for firebase_admin._http_client."""
import pytest
from pytest_localserver import http
import requests

from firebase_admin import _http_client
from tests import testutils


_TEST_URL = 'http://firebase.test.url/'


def test_http_client_default_session():
    client = _http_client.HttpClient()
    assert client.session is not None
    assert client.base_url == ''
    recorder = _instrument(client, 'body')
    resp = client.request('get', _TEST_URL)
    assert resp.status_code == 200
    assert resp.text == 'body'
    assert len(recorder) == 1
    assert recorder[0].method == 'GET'
    assert recorder[0].url == _TEST_URL


def test_http_client_custom_session():
    session = requests.Session()
    client = _http_client.HttpClient(session=session)
    assert client.session is session
    assert client.base_url == ''
    recorder = _instrument(client, 'body')
    resp = client.request('get', _TEST_URL)
    assert resp.status_code == 200
    assert resp.text == 'body'
    assert len(recorder) == 1
    assert recorder[0].method == 'GET'
    assert recorder[0].url == _TEST_URL


def test_base_url():
    client = _http_client.HttpClient(base_url=_TEST_URL)
    assert client.session is not None
    assert client.base_url == _TEST_URL
    recorder = _instrument(client, 'body')
    resp = client.request('get', 'foo')
    assert resp.status_code == 200
    assert resp.text == 'body'
    assert len(recorder) == 1
    assert recorder[0].method == 'GET'
    assert recorder[0].url == _TEST_URL + 'foo'


def test_credential():
    client = _http_client.HttpClient(
        credential=testutils.MockGoogleCredential())
    assert client.session is not None
    recorder = _instrument(client, 'body')
    resp = client.request('get', _TEST_URL)
    assert resp.status_code == 200
    assert resp.text == 'body'
    assert len(recorder) == 1
    assert recorder[0].method == 'GET'
    assert recorder[0].url == _TEST_URL
    assert recorder[0].headers['Authorization'] == 'Bearer mock-token'


@pytest.mark.parametrize('options, timeout', [
    ({}, _http_client.DEFAULT_TIMEOUT_SECONDS),
    ({'timeout': 7}, 7),
    ({'timeout': 0}, 0),
    ({'timeout': None}, None),
])
def test_timeout(options, timeout):
    client = _http_client.HttpClient(**options)
    assert client.timeout == timeout
    recorder = _instrument(client, 'body')
    client.request('get', _TEST_URL)
    assert len(recorder) == 1
    if timeout is None:
        assert recorder[0]._extra_kwargs['timeout'] is None
    else:
        assert recorder[0]._extra_kwargs['timeout'] == pytest.approx(timeout, 0.001)


def _instrument(client, payload, status=200):
    recorder = []
    adapter = testutils.MockAdapter(payload, status, recorder)
    client.session.mount(_TEST_URL, adapter)
    return recorder


class TestHttpRetry:
    """Unit tests for the default HTTP retry configuration."""

    ENTITY_ENCLOSING_METHODS = ['post', 'put', 'patch']
    ALL_METHODS = ENTITY_ENCLOSING_METHODS + ['get', 'delete', 'head', 'options']

    @classmethod
    def setup_class(cls):
        # Turn off exponential backoff for faster execution.
        _http_client.DEFAULT_RETRY_CONFIG.backoff_factor = 0
        # Start a test server instance scoped to the class.
        server = http.ContentServer()
        server.start()
        cls.httpserver = server

    @classmethod
    def teardown_class(cls):
        cls.httpserver.stop()

    def setup_method(self):
        # Clean up any state in the server before starting a new test case.
        self.httpserver.requests = []

    @pytest.mark.parametrize('method', ALL_METHODS)
    def test_retry_on_503(self, method):
        self.httpserver.serve_content({}, 503)
        client = _http_client.JsonHttpClient(
            credential=testutils.MockGoogleCredential(), base_url=self.httpserver.url)
        body = None
        if method in self.ENTITY_ENCLOSING_METHODS:
            body = {'key': 'value'}
        with pytest.raises(requests.exceptions.HTTPError) as excinfo:
            client.request(method, '/', json=body)
        assert excinfo.value.response.status_code == 503
        assert len(self.httpserver.requests) == 5

    @pytest.mark.parametrize('method', ALL_METHODS)
    def test_retry_on_500(self, method):
        self.httpserver.serve_content({}, 500)
        client = _http_client.JsonHttpClient(
            credential=testutils.MockGoogleCredential(), base_url=self.httpserver.url)
        body = None
        if method in self.ENTITY_ENCLOSING_METHODS:
            body = {'key': 'value'}
        with pytest.raises(requests.exceptions.HTTPError) as excinfo:
            client.request(method, '/', json=body)
        assert excinfo.value.response.status_code == 500
        assert len(self.httpserver.requests) == 5

    def test_no_retry_on_404(self):
        self.httpserver.serve_content({}, 404)
        client = _http_client.JsonHttpClient(
            credential=testutils.MockGoogleCredential(), base_url=self.httpserver.url)
        with pytest.raises(requests.exceptions.HTTPError) as excinfo:
            client.request('get', '/')
        assert excinfo.value.response.status_code == 404
        assert len(self.httpserver.requests) == 1
apache-2.0
-4,307,229,820,997,090,300
35.36478
86
0.663611
false
3.801446
true
false
false
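The tests above stub out all HTTP traffic by mounting a recording adapter on the client's requests session (via tests.testutils.MockAdapter, which is not shown here). A minimal sketch of that instrumentation pattern using only the public requests API follows; the FakeAdapter class and the canned body are assumptions for illustration, not part of firebase-admin.

# Sketch of the instrumentation pattern used above: mount a custom transport
# adapter so requests never leave the process. FakeAdapter is a hypothetical
# stand-in for tests.testutils.MockAdapter.
import requests
import requests.adapters


class FakeAdapter(requests.adapters.BaseAdapter):
    def __init__(self, payload, status, recorder):
        super(FakeAdapter, self).__init__()
        self._payload = payload
        self._status = status
        self._recorder = recorder

    def send(self, request, **kwargs):
        # Record the outgoing request so a test can assert on it later.
        self._recorder.append(request)
        resp = requests.Response()
        resp.status_code = self._status
        resp._content = self._payload.encode()
        resp.url = request.url
        resp.request = request
        return resp

    def close(self):
        pass


recorder = []
session = requests.Session()
session.mount('http://firebase.test.url/', FakeAdapter('body', 200, recorder))
print(session.get('http://firebase.test.url/').text)  # prints 'body'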
phobson/statsmodels
statsmodels/regression/recursive_ls.py
1
25492
""" Recursive least squares model Author: Chad Fulton License: Simplified-BSD """ from __future__ import division, absolute_import, print_function from warnings import warn from statsmodels.compat.collections import OrderedDict import numpy as np import pandas as pd from statsmodels.regression.linear_model import OLS from statsmodels.tools.data import _is_using_pandas from statsmodels.tsa.statespace.mlemodel import ( MLEModel, MLEResults, MLEResultsWrapper) from statsmodels.tools.tools import Bunch from statsmodels.tools.decorators import cache_readonly, resettable_cache import statsmodels.base.wrapper as wrap # Columns are alpha = 0.1, 0.05, 0.025, 0.01, 0.005 _cusum_squares_scalars = np.array([ [1.0729830, 1.2238734, 1.3581015, 1.5174271, 1.6276236], [-0.6698868, -0.6700069, -0.6701218, -0.6702672, -0.6703724], [-0.5816458, -0.7351697, -0.8858694, -1.0847745, -1.2365861] ]) class RecursiveLS(MLEModel): r""" Recursive least squares Parameters ---------- endog : array_like The observed time-series process :math:`y` exog : array_like Array of exogenous regressors, shaped nobs x k. Notes ----- Recursive least squares (RLS) corresponds to expanding window ordinary least squares (OLS). This model applies the Kalman filter to compute recursive estimates of the coefficients and recursive residuals. References ---------- .. [1] Durbin, James, and Siem Jan Koopman. 2012. Time Series Analysis by State Space Methods: Second Edition. Oxford University Press. """ def __init__(self, endog, exog, **kwargs): # Standardize data if not _is_using_pandas(endog, None): endog = np.asanyarray(endog) exog_is_using_pandas = _is_using_pandas(exog, None) if not exog_is_using_pandas: exog = np.asarray(exog) # Make sure we have 2-dimensional array if exog.ndim == 1: if not exog_is_using_pandas: exog = exog[:, None] else: exog = pd.DataFrame(exog) self.k_exog = exog.shape[1] # Handle coefficient initialization # By default, do not calculate likelihood while it is controlled by # diffuse initial conditions. kwargs.setdefault('loglikelihood_burn', self.k_exog) kwargs.setdefault('initialization', 'approximate_diffuse') kwargs.setdefault('initial_variance', 1e9) # Initialize the state space representation super(RecursiveLS, self).__init__( endog, k_states=self.k_exog, exog=exog, **kwargs ) # Setup the state space representation self['design'] = self.exog[:, :, None].T self['transition'] = np.eye(self.k_states) # Notice that the filter output does not depend on the measurement # variance, so we set it here to 1 self['obs_cov', 0, 0] = 1. 
@classmethod def from_formula(cls, formula, data, subset=None): """ Not implemented for state space models """ return super(MLEModel, cls).from_formula(formula, data, subset) def fit(self): """ Fits the model by application of the Kalman filter Returns ------- RecursiveLSResults """ # Get the smoother results with an arbitrary measurement variance smoother_results = self.smooth(return_ssm=True) # Compute the MLE of sigma2 (see Harvey, 1989 equation 4.2.5) resid = smoother_results.standardized_forecasts_error[0] sigma2 = (np.inner(resid, resid) / (self.nobs - self.loglikelihood_burn)) # Now construct a results class, where the params are the final # estimates of the regression coefficients self['obs_cov', 0, 0] = sigma2 return self.smooth() def filter(self, return_ssm=False, **kwargs): # Get the state space output result = super(RecursiveLS, self).filter([], transformed=True, cov_type='none', return_ssm=True, **kwargs) # Wrap in a results object if not return_ssm: params = result.filtered_state[:, -1] cov_kwds = { 'custom_cov_type': 'nonrobust', 'custom_cov_params': result.filtered_state_cov[:, :, -1], 'custom_description': ('Parameters and covariance matrix' ' estimates are RLS estimates' ' conditional on the entire sample.') } result = RecursiveLSResultsWrapper( RecursiveLSResults(self, params, result, cov_type='custom', cov_kwds=cov_kwds) ) return result def smooth(self, return_ssm=False, **kwargs): # Get the state space output result = super(RecursiveLS, self).smooth([], transformed=True, cov_type='none', return_ssm=True, **kwargs) # Wrap in a results object if not return_ssm: params = result.filtered_state[:, -1] cov_kwds = { 'custom_cov_type': 'nonrobust', 'custom_cov_params': result.filtered_state_cov[:, :, -1], 'custom_description': ('Parameters and covariance matrix' ' estimates are RLS estimates' ' conditional on the entire sample.') } result = RecursiveLSResultsWrapper( RecursiveLSResults(self, params, result, cov_type='custom', cov_kwds=cov_kwds) ) return result @property def param_names(self): return self.exog_names @property def start_params(self): # Only parameter is the measurment disturbance standard deviation return np.zeros(0) def update(self, params, **kwargs): """ Update the parameters of the model Updates the representation matrices to fill in the new parameter values. Parameters ---------- params : array_like Array of new parameters. transformed : boolean, optional Whether or not `params` is already transformed. If set to False, `transform_params` is called. Default is True.. Returns ------- params : array_like Array of parameters. """ pass class RecursiveLSResults(MLEResults): """ Class to hold results from fitting a recursive least squares model. Parameters ---------- model : RecursiveLS instance The fitted model instance Attributes ---------- specification : dictionary Dictionary including all attributes from the recursive least squares model instance. 
See Also -------- statsmodels.tsa.statespace.kalman_filter.FilterResults statsmodels.tsa.statespace.mlemodel.MLEResults """ def __init__(self, model, params, filter_results, cov_type='opg', **kwargs): super(RecursiveLSResults, self).__init__( model, params, filter_results, cov_type, **kwargs) self.df_resid = np.inf # attribute required for wald tests # Save _init_kwds self._init_kwds = self.model._get_init_kwds() # Save the model specification self.specification = Bunch(**{ 'k_exog': self.model.k_exog}) @property def recursive_coefficients(self): """ Estimates of regression coefficients, recursively estimated Returns ------- out: Bunch Has the following attributes: - `filtered`: a time series array with the filtered estimate of the component - `filtered_cov`: a time series array with the filtered estimate of the variance/covariance of the component - `smoothed`: a time series array with the smoothed estimate of the component - `smoothed_cov`: a time series array with the smoothed estimate of the variance/covariance of the component - `offset`: an integer giving the offset in the state vector where this component begins """ out = None spec = self.specification start = offset = 0 end = offset + spec.k_exog out = Bunch( filtered=self.filtered_state[start:end], filtered_cov=self.filtered_state_cov[start:end, start:end], smoothed=None, smoothed_cov=None, offset=offset ) if self.smoothed_state is not None: out.smoothed = self.smoothed_state[start:end] if self.smoothed_state_cov is not None: out.smoothed_cov = ( self.smoothed_state_cov[start:end, start:end]) return out @cache_readonly def resid_recursive(self): """ Recursive residuals Returns ------- resid_recursive : array_like An array of length `nobs` holding the recursive residuals. Notes ----- The first `k_exog` residuals are typically unreliable due to initialization. """ # See Harvey (1989) section 5.4; he defines the standardized # innovations in 5.4.1, but they have non-unit variance, whereas # the standardized forecast errors assume unit variance. To convert # to Harvey's definition, we need to multiply by the standard # deviation. return (self.filter_results.standardized_forecasts_error.squeeze() * self.filter_results.obs_cov[0, 0]**0.5) @cache_readonly def cusum(self): r""" Cumulative sum of standardized recursive residuals statistics Returns ------- cusum : array_like An array of length `nobs - k_exog` holding the CUSUM statistics. Notes ----- The CUSUM statistic takes the form: .. math:: W_t = \frac{1}{\hat \sigma} \sum_{j=k+1}^t w_j where :math:`w_j` is the recursive residual at time :math:`j` and :math:`\hat \sigma` is the estimate of the standard deviation from the full sample. Excludes the first `k_exog` datapoints. Due to differences in the way :math:`\hat \sigma` is calculated, the output of this function differs slightly from the output in the R package strucchange and the Stata contributed .ado file cusum6. The calculation in this package is consistent with the description of Brown et al. (1975) References ---------- .. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975. "Techniques for Testing the Constancy of Regression Relationships over Time." Journal of the Royal Statistical Society. Series B (Methodological) 37 (2): 149-92. 
""" llb = self.loglikelihood_burn return (np.cumsum(self.resid_recursive[self.loglikelihood_burn:]) / np.std(self.resid_recursive[llb:], ddof=1)) @cache_readonly def cusum_squares(self): r""" Cumulative sum of squares of standardized recursive residuals statistics Returns ------- cusum_squares : array_like An array of length `nobs - k_exog` holding the CUSUM of squares statistics. Notes ----- The CUSUM of squares statistic takes the form: .. math:: s_t = \left ( \sum_{j=k+1}^t w_j^2 \right ) \Bigg / \left ( \sum_{j=k+1}^T w_j^2 \right ) where :math:`w_j` is the recursive residual at time :math:`j`. Excludes the first `k_exog` datapoints. References ---------- .. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975. "Techniques for Testing the Constancy of Regression Relationships over Time." Journal of the Royal Statistical Society. Series B (Methodological) 37 (2): 149-92. """ numer = np.cumsum(self.resid_recursive[self.loglikelihood_burn:]**2) denom = numer[-1] return numer / denom def plot_recursive_coefficient(self, variables=0, alpha=0.05, legend_loc='upper left', fig=None, figsize=None): r""" Plot the recursively estimated coefficients on a given variable Parameters ---------- variables : int or str or iterable of int or string, optional Integer index or string name of the variable whose coefficient will be plotted. Can also be an iterable of integers or strings. Default is the first variable. alpha : float, optional The confidence intervals for the coefficient are (1 - alpha) % legend_loc : string, optional The location of the legend in the plot. Default is upper left. fig : Matplotlib Figure instance, optional If given, subplots are created in this figure instead of in a new figure. Note that the grid will be created in the provided figure using `fig.add_subplot()`. figsize : tuple, optional If a figure is created, this argument allows specifying a size. The tuple is (width, height). Notes ----- All plots contain (1 - `alpha`) % confidence intervals. """ # Get variables if isinstance(variables, (int, str)): variables = [variables] k_variables = len(variables) # If a string was given for `variable`, try to get it from exog names exog_names = self.model.exog_names for i in range(k_variables): variable = variables[i] if isinstance(variable, str): variables[i] = exog_names.index(variable) # Create the plot from scipy.stats import norm from statsmodels.graphics.utils import _import_mpl, create_mpl_fig plt = _import_mpl() fig = create_mpl_fig(fig, figsize) for i in range(k_variables): variable = variables[i] ax = fig.add_subplot(k_variables, 1, i + 1) # Get dates, if applicable if hasattr(self.data, 'dates') and self.data.dates is not None: dates = self.data.dates._mpl_repr() else: dates = np.arange(self.nobs) llb = self.loglikelihood_burn # Plot the coefficient coef = self.recursive_coefficients ax.plot(dates[llb:], coef.filtered[variable, llb:], label='Recursive estimates: %s' % exog_names[variable]) # Legend handles, labels = ax.get_legend_handles_labels() # Get the critical value for confidence intervals if alpha is not None: critical_value = norm.ppf(1 - alpha / 2.) 
# Plot confidence intervals std_errors = np.sqrt(coef.filtered_cov[variable, variable, :]) ci_lower = ( coef.filtered[variable] - critical_value * std_errors) ci_upper = ( coef.filtered[variable] + critical_value * std_errors) ci_poly = ax.fill_between( dates[llb:], ci_lower[llb:], ci_upper[llb:], alpha=0.2 ) ci_label = ('$%.3g \\%%$ confidence interval' % ((1 - alpha)*100)) # Only add CI to legend for the first plot if i == 0: # Proxy artist for fill_between legend entry # See http://matplotlib.org/1.3.1/users/legend_guide.html p = plt.Rectangle((0, 0), 1, 1, fc=ci_poly.get_facecolor()[0]) handles.append(p) labels.append(ci_label) ax.legend(handles, labels, loc=legend_loc) # Remove xticks for all but the last plot if i < k_variables - 1: ax.xaxis.set_ticklabels([]) fig.tight_layout() return fig def _cusum_significance_bounds(self, alpha, ddof=0, points=None): """ Parameters ---------- alpha : float, optional The significance bound is alpha %. ddof : int, optional The number of periods additional to `k_exog` to exclude in constructing the bounds. Default is zero. This is usually used only for testing purposes. points : iterable, optional The points at which to evaluate the significance bounds. Default is two points, beginning and end of the sample. Notes ----- Comparing against the cusum6 package for Stata, this does not produce exactly the same confidence bands (which are produced in cusum6 by lw, uw) because they burn the first k_exog + 1 periods instead of the first k_exog. If this change is performed (so that `tmp = (self.nobs - llb - 1)**0.5`), then the output here matches cusum6. The cusum6 behavior does not seem to be consistent with Brown et al. (1975); it is likely they did that because they needed three initial observations to get the initial OLS estimates, whereas we do not need to do that. """ # Get the constant associated with the significance level if alpha == 0.01: scalar = 1.143 elif alpha == 0.05: scalar = 0.948 elif alpha == 0.10: scalar = 0.950 else: raise ValueError('Invalid significance level.') # Get the points for the significance bound lines llb = self.loglikelihood_burn tmp = (self.nobs - llb - ddof)**0.5 upper_line = lambda x: scalar * tmp + 2 * scalar * (x - llb) / tmp if points is None: points = np.array([llb, self.nobs]) return -upper_line(points), upper_line(points) def plot_cusum(self, alpha=0.05, legend_loc='upper left', fig=None, figsize=None): r""" Plot the CUSUM statistic and significance bounds. Parameters ---------- alpha : float, optional The plotted significance bounds are alpha %. legend_loc : string, optional The location of the legend in the plot. Default is upper left. fig : Matplotlib Figure instance, optional If given, subplots are created in this figure instead of in a new figure. Note that the grid will be created in the provided figure using `fig.add_subplot()`. figsize : tuple, optional If a figure is created, this argument allows specifying a size. The tuple is (width, height). Notes ----- Evidence of parameter instability may be found if the CUSUM statistic moves out of the significance bounds. References ---------- .. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975. "Techniques for Testing the Constancy of Regression Relationships over Time." Journal of the Royal Statistical Society. Series B (Methodological) 37 (2): 149-92. 
""" # Create the plot from statsmodels.graphics.utils import _import_mpl, create_mpl_fig plt = _import_mpl() fig = create_mpl_fig(fig, figsize) ax = fig.add_subplot(1, 1, 1) # Get dates, if applicable if hasattr(self.data, 'dates') and self.data.dates is not None: dates = self.data.dates._mpl_repr() else: dates = np.arange(self.nobs) llb = self.loglikelihood_burn # Plot cusum series and reference line ax.plot(dates[llb:], self.cusum, label='CUSUM') ax.hlines(0, dates[llb], dates[-1], color='k', alpha=0.3) # Plot significance bounds lower_line, upper_line = self._cusum_significance_bounds(alpha) ax.plot([dates[llb], dates[-1]], upper_line, 'k--', label='%d%% significance' % (alpha * 100)) ax.plot([dates[llb], dates[-1]], lower_line, 'k--') ax.legend(loc=legend_loc) return fig def _cusum_squares_significance_bounds(self, alpha, points=None): """ Notes ----- Comparing against the cusum6 package for Stata, this does not produce exactly the same confidence bands (which are produced in cusum6 by lww, uww) because they use a different method for computing the critical value; in particular, they use tabled values from Table C, pp. 364-365 of "The Econometric Analysis of Time Series" Harvey, (1990), and use the value given to 99 observations for any larger number of observations. In contrast, we use the approximating critical values suggested in Edgerton and Wells (1994) which allows computing relatively good approximations for any number of observations. """ # Get the approximate critical value associated with the significance # level llb = self.loglikelihood_burn n = 0.5 * (self.nobs - llb) - 1 try: ix = [0.1, 0.05, 0.025, 0.01, 0.005].index(alpha / 2) except ValueError: raise ValueError('Invalid significance level.') scalars = _cusum_squares_scalars[:, ix] crit = scalars[0] / n**0.5 + scalars[1] / n + scalars[2] / n**1.5 # Get the points for the significance bound lines if points is None: points = np.array([llb, self.nobs]) line = (points - llb) / (self.nobs - llb) return line - crit, line + crit def plot_cusum_squares(self, alpha=0.05, legend_loc='upper left', fig=None, figsize=None): r""" Plot the CUSUM of squares statistic and significance bounds. Parameters ---------- alpha : float, optional The plotted significance bounds are alpha %. legend_loc : string, optional The location of the legend in the plot. Default is upper left. fig : Matplotlib Figure instance, optional If given, subplots are created in this figure instead of in a new figure. Note that the grid will be created in the provided figure using `fig.add_subplot()`. figsize : tuple, optional If a figure is created, this argument allows specifying a size. The tuple is (width, height). Notes ----- Evidence of parameter instability may be found if the CUSUM of squares statistic moves out of the significance bounds. Critical values used in creating the significance bounds are computed using the approximate formula of [2]_. References ---------- .. [1] Brown, R. L., J. Durbin, and J. M. Evans. 1975. "Techniques for Testing the Constancy of Regression Relationships over Time." Journal of the Royal Statistical Society. Series B (Methodological) 37 (2): 149-92. .. [2] Edgerton, David, and Curt Wells. 1994. "Critical Values for the Cusumsq Statistic in Medium and Large Sized Samples." Oxford Bulletin of Economics and Statistics 56 (3): 355-65. 
""" # Create the plot from statsmodels.graphics.utils import _import_mpl, create_mpl_fig plt = _import_mpl() fig = create_mpl_fig(fig, figsize) ax = fig.add_subplot(1, 1, 1) # Get dates, if applicable if hasattr(self.data, 'dates') and self.data.dates is not None: dates = self.data.dates._mpl_repr() else: dates = np.arange(self.nobs) llb = self.loglikelihood_burn # Plot cusum series and reference line ax.plot(dates[llb:], self.cusum_squares, label='CUSUM of squares') ref_line = (np.arange(llb, self.nobs) - llb) / (self.nobs - llb) ax.plot(dates[llb:], ref_line, 'k', alpha=0.3) # Plot significance bounds lower_line, upper_line = self._cusum_squares_significance_bounds(alpha) ax.plot([dates[llb], dates[-1]], upper_line, 'k--', label='%d%% significance' % (alpha * 100)) ax.plot([dates[llb], dates[-1]], lower_line, 'k--') ax.legend(loc=legend_loc) return fig class RecursiveLSResultsWrapper(MLEResultsWrapper): _attrs = {} _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs, _attrs) _methods = {} _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods, _methods) wrap.populate_wrapper(RecursiveLSResultsWrapper, RecursiveLSResults)
bsd-3-clause
-7,399,047,095,831,074,000
35.573888
79
0.574494
false
4.207989
false
false
false
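A short usage sketch of the RecursiveLS model defined above, run on synthetic data; the sample size, coefficients, and noise scale are illustrative assumptions, while the API calls (fit, cusum, plot_cusum) come from the source shown.

# Hypothetical usage of RecursiveLS on synthetic data.
import numpy as np
from statsmodels.regression.recursive_ls import RecursiveLS

rng = np.random.RandomState(0)
nobs = 200
exog = np.column_stack([np.ones(nobs), rng.normal(size=nobs)])
beta = np.array([1.0, 0.5])           # assumed true coefficients
endog = exog.dot(beta) + rng.normal(scale=0.1, size=nobs)

mod = RecursiveLS(endog, exog)
res = mod.fit()

print(res.params)                     # full-sample coefficient estimates
print(res.cusum[:5])                  # CUSUM of standardized recursive residuals
fig = res.plot_cusum(alpha=0.05)      # bounds as in Brown et al. (1975); needs matplotlib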
nuxeh/morph
morphlib/buildcommand.py
1
22770
# Copyright (C) 2011-2015 Codethink Limited # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <http://www.gnu.org/licenses/>. import itertools import os import shutil import logging import tempfile import datetime import morphlib import distbuild class MultipleRootArtifactsError(morphlib.Error): def __init__(self, artifacts): self.msg = ('System build has multiple root artifacts: %r' % [a.name for a in artifacts]) self.artifacts = artifacts class BuildCommand(object): '''High level logic for building. This controls how the whole build process goes. This is a separate class to enable easy experimentation of different approaches to the various parts of the process. ''' def __init__(self, app, build_env = None): self.supports_local_build = True self.app = app self.lac, self.rac = self.new_artifact_caches() self.lrc, self.rrc = self.new_repo_caches() def build(self, repo_name, ref, filename, original_ref=None): '''Build a given system morphology.''' self.app.status( msg='Building %(repo_name)s %(ref)s %(filename)s', repo_name=repo_name, ref=ref, filename=filename) self.app.status(msg='Deciding on task order') srcpool = self.create_source_pool( repo_name, ref, filename, original_ref) self.validate_sources(srcpool) root_artifact = self.resolve_artifacts(srcpool) self.build_in_order(root_artifact) self.app.status( msg='Build of %(repo_name)s %(ref)s %(filename)s ended ' 'successfully', repo_name=repo_name, ref=ref, filename=filename) def new_artifact_caches(self): '''Create interfaces for the build artifact caches. This includes creating the directories on disk if they are missing. ''' return morphlib.util.new_artifact_caches(self.app.settings) def new_repo_caches(self): return morphlib.util.new_repo_caches(self.app) def new_build_env(self, arch): '''Create a new BuildEnvironment instance.''' return morphlib.buildenvironment.BuildEnvironment(self.app.settings, arch) def create_source_pool(self, repo_name, ref, filename, original_ref=None): '''Find the source objects required for building a the given artifact The SourcePool will contain every stratum and chunk dependency of the given artifact (which must be a system) but will not take into account any Git submodules which are required in the build. 
''' self.app.status(msg='Creating source pool', chatty=True) srcpool = morphlib.sourceresolver.create_source_pool( self.lrc, self.rrc, repo_name, ref, filename, cachedir=self.app.settings['cachedir'], original_ref=original_ref, update_repos=not self.app.settings['no-git-update'], status_cb=self.app.status) return srcpool def validate_sources(self, srcpool): self.app.status( msg='Validating cross-morphology references', chatty=True) self._validate_cross_morphology_references(srcpool) self.app.status(msg='Validating for there being non-bootstrap chunks', chatty=True) self._validate_has_non_bootstrap_chunks(srcpool) def _validate_root_artifact(self, root_artifact): self._validate_root_kind(root_artifact) self._validate_architecture(root_artifact) @staticmethod def _validate_root_kind(root_artifact): root_kind = root_artifact.source.morphology['kind'] if root_kind != 'system': raise morphlib.Error( 'Building a %s directly is not supported' % root_kind) def _validate_architecture(self, root_artifact): '''Perform the validation between root and target architectures.''' root_arch = root_artifact.source.morphology['arch'] host_arch = morphlib.util.get_host_architecture() if root_arch == host_arch: return # Since the armv8 instruction set is nearly entirely armv7 compatible, # and since the incompatibilities are appropriately trapped in the # kernel, we can safely run any armv7 toolchain natively on armv8. if host_arch == 'armv8l' and root_arch in ('armv7l', 'armv7lhf'): return if host_arch == 'armv8b' and root_arch in ('armv7b', 'armv7bhf'): return raise morphlib.Error( 'Are you trying to cross-build? Host architecture is %s but ' 'target is %s' % (host_arch, root_arch)) @staticmethod def _validate_has_non_bootstrap_chunks(srcpool): stratum_sources = [src for src in srcpool if src.morphology['kind'] == 'stratum'] # any will return true for an empty iterable, which will give # a false positive when there are no strata. # This is an error by itself, but the source of this error can # be better diagnosed later, so we abort validating here. 
if not stratum_sources: return if not any(spec.get('build-mode', 'staging') != 'bootstrap' for src in stratum_sources for spec in src.morphology['chunks']): raise morphlib.Error('No non-bootstrap chunks found.') def _compute_cache_keys(self, root_artifact): arch = root_artifact.source.morphology['arch'] self.app.status(msg='Creating build environment for %(arch)s', arch=arch, chatty=True) build_env = self.new_build_env(arch) self.app.status(msg='Computing cache keys', chatty=True) ckc = morphlib.cachekeycomputer.CacheKeyComputer(build_env) for source in set(a.source for a in root_artifact.walk()): source.cache_key = ckc.compute_key(source) source.cache_id = ckc.get_cache_id(source) root_artifact.build_env = build_env def resolve_artifacts(self, srcpool): '''Resolve the artifacts that will be built for a set of sources''' self.app.status(msg='Creating artifact resolver', chatty=True) ar = morphlib.artifactresolver.ArtifactResolver() self.app.status(msg='Resolving artifacts', chatty=True) root_artifacts = ar.resolve_root_artifacts(srcpool) if len(root_artifacts) > 1: # Validate root artifacts to give a more useful error message for root_artifact in root_artifacts: self._validate_root_artifact(root_artifact) raise MultipleRootArtifactsError(root_artifacts) root_artifact = root_artifacts[0] self.app.status(msg='Validating root artifact', chatty=True) self._validate_root_artifact(root_artifact) self._compute_cache_keys(root_artifact) return root_artifact def _validate_cross_morphology_references(self, srcpool): '''Perform validation across all morphologies involved in the build''' stratum_names = {} for src in srcpool: kind = src.morphology['kind'] # Verify that chunks pointed to by strata really are chunks, etc. method_name = '_validate_cross_refs_for_%s' % kind if hasattr(self, method_name): logging.debug('Calling %s' % method_name) getattr(self, method_name)(src, srcpool) else: logging.warning('No %s' % method_name) # Verify stratum build-depends agree with the system's contents. # It is permissible for a stratum to build-depend on a stratum that # isn't specified in the target system morphology. # Multiple references to the same stratum are permitted. This is # handled by the SourcePool deduplicating added Sources. # It is forbidden to have two different strata with the same name. # Hence if a Stratum is defined in the System, and in a Stratum as # a build-dependency, then they must both have the same Repository # and Ref specified. 
if src.morphology['kind'] == 'stratum': name = src.name if name in stratum_names: raise morphlib.Error( "Multiple strata produce a '%s' artifact: %s and %s" % (name, stratum_names[name].filename, src.filename)) stratum_names[name] = src def _validate_cross_refs_for_system(self, src, srcpool): self._validate_cross_refs_for_xxx( src, srcpool, src.morphology['strata'], 'stratum') def _validate_cross_refs_for_stratum(self, src, srcpool): self._validate_cross_refs_for_xxx( src, srcpool, src.morphology['chunks'], 'chunk') def _validate_cross_refs_for_xxx(self, src, srcpool, specs, wanted): for spec in specs: repo_name = spec.get('repo') or src.repo_name ref = spec.get('ref') or src.original_ref filename = morphlib.util.sanitise_morphology_path( spec.get('morph', spec.get('name'))) logging.debug( 'Validating cross ref to %s:%s:%s' % (repo_name, ref, filename)) for other in srcpool.lookup(repo_name, ref, filename): if other.morphology['kind'] != wanted: raise morphlib.Error( '%s %s references %s:%s:%s which is a %s, ' 'instead of a %s' % (src.morphology['kind'], src.name, repo_name, ref, filename, other.morphology['kind'], wanted)) @staticmethod def get_ordered_sources(artifacts): ordered_sources = [] known_sources = set() for artifact in artifacts: if artifact.source not in known_sources: known_sources.add(artifact.source) yield artifact.source def build_in_order(self, root_artifact): '''Build everything specified in a build order.''' self.app.status(msg='Building a set of sources') build_env = root_artifact.build_env ordered_sources = list(self.get_ordered_sources(root_artifact.walk())) old_prefix = self.app.status_prefix for i, s in enumerate(ordered_sources): self.app.status_prefix = ( old_prefix + '[Build %(index)d/%(total)d] [%(name)s] ' % { 'index': (i+1), 'total': len(ordered_sources), 'name': s.name, }) self.cache_or_build_source(s, build_env) self.app.status_prefix = old_prefix def cache_or_build_source(self, source, build_env): '''Make artifacts of the built source available in the local cache. This can be done by retrieving from a remote artifact cache, or if that doesn't work for some reason, by building the source locally. ''' artifacts = source.artifacts.values() if self.rac is not None: try: self.cache_artifacts_locally(artifacts) except morphlib.remoteartifactcache.GetError: # Error is logged by the RemoteArtifactCache object. pass if any(not self.lac.has(artifact) for artifact in artifacts): self.build_source(source, build_env) for a in artifacts: self.app.status(msg='%(kind)s %(name)s is cached at %(cachepath)s', kind=source.morphology['kind'], name=a.name, cachepath=self.lac.artifact_filename(a), chatty=(source.morphology['kind'] != "system")) def build_source(self, source, build_env): '''Build all artifacts for one source. All the dependencies are assumed to be built and available in either the local or remote cache already. ''' starttime = datetime.datetime.now() self.app.status(msg='Building %(kind)s %(name)s', name=source.name, kind=source.morphology['kind']) self.fetch_sources(source) # TODO: Make an artifact.walk() that takes multiple root artifacts. # as this does a walk for every artifact. This was the status # quo before build logic was made to work per-source, but we can # now do better. 
deps = self.get_recursive_deps(source.artifacts.values()) self.cache_artifacts_locally(deps) use_chroot = False setup_mounts = False if source.morphology['kind'] == 'chunk': build_mode = source.build_mode extra_env = {'PREFIX': source.prefix} dep_prefix_set = set(a.source.prefix for a in deps if a.source.morphology['kind'] == 'chunk') extra_path = [os.path.join(d, 'bin') for d in dep_prefix_set] if build_mode not in ['bootstrap', 'staging', 'test']: logging.warning('Unknown build mode %s for chunk %s. ' 'Defaulting to staging mode.' % (build_mode, artifact.name)) build_mode = 'staging' if build_mode == 'staging': use_chroot = True setup_mounts = True staging_area = self.create_staging_area(build_env, use_chroot, extra_env=extra_env, extra_path=extra_path) try: self.install_dependencies(staging_area, deps, source) except BaseException: staging_area.abort() raise else: staging_area = self.create_staging_area(build_env, False) self.build_and_cache(staging_area, source, setup_mounts) self.remove_staging_area(staging_area) td = datetime.datetime.now() - starttime hours, remainder = divmod(int(td.total_seconds()), 60*60) minutes, seconds = divmod(remainder, 60) td_string = "%02d:%02d:%02d" % (hours, minutes, seconds) self.app.status(msg="Elapsed time %(duration)s", duration=td_string) def get_recursive_deps(self, artifacts): deps = set() ordered_deps = [] for artifact in artifacts: for dep in artifact.walk(): if dep not in deps and dep not in artifacts: deps.add(dep) ordered_deps.append(dep) return ordered_deps def fetch_sources(self, source): '''Update the local git repository cache with the sources.''' repo_name = source.repo_name source.repo = self.lrc.get_updated_repo(repo_name, ref=source.sha1) self.lrc.ensure_submodules(source.repo, source.sha1) def cache_artifacts_locally(self, artifacts): '''Get artifacts missing from local cache from remote cache.''' def fetch_files(to_fetch): '''Fetch a set of files atomically. If an error occurs during the transfer of any files, all downloaded data is deleted, to ensure integrity of the local cache. ''' try: for remote, local in to_fetch: shutil.copyfileobj(remote, local) except BaseException: for remote, local in to_fetch: local.abort() raise else: for remote, local in to_fetch: remote.close() local.close() for artifact in artifacts: # This block should fetch all artifact files in one go, using the # 1.0/artifacts method of morph-cache-server. The code to do that # needs bringing in from the distbuild.worker_build_connection # module into morphlib.remoteartififactcache first. 
to_fetch = [] if not self.lac.has(artifact): to_fetch.append((self.rac.get(artifact), self.lac.put(artifact))) if artifact.source.morphology.needs_artifact_metadata_cached: if not self.lac.has_artifact_metadata(artifact, 'meta'): to_fetch.append(( self.rac.get_artifact_metadata(artifact, 'meta'), self.lac.put_artifact_metadata(artifact, 'meta'))) if len(to_fetch) > 0: self.app.status( msg='Fetching to local cache: artifact %(name)s', name=artifact.name) fetch_files(to_fetch) def create_staging_area(self, build_env, use_chroot=True, extra_env={}, extra_path=[]): '''Create the staging area for building a single artifact.''' self.app.status(msg='Creating staging area') staging_dir = tempfile.mkdtemp( dir=os.path.join(self.app.settings['tempdir'], 'staging')) staging_area = morphlib.stagingarea.StagingArea( self.app, staging_dir, build_env, use_chroot, extra_env, extra_path) return staging_area def remove_staging_area(self, staging_area): '''Remove the staging area.''' self.app.status(msg='Removing staging area') staging_area.remove() # Nasty hack to avoid installing chunks built in 'bootstrap' mode in a # different stratum when constructing staging areas. # TODO: make nicer by having chunk morphs keep a reference to the # stratum they were in def in_same_stratum(self, s1, s2): '''Checks whether two chunk sources are from the same stratum. In the absence of morphologies tracking where they came from, this checks whether both sources are depended on by artifacts that belong to sources which have the same morphology. ''' def dependent_stratum_morphs(source): dependents = set(itertools.chain.from_iterable( a.dependents for a in source.artifacts.itervalues())) dependent_strata = set(s for s in dependents if s.morphology['kind'] == 'stratum') return set(s.morphology for s in dependent_strata) return dependent_stratum_morphs(s1) == dependent_stratum_morphs(s2) def install_dependencies(self, staging_area, artifacts, target_source): '''Install chunk artifacts into staging area. We only ever care about chunk artifacts as build dependencies, so this is not a generic artifact installer into staging area. Any non-chunk artifacts are silently ignored. All artifacts MUST be in the local artifact cache already. 
''' for artifact in artifacts: if artifact.source.morphology['kind'] != 'chunk': continue if artifact.source.build_mode == 'bootstrap': if not self.in_same_stratum(artifact.source, target_source): continue self.app.status( msg='Installing chunk %(chunk_name)s from cache %(cache)s', chunk_name=artifact.name, cache=artifact.source.cache_key[:7], chatty=True) handle = self.lac.get(artifact) staging_area.install_artifact(handle) if target_source.build_mode == 'staging': morphlib.builder.ldconfig(self.app.runcmd, staging_area.dirname) def build_and_cache(self, staging_area, source, setup_mounts): '''Build a source and put its artifacts into the local cache.''' self.app.status(msg='Starting actual build: %(name)s ' '%(sha1)s', name=source.name, sha1=source.sha1[:7]) builder = morphlib.builder.Builder( self.app, staging_area, self.lac, self.rac, self.lrc, self.app.settings['max-jobs'], setup_mounts) return builder.build_and_cache(source) class InitiatorBuildCommand(BuildCommand): RECONNECT_INTERVAL = 30 # seconds MAX_RETRIES = 1 def __init__(self, app, addr, port): self.app = app self.addr = addr self.port = port self.app.settings['push-build-branches'] = True super(InitiatorBuildCommand, self).__init__(app) def build(self, repo_name, ref, filename, original_ref=None): '''Initiate a distributed build on a controller''' distbuild.add_crash_conditions(self.app.settings['crash-condition']) if self.addr == '': raise morphlib.Error( 'Need address of controller to run a distbuild') self.app.status(msg='Starting distributed build') loop = distbuild.MainLoop() args = [repo_name, ref, filename, original_ref or ref] cm = distbuild.InitiatorConnectionMachine(self.app, self.addr, self.port, distbuild.Initiator, [self.app] + args, self.RECONNECT_INTERVAL, self.MAX_RETRIES) loop.add_state_machine(cm) try: loop.run() except KeyboardInterrupt: # This will run if the user presses Ctrl+C or sends SIGINT during # the build. It won't trigger on SIGTERM, SIGKILL or unhandled # Python exceptions. logging.info('Received KeyboardInterrupt, aborting.') for initiator in loop.state_machines_of_type(distbuild.Initiator): initiator.handle_cancel()
gpl-2.0
7,502,372,622,402,206,000
40.101083
79
0.584629
false
4.301096
false
false
false
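The core policy in BuildCommand.cache_or_build_source is: try to populate the local artifact cache from the remote cache, and fall back to building locally if any artifact is still missing. A simplified, hypothetical sketch of that decision follows; the class and function names are invented for illustration and are not part of morphlib.

# Simplified sketch of the cache-or-build policy (hypothetical names).
class FetchError(Exception):
    pass


def cache_or_build(artifacts, local_cache, remote_cache, build_fn):
    if remote_cache is not None:
        try:
            for artifact in artifacts:
                if not local_cache.has(artifact):
                    local_cache.put(artifact, remote_cache.get(artifact))
        except FetchError:
            # A failed remote fetch is not fatal; we can still build locally.
            pass
    if any(not local_cache.has(a) for a in artifacts):
        build_fn()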
ufal/neuralmonkey
neuralmonkey/model/feedable.py
1
2178
from abc import ABCMeta
from typing import Any, Dict, List
# pylint: disable=unused-import
from typing import Optional
# pylint: enable=unused-import

import tensorflow as tf

from neuralmonkey.dataset import Dataset

# pylint: disable=invalid-name
FeedDict = Dict[tf.Tensor, Any]
# pylint: enable=invalid-name


class Feedable(metaclass=ABCMeta):
    """Base class for feedable model parts.

    In TensorFlow, data is provided to the model using placeholders. Neural
    Monkey abstraction objects, such as encoders or decoders, can be members
    of this class in order to be able to receive data inputs from the
    framework.

    All feedable objects have a `feed_dict` method, which gets the current
    dataset and returns a `FeedDict` dictionary which assigns values to
    symbolic placeholders.

    Additionally, each Feedable object has two placeholders which are fed
    automatically in this super class - `batch_size` and `train_mode`.
    """

    def __init__(self) -> None:
        self.train_mode = tf.placeholder(tf.bool, [], "train_mode")
        self.batch_size = tf.placeholder(tf.int32, [], "batch_size")
        self._dataset = None  # type: Optional[Dict[str, tf.Tensor]]

    def feed_dict(self, dataset: Dataset, train: bool = True) -> FeedDict:
        """Return a feed dictionary for the given feedable object.

        Arguments:
            dataset: A dataset instance from which to get the data.
            train: Boolean indicating whether the model runs in training mode.

        Returns:
            A `FeedDict` dictionary object.
        """
        fd = {}  # type: FeedDict
        fd[self.train_mode] = train
        fd[self.batch_size] = len(dataset)
        return fd

    @property
    def input_types(self) -> Dict[str, tf.DType]:
        return {}

    @property
    def input_shapes(self) -> Dict[str, List[int]]:
        return {}

    @property
    def dataset(self) -> Dict[str, tf.Tensor]:
        if self._dataset is None:
            raise RuntimeError("Getting dataset before registering it.")
        return self._dataset

    def register_input(self, dataset: Dict[str, tf.Tensor]) -> None:
        self._dataset = dataset
bsd-3-clause
-153,059,823,488,650,460
32
79
0.664371
false
4.117202
false
false
false
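A hedged sketch of how a subclass might extend feed_dict with its own placeholder, building on the base-class behaviour shown above. The LengthFeedable class is hypothetical, and the get_series accessor on Dataset is an assumption about the Dataset API.

# Hypothetical Feedable subclass adding one integer placeholder per example.
# Assumes Dataset exposes a get_series(name) accessor; adjust if the actual
# Dataset API differs.
import tensorflow as tf

from neuralmonkey.dataset import Dataset
from neuralmonkey.model.feedable import Feedable, FeedDict


class LengthFeedable(Feedable):

    def __init__(self) -> None:
        super().__init__()
        self.lengths = tf.placeholder(tf.int32, [None], "lengths")

    def feed_dict(self, dataset: Dataset, train: bool = True) -> FeedDict:
        # Start from the placeholders fed by the base class
        # (train_mode and batch_size), then add our own.
        fd = super().feed_dict(dataset, train)
        fd[self.lengths] = [len(seq) for seq in dataset.get_series("source")]
        return fd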
shaunster0/object_recognition_service
recognition_server/recognition_server.py
1
8875
# -*- coding: utf-8 -*- """ Created on Sat Aug 12 18:08:52 2017 @author: Shaun Werkhoven @purpose: To create a image classification system for the Image Intelligence TakeHome Assignment Licensed under the Apache License, Version 2.0 (the "License"); Simple image classification with flask-based HTTP API, TensorFlow and the Inception model (trained on ImageNet 2012 Challenge data set). The server maintains a list of images, with URLs, on which image inference can be run, or has been run. It is a list of tasks to do, or that have been done. Functions to add, delete or run inference on images are given as HTTP addresses, using JSON arguments. This program creates a graph from a saved GraphDef protocol buffer, and runs inference on an input JPEG, GIF or PNG image. It outputs human readable strings of the top 5 predictions along with their probabilities. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys import json from flask import Flask, jsonify, abort, make_response, request from flask_httpauth import HTTPBasicAuth try: from recognition_server import tf_operations except: import tf_operations # set up HTTP app auth = HTTPBasicAuth() app = Flask(__name__) # initialise image list with some random images, not strictly necessary images = [ { 'id': 1, 'title': u'Nikes', 'url': 'http://imgdirect.s3-website-us-west-2.amazonaws.com/nike.jpg', 'results': '', 'resize': False, 'size': "" }, { 'id': 2, 'title': u'Altra', 'url': 'https://s3-us-west-2.amazonaws.com/imgdirect/altra.jpg', 'results': '', 'resize': False, 'size': "" } ] # set up some HTTP error handlers @auth.error_handler def unauthorized(): return make_response(jsonify({'error': 'unauthorized access'}), 403) @app.errorhandler(404) def not_found(error): return make_response(jsonify({'error': 'not found'}), 404) @app.errorhandler(400) def bad_request(error): return make_response(jsonify({'error': 'missing json data'}), 400) @app.errorhandler(410) def missing_URL(error): return make_response(jsonify({'error': 'missing URL field'}), 410) # first API function, can be used for testing @app.route('/') @app.route('/index') def index(): """returns Hello, World!""" return "Hello, World!" # test string # curl -i http://127.0.0.1:5000/img/api/v1.0/images @app.route('/img/api/v1.0/images', methods=['GET']) #@auth.login_required def get_imgs(): """ returns in JSON format all the images currently stored by the server. Includes all fields, such as ID, and URL """ return jsonify({'images': images}) # test String # curl -i http://127.0.0.1:5000/img/api/v1.0/images/2 @app.route('/img/api/v1.0/images/<int:img_id>', methods = ['GET']) #@auth.login_required def get_img(img_id): """ returns in JSON format a specific image currently stored by the server. Requires the image ID to be included in the HTTP address """ img = [img for img in images if img['id'] == img_id] if len(img) == 0: abort(404) return jsonify({'img': img[0]}) # test String # curl -i -H "Content-Type: application/json" -X POST -d '{"url":"http://imgdirect.s3-website-us-west-2.amazonaws.com/neither.jpg"}' http://127.0.0.1:5000/img/api/v1.0/images @app.route('/img/api/v1.0/images', methods = ['POST']) #@auth.login_required def add_imgs(): """ adds images to the server image list. The images must be provided as a list encoded with JSON and sent with the HTTP post. A URL is required. Inference is not automatically run on them. 
""" if not request.json: abort(400) missing_url = False json_str = request.json img_data = json_str['new_imgs'] new_images = [] for img in img_data: # URL is required, other fields not if img.get('url') == None: missing_url = True continue if img.get('title') == None: new_title = "" else: new_title = img.get('title') if img.get('results') == None: new_results = "" else: new_results = img.get('results') image = { # simple way to ensure a unique id 'id' : images[-1]['id'] + 1, 'title': new_title, # url is required, otherwise return error 'url': img['url'], 'results': new_results, 'resize': False, 'size': "" } # add new image records to image list images.append(image) new_images.append(image) if missing_url: return_val = jsonify(new_images), 410 else: return_val = jsonify(new_images), 201 return return_val # test string # curl -X PUT -i -H "Content-Type: application/json" -d '{ \"id\": \"1\"}' http://127.0.0.1:5000/img/api/v1.0/infer/1 @app.route('/img/api/v1.0/infer/<int:img_id>', methods = ['PUT']) #@auth.login_required def infer(img_id): """ runs TensorFlow inference (recognition) on an image which is already in the images list. The image ID must be included in the HTTP address and encoded with JSON. Results are returned in JSON """ img = [img for img in images if img['id'] == img_id] if len(img) == 0: abort(404) if not request.json: abort(400) url = img[0]['url'] # call TensorFlow img[0]['results'] = tf_operations.run_inference_on_image(url) return jsonify({'img': img[0]}), 200 # test string # curl -X PUT -i http://127.0.0.1:5000/img/api/v1.0/inferundone # calls TensorFlow, so can be slow if many images are undone @app.route('/img/api/v1.0/inferundone', methods = ['PUT']) #@auth.login_required def infer_undone(): """ runs TensorFlow inference (recognition) on all images which are in the images list but for which inference has not already been run. Results are returned in JSON """ undone_imgs = [img for img in images if img['results'] == ''] if len(undone_imgs) == 0: abort(404) for img in undone_imgs: # call TensorFlow img['results'] = tf_operations.run_inference_on_image(img['url']) return jsonify({'images': undone_imgs}), 200 # test String # curl -i -H "Content-Type: application/json" -X POST -d '{"url":"http://imgdirect.s3-website-us-west-2.amazonaws.com/neither.jpg"}' http://127.0.0.1:5000/img/api/v1.0/imagesinfer # another TensorFlow function, again can be slow if many images are added @app.route('/img/api/v1.0/imagesinfer', methods = ['POST']) #@auth.login_required def add_imgs_infer(): """ adds new images to the image list and runs TensorFlow inference (recognition) on them. New images must be provided with a URL, and given in JSON format. Results are returned in JSON format. 
""" if not request.json: abort(400) missing_url = False json_str = request.json img_data = json_str['new_imgs'] new_images = [] for img in img_data: # URL is required, other fields not if img.get('url') == None: missing_url = True continue if img.get('title') == None: new_title = "" else: new_title = img.get('title') # call TensorFlow new_results = tf_operations.run_inference_on_image(img['url']) image = { # simple way to ensure a unique id 'id' : images[-1]['id'] + 1, 'title': new_title, # url is required, otherwise return error 'url': img['url'], 'results': new_results, 'resize': False, 'size': "" } images.append(image) new_images.append(image) if missing_url: return_val = jsonify(new_images), 410 else: return_val = jsonify(new_images), 201 return return_val # test String # curl -i -H "Content-Type: application/json" -X DELETE http://127.0.0.1:5000/img/api/v1.0/images/3 @app.route('/img/api/v1.0/images/<int:img_id>', methods=['DELETE']) #@auth.login_required def delete_img(img_id): """ deletes an image from the server image list. The image ID must be given in the HTTP address """ img = [img for img in images if img['id'] == img_id] if len(img) == 0: abort(404) images.remove(img[0]) return jsonify({'result': True}) def main(_): tf_operations.parse_args() # checks if model data is downloaded. If not, does that tf_operations.download_and_extract_model_if_needed() app.run(host = '0.0.0.0') if __name__ == '__main__': tf_operations.tf.app.run(main = main, argv = [sys.argv[0]])
apache-2.0
-1,915,504,158,426,327,300
28.986486
179
0.61262
false
3.517638
true
false
false
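A client-side sketch exercising the endpoints defined above with the requests library; the host, port, and example image URL are placeholders. Note the POST payload shape follows the add_imgs handler, which reads request.json['new_imgs'].

# Hypothetical client for the HTTP API above.
import requests

BASE = 'http://127.0.0.1:5000/img/api/v1.0'

# Add an image to the task list (no inference yet).
new = requests.post(BASE + '/images', json={
    'new_imgs': [{'title': 'example', 'url': 'http://example.com/shoe.jpg'}]
})
print(new.status_code, new.json())

# Run inference on every image that has no results yet.
done = requests.put(BASE + '/inferundone')
print(done.json())

# List all images together with their classification results.
print(requests.get(BASE + '/images').json())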
Copper-Head/RoboSanta
clingo_stats.py
1
13466
#!/usr/bin/python import os import sys import clingo import json # # STATS # class Stats: def __init__(self): self.__width = 13 def __ratio(self,x,y): return float(x)/float(y) if float(y)!=0 else 0 def __percent(self,x,y): return 100*self.__ratio(x,y) def __print_key(self,key): return key + " "*(self.__width-len(key)) + ": " def __print_key_value(self,key,value): return self.__print_key(key) + value # requires Control initialized with --stats def summary(self,control,models=True): out = "" summary = control.statistics['summary'] moreStr = "+" if int(summary['exhausted'])==0 else "" numEnum = int(summary['models']['enumerated']) if models: out += self.__print_key("Models") out += "{}{}\n".format(numEnum,moreStr) step = int(summary['call']) out += self.__print_key_value("Calls","{}\n".format(step+1)) # return out if no stats if not 'accu' in control.statistics: return out times = control.statistics['accu']['times'] out += self.__print_key("Time") totalTime = float(times['total']) solveTime = float(times['solve']) satTime = float(times['sat']) unsatTime = float(times['unsat']) cpuTime = float(times['cpu']) out += "{:.3f}s (Solving: {:.2f}s 1st Model: {:.2f}s Unsat: {:.2f}s)\n".format(totalTime,solveTime,satTime,unsatTime) out += self.__print_key_value("CPU Time","{:.3f}s".format(cpuTime)) concurrency = int(summary['concurrency']) if concurrency > 1: out += "\n" + self.__print_key_value("Threads","{:<8}".format(concurrency)) # when winner info becomes available: " (Winner: {})\n".format(winner) return out # requires Control initialized with --stats def statistics(self,control): # return "" if no stats if not 'accu' in control.statistics: return "" # choices... solver = control.statistics['accu']['solving']['solvers'] extra = solver['extra'] choices = int(solver['choices']) domChoices = int(extra['domain_choices']) conflicts = int(solver['conflicts']) backjumps = int(solver['conflicts_analyzed']) restarts = int(solver['restarts']) avgRestart = self.__ratio(backjumps,restarts) lastRestart = int(solver['restarts_last']) out = "\n" + self.__print_key_value("Choices","{:<8}".format(choices)) if domChoices: out += " (Domain: {})".format(domChoices) out += "\n" out += self.__print_key_value("Conflicts","{:<8}".format(conflicts)) out += " (Analyzed: {})\n".format(backjumps) out += self.__print_key_value("Restarts","{:<8}".format(restarts)) if restarts>0: out += " (Average: {:.2f} Last: {})".format(avgRestart,lastRestart) out += "\n" # hccs hccTests = int(extra['hcc_tests']) hccPartial = int(extra['hcc_partial']) if hccTests: out += self.__print_key_value("Stab. 
Tests","{:<8}".format(hccTests)) out += " (Full: {} Partial: {})\n".format(hccTests-hccPartial,hccPartial) # model level models = extra['models'] modelLits = extra['models_level'] avgModel = self.__ratio(modelLits,models) if models: out += self.__print_key_value("Model-Level","{:<8.1f}\n".format(avgModel)) # lemmas gps = int(extra['guiding_paths']) gpLits = int(extra['guiding_paths_lits']) avgGp = self.__ratio(gpLits, gps) splits = int(extra['splits']) sum = int(extra['lemmas']) deleted = int(extra['lemmas_deleted']) binary = int(extra['lemmas_binary']) ternary = int(extra['lemmas_ternary']) conflict = int(extra['lemmas_conflict']) loop = int(extra['lemmas_loop']) other = int(extra['lemmas_other']) lits_conflict = int(extra['lits_conflict']) lits_loop = int(extra['lits_loop']) lits_other = int(extra['lits_other']) out += self.__print_key_value("Problems","{:<8}".format(gps)) out += " (Average Length: {:.2f} Splits: {})\n".format(avgGp,splits) out += self.__print_key_value("Lemmas","{:<8}".format(sum)) out += " (Deleted: {})\n".format(deleted) out += self.__print_key_value(" Binary","{:<8}".format(binary)) out += " (Ratio: {:6.2f}%)\n".format(self.__percent(binary,sum)) out += self.__print_key_value(" Ternary","{:<8}".format(ternary)) out += " (Ratio: {:6.2f}%)\n".format(self.__percent(ternary,sum)) out += self.__print_key_value(" Conflict","{:<8}".format(conflict)) out += " (Average Length: {:6.1f} Ratio: {:6.2f}%) \n".format(self.__ratio(lits_conflict,conflict),self.__percent(conflict,sum)) out += self.__print_key_value(" Loop","{:<8}".format(loop)) out += " (Average Length: {:6.1f} Ratio: {:6.2f}%) \n".format(self.__ratio(lits_loop,loop),self.__percent(loop,sum)) out += self.__print_key_value(" Other","{:<8}".format(other)) out += " (Average Length: {:6.1f} Ratio: {:6.2f}%) \n".format(self.__ratio(lits_other,other),self.__percent(other,sum)) # distributed... 
distributed = int(extra['distributed']) integrated = int(extra['integrated']) if distributed or integrated: distRatio = self.__ratio(distributed,conflict+loop) sumDistLbd = int(extra['distributed_sum_lbd']) avgDistLbd = self.__ratio(sumDistLbd,distributed) intRatio = self.__ratio(integrated,distributed) intImps = int(extra['integrated_imps']) intJumps = int(extra['integrated_jumps']) avgIntJump = self.__ratio(intJumps,intImps) out += self.__print_key_value(" Distributed","{:<8}".format(distributed)) out += " (Ratio: {:6.2f}% Average LBD: {:.2f}) \n".format(distRatio*100.0,avgDistLbd) out += self.__print_key_value(" Integrated","{:<8}".format(integrated)) out += " (Ratio: {:6.2f}% ".format(intRatio*100.0) # for not accu: if not _accu: "(" out += "Unit: {} Average Jumps: {:.2f})\n".format(intImps,avgIntJump) # jumps jumps = extra['jumps'] _jumps = int(jumps['jumps']) bounded = int(jumps['jumps_bounded']) jumpSum = int(jumps['levels']) boundSum = int(jumps['levels_bounded']) maxJump = int(jumps['max']) maxJumpEx = int(jumps['max_executed']) maxBound = int(jumps['max_bounded']) jumped = jumpSum - boundSum jumpedRatio = self.__ratio(jumped,jumpSum) avgBound = self.__ratio(boundSum,bounded) avgJump = self.__ratio(jumpSum,_jumps) avgJumpEx = self.__ratio(jumped,_jumps) out += self.__print_key_value("Backjumps","{:<8}".format(_jumps)) out += " (Average: {:5.2f} Max: {:>3} Sum: {:>6})\n".format(avgJump,maxJump,jumpSum) out += self.__print_key_value(" Executed","{:<8}".format(_jumps-bounded)) out += " (Average: {:5.2f} Max: {:>3} Sum: {:>6} Ratio: {:6.2f}%)\n".format(avgJumpEx,maxJumpEx,jumped,jumpedRatio*100.0) out += self.__print_key_value(" Bounded","{:<8}".format(bounded)) out += " (Average: {:5.2f} Max: {:>3} Sum: {:>6} Ratio: {:6.2f}%)\n".format(avgBound,maxBound,boundSum,100.0 - (jumpedRatio*100.0)) out += "\n" # logic program lp = control.statistics['problem']['lp'] # rules rOriginal = int(lp['rules']) rules_normal = int(lp['rules_normal']) rules_choice = int(lp['rules_choice']) rules_minimize = int(lp['rules_minimize']) rules_acyc = int(lp['rules_acyc']) rules_heuristic = int(lp['rules_heuristic']) rFinal = int(lp['rules_tr']) rules_tr_normal = int(lp['rules_tr_normal']) rules_tr_choice = int(lp['rules_tr_choice']) rules_tr_minimize = int(lp['rules_tr_minimize']) rules_tr_acyc = int(lp['rules_tr_acyc']) rules_tr_heuristic = int(lp['rules_tr_heuristic']) out += self.__print_key_value("Rules","{:<8}".format(rFinal)) if (rFinal != rOriginal): out += " (Original: {})".format(rOriginal) out += "\n" for i in [#[" Normal", rules_normal, rules_tr_normal], [" Choice", rules_choice, rules_tr_choice], [" Minimize", rules_minimize, rules_tr_minimize], [" Acyc", rules_acyc, rules_tr_acyc], [" Heuristic", rules_heuristic, rules_tr_heuristic]]: if i[2]: out += self.__print_key_value(i[0],"{:<8}".format(i[2])) if (i[2] != i[1]): out += " (Original: {})".format(i[1]) out += "\n" # atoms atoms = int(lp['atoms']) auxAtoms = int(lp['atoms_aux']) out += self.__print_key_value("Atoms","{:<8}".format(atoms)) if (auxAtoms): out += " (Original: {} Auxiliary: {})".format(atoms-auxAtoms,auxAtoms) out += "\n" # disjunctions disjunctions = int(lp['disjunctions']) disjunctions_tr = int(lp['disjunctions_non_hcf']) if disjunctions: out += self.__print_key_value("Disjunctions","{:<8}".format(disjunctions_tr)) out += " (Original: {})\n".format(disjunctions) # bodies bFinal = int(lp['bodies_tr']) bOriginal = int(lp['bodies']) count_bodies = int(lp['count_bodies']) count_bodies_tr = int(lp['count_bodies_tr']) sum_bodies = 
int(lp['sum_bodies']) sum_bodies_tr = int(lp['sum_bodies_tr']) out += self.__print_key_value("Bodies","{:<8}".format(bFinal)) if (bFinal != bOriginal): out += " (Original: {})".format(bOriginal) out += "\n" for i in [[" Count", count_bodies, count_bodies_tr], [" Sum", sum_bodies, sum_bodies_tr ]]: if i[1]: out += self.__print_key_value(i[0],"{:<8}".format(i[2])) if (i[2] != i[1]): out += " (Original: {})".format(i[1]) out += "\n" # equivalences eqs = int(lp['eqs']) eqsAtom = int(lp['eqs_atom']) eqsBody = int(lp['eqs_body']) eqsOther = int(lp['eqs_other']) if eqs > 0: out += self.__print_key_value("Equivalences","{:<8}".format(eqs)) out += " (Atom=Atom: {} Body=Body: {} Other: {})\n".format(eqsAtom,eqsBody,eqsOther) # sccs sccs = int(lp['sccs']) nonHcfs = int(lp['sccs_non_hcf']) ufsNodes = int(lp['ufs_nodes']) gammas = int(lp['gammas']) out += self.__print_key("Tight") if sccs==0: out += "Yes" # for supported models: elif sccs == PrgNode:noScc else: out += "{:<8} (SCCs: {} Non-Hcfs: {} Nodes: {} Gammas: {})".format("No",sccs,nonHcfs,ufsNodes,gammas) out += "\n" # problem gen = control.statistics['problem']['generator'] vars = int(gen['vars']) eliminated = int(gen['vars_eliminated']) frozen = int(gen['vars_frozen']) binary = int(gen['constraints_binary']) ternary = int(gen['constraints_ternary']) sum = int(gen['constraints']) + binary + ternary acycEdges = int(gen['acyc_edges']) out += self.__print_key_value("Variables","{:<8}".format(vars)) out += " (Eliminated: {:>4} Frozen: {:>4})\n".format(eliminated,frozen) out += self.__print_key_value("Constraints","{:<8}".format(sum)) out += " (Binary: {:5.1f}% Ternary: {:5.1f}% Other: {:5.1f}%)\n".format(self.__percent(binary,sum),self.__percent(ternary,sum),self.__percent(sum-binary-ternary,sum)) return out program = """ % pigeonhole problem #const n=8. pigeon(1..n+1). box(1..n). 1 { in(X,Y) : box(Y) } 1 :- pigeon(X). :- 2 { in(X,Y) : pigeon(X) }, box(Y). % heuristic #heuristic in(X,Y) : pigeon(X), box(Y). [1,true] % disjunction a | b. a :- b. b :- a. % SAT box(n+1). """ satisfiable = False def on_model(model): global satisfiable sys.stdout.write("Answer: 1\n{}\n".format(str(model))) satisfiable = True def run(): # set options options = "-t4 --stats --heuristic=Domain" #options = "" # with Control() control = clingo.Control(options.split()) control.add("a",[],program) control.ground([("a",[])]) control.solve(on_model=on_model) if satisfiable: sys.stdout.write("SATISFIABLE\n") else: sys.stdout.write("UNSATISFIABLE\n") sys.stdout.write(Stats().summary(control)+"\n") sys.stdout.write(Stats().statistics(control)+"\n") # with $clingo file = "tmp.lp" with open(file, "w") as text_file: text_file.write(program) os.system("clingo {} {}; rm {}".format(options,file,file)) if __name__ == "__main__": run()
mit
4,832,443,471,218,957,000
40.180428
174
0.527551
false
3.254229
true
false
false
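The record above prints clingo/clasp statistics by walking the nested `control.statistics` mapping. A minimal sketch of reading the same keys, assuming the `clingo` Python module is installed; the toy program and the two printed fields are illustrative only.

import clingo

ctl = clingo.Control(["--stats"])            # statistics must be enabled, as in the record above
ctl.add("base", [], "1 { p(1..3) } 1.")      # toy program, purely illustrative
ctl.ground([("base", [])])
ctl.solve()

lp = ctl.statistics["problem"]["lp"]         # same path the printer above reads
print("atoms: %d  rules: %d" % (int(lp["atoms"]), int(lp["rules"])))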
FluidityProject/fluidity
tests/mms_tracer_P1dg_cdg_diff_steady_3d_cjc/cdg3d.py
1
1175
import os
from fluidity_tools import stat_parser
from sympy import *
from numpy import array,max,abs

meshtemplate='''
Point(1) = {0.0,0.0,0,0.1};
Extrude {1,0,0} {
  Point{1}; Layers{<layers>};
}
Extrude {0,1,0} {
  Line{1}; Layers{<layers>};
}
Extrude {0,0,1} {
  Surface{5}; Layers{<layers>};
}
Physical Surface(28) = {5,14,26,22,27,18};
Physical Volume(29) = {1};
'''

def generate_meshfile(name,layers):
    geo = meshtemplate.replace('<layers>',str(layers))
    open(name+".geo",'w').write(geo)
    os.system("gmsh -3 "+name+".geo")

def run_test(layers, binary):
    '''run_test(layers, binary)

    Run a single test of the channel problem. Layers is the number of mesh
    points in the cross-channel direction. The mesh is unstructured and
    isotropic. binary is a string containing the fluidity command to run.
    The return value is the error in u and p at the end of the simulation.'''

    generate_meshfile("channel",layers)
    os.system(binary+" channel_viscous.flml")

    s=stat_parser("channel-flow-dg.stat")
    return (s["Water"]['AnalyticUVelocitySolutionError']['l2norm'][-1],
            s["Water"]['AnalyticPressureSolutionError']['l2norm'][-1])
lgpl-2.1
7,518,357,913,638,427,000
26.325581
77
0.669787
false
3.005115
false
false
false
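A hedged sketch of how a helper like the one above is typically driven in a convergence study: run at two resolutions and estimate the observed order from the error ratio. The module name `cdg3d`, the layer counts, and the `fluidity` binary name are assumptions.

from math import log

import cdg3d   # assumes the file above is importable under this name

err_coarse = cdg3d.run_test(8, "fluidity")
err_fine = cdg3d.run_test(16, "fluidity")

# index 0 is the velocity error norm returned by run_test()
order = log(err_coarse[0] / err_fine[0], 2)
print("observed convergence order: %.2f" % order)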
tamland/trakt-sync
xbmc_library.py
1
4438
# -*- coding: utf-8 -*- # # Copyright (C) 2015 Thomas Amland # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals import logging import json import pykka from models import Movie, Episode logger = logging.getLogger(__name__) class XBMCLibrary(pykka.ThreadingActor): _movie_properties = ['title', 'year', 'imdbnumber', 'playcount'] def __init__(self): pykka.ThreadingActor.__init__(self) def movie(self, movieid): params = { 'movieid': movieid, 'properties': self._movie_properties } response = jsonrpc('VideoLibrary.GetMovieDetails', params) movie = response['result']['moviedetails'] return _load_movie(movie) def episode(self, episodeid): params = { 'episodeid': episodeid, 'properties': ['season', 'episode', 'playcount', 'tvshowid'], } episode = jsonrpc('VideoLibrary.GetEpisodeDetails', params)['result']['episodedetails'] params = {'tvshowid': episode['tvshowid'], 'properties': ['imdbnumber']} tvshow = jsonrpc('VideoLibrary.GetTVShowDetails', params)['result']['tvshowdetails'] return _load_episode(episode, tvshow['imdbnumber']) def movies(self): params = {'properties': self._movie_properties} response = jsonrpc('VideoLibrary.GetMovies', params) movies = response['result'].get('movies', []) movies = map(_load_movie, movies) return [m for m in movies if m is not None] def episodes(self): params = {'properties': ['imdbnumber']} tvshows = jsonrpc('VideoLibrary.GetTVShows', params)['result']\ .get('tvshows', []) ret = [] for tvshow in tvshows: params = { 'tvshowid': tvshow['tvshowid'], 'properties': ['season', 'episode', 'playcount', 'lastplayed'] } episodes = jsonrpc('VideoLibrary.GetEpisodes', params)['result']\ .get('episodes', []) episodes = [_load_episode(ep, tvshow['imdbnumber']) for ep in episodes] ret.extend(episodes) return ret def update_movie_details(self, movie): if not movie.xbmcid or movie.playcount <= 0: return False params = {'movieid': movie.xbmcid, 'playcount': movie.playcount} r = jsonrpc('VideoLibrary.SetMovieDetails', params) return r.get('result') == 'OK' def update_episode_details(self, item): if not item.xbmcid or item.playcount <= 0: return False params = {'episodeid': item.xbmcid, 'playcount': item.playcount} r = jsonrpc('VideoLibrary.SetEpisodeDetails', params) return r.get('result') == 'OK' def _load_movie(r): return Movie( title=r['title'], year=r['year'], imdbid=r['imdbnumber'], xbmcid=r['movieid'], playcount=r['playcount'], ) def _load_episode(r, tvshowid): return Episode( tvdbid=tvshowid, season=r['season'], episode=r['episode'], xbmcid=r['episodeid'], playcount=r['playcount'], ) def jsonrpc(method, params=None): if params is None: params = {} payload = { 'jsonrpc': '2.0', 'id': 1, 'method': method, 'params': params, } payload = json.dumps(payload, encoding='utf-8') try: import xbmc except: import requests response = requests.post( "http://localhost:8081/jsonrpc", data=payload, headers={'content-type': 'application/json'}).json() else: response = 
json.loads(xbmc.executeJSONRPC(payload), encoding='utf-8') if 'error' in response: logger.error("jsonrpc error: %r" % response) return None return response
gpl-3.0
6,390,157,565,330,041,000
31.874074
95
0.605228
false
3.934397
false
false
false
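The module above falls back to posting JSON-RPC calls to a local Kodi/XBMC web server when the `xbmc` module is unavailable. A small sketch of the same call pattern, assuming Kodi is listening on the port used in that fallback branch; the requested properties mirror the ones used above.

import json
import requests

payload = {
    "jsonrpc": "2.0",
    "id": 1,
    "method": "VideoLibrary.GetMovies",
    "params": {"properties": ["title", "year", "imdbnumber", "playcount"]},
}
response = requests.post(
    "http://localhost:8081/jsonrpc",                     # same endpoint as the fallback above
    data=json.dumps(payload),
    headers={"content-type": "application/json"},
).json()
for movie in response.get("result", {}).get("movies", []):
    print("%s (%s)" % (movie["title"], movie["year"]))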
dgouldin/invisible-ink
invisible_ink.py
1
2998
# -*- coding: utf-8 -*- from __future__ import unicode_literals import re import uuid _INVISIBLE_CHARS = ( '\u200b', '\u200c', '\u200d', '\ufeff', ) _INVISIBLE_MAP = dict(zip( '0123456789abcdef', (''.join((i, j)) for i in _INVISIBLE_CHARS for j in _INVISIBLE_CHARS), )) _INVISIBLE_REVERSE_MAP = {v: k for k, v in _INVISIBLE_MAP.iteritems()} def uuid_to_watermark(watermark_uuid): "Returns the watermark unicode string for a given uuid" return ''.join(_INVISIBLE_MAP[c] for c in watermark_uuid.get_hex()) _WATERMARK_LENGTH = len(uuid_to_watermark(uuid.uuid4())) _WATERMARK_RE = re.compile(r'[{}]{{{}}}'.format( ''.join(_INVISIBLE_CHARS), _WATERMARK_LENGTH, )) def watermark_to_uuid(watermark): "Returns the uuid for a given watermark string" if len(watermark) != _WATERMARK_LENGTH: raise ValueError('Watermark must be {} characters'.format( _WATERMARK_LENGTH)) try: watermark_hex = ''.join( _INVISIBLE_REVERSE_MAP[k] for k in map(''.join, zip(*[iter(watermark)] * 2)) ) except KeyError: raise ValueError('Watermark contains invalid characters') return uuid.UUID(hex=watermark_hex) def find_all_watermark_uuids(encoded_text): return map(watermark_to_uuid, _WATERMARK_RE.findall(encoded_text)) def encode_watermark(text, watermark_uuid=None, prepend=False): """Encodes the given text with a watermark string generated from the given uuid. Optionally appends or prepends the watermark string. Returns a 2-tuple (encoded_text, watermark_uuid) """ if not isinstance(text, unicode): raise ValueError('text must be a unicode string') watermark_uuid = watermark_uuid or uuid.uuid4() watermark = uuid_to_watermark(watermark_uuid) if prepend: encoded_text = ''.join((watermark, text)) else: encoded_text = ''.join((text, watermark)) return encoded_text, watermark_uuid def decode_watermark(encoded_text): """Decodes the given text, separating out the original text and the watermark uuid. Returns a 2-tuple (text, watermark_uuid). If no watermark is detected, text is the original text and watermark_uuid is None. """ if not isinstance(encoded_text, unicode): raise ValueError('encoded_text must be a unicode string') if len(encoded_text) < _WATERMARK_LENGTH: return encoded_text, None # appended watermark watermark = encoded_text[-_WATERMARK_LENGTH:] text = encoded_text[:-_WATERMARK_LENGTH] try: watermark_uuid = watermark_to_uuid(watermark) except ValueError: pass else: return text, watermark_uuid # prepended watermark watermark = encoded_text[:_WATERMARK_LENGTH] text = encoded_text[_WATERMARK_LENGTH:] try: watermark_uuid = watermark_to_uuid(watermark) except ValueError: pass else: return text, watermark_uuid return encoded_text, None
mit
6,584,132,110,583,274,000
27.552381
79
0.657772
false
3.552133
false
false
false
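A round-trip usage sketch for the watermarking module above (it targets Python 2, hence the `unicode` checks); the import name `invisible_ink` matches the file name.

from invisible_ink import encode_watermark, decode_watermark

encoded, mark = encode_watermark(u"hello world")   # text must be a unicode string
text, recovered = decode_watermark(encoded)

assert text == u"hello world"
assert recovered == mark
# the appended watermark consists entirely of zero-width characters
print("hidden characters: %d" % (len(encoded) - len(text)))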
samdowd/drumm-farm
drumm_env/lib/python2.7/site-packages/storages/backends/s3boto.py
1
20374
import os import posixpath import mimetypes from datetime import datetime from gzip import GzipFile from tempfile import SpooledTemporaryFile from django.core.files.base import File from django.core.files.storage import Storage from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation from django.utils.deconstruct import deconstructible from django.utils.encoding import force_text, smart_str, filepath_to_uri, force_bytes from django.utils.six import BytesIO from django.utils.six.moves.urllib import parse as urlparse try: from boto import __version__ as boto_version from boto.s3.connection import S3Connection, SubdomainCallingFormat from boto.exception import S3ResponseError from boto.s3.key import Key as S3Key from boto.utils import parse_ts, ISO8601 except ImportError: raise ImproperlyConfigured("Could not load Boto's S3 bindings.\n" "See https://github.com/boto/boto") from storages.utils import setting boto_version_info = tuple([int(i) for i in boto_version.split('-')[0].split('.')]) if boto_version_info[:2] < (2, 32): raise ImproperlyConfigured("The installed Boto library must be 2.32 or " "higher.\nSee https://github.com/boto/boto") def safe_join(base, *paths): """ A version of django.utils._os.safe_join for S3 paths. Joins one or more path components to the base path component intelligently. Returns a normalized version of the final path. The final path must be located inside of the base path component (otherwise a ValueError is raised). Paths outside the base path indicate a possible security sensitive operation. """ base_path = force_text(base) base_path = base_path.rstrip('/') paths = [force_text(p) for p in paths] final_path = base_path for path in paths: final_path = urlparse.urljoin(final_path.rstrip('/') + "/", path) # Ensure final_path starts with base_path and that the next character after # the final path is '/' (or nothing, in which case final_path must be # equal to base_path). base_path_len = len(base_path) if (not final_path.startswith(base_path) or final_path[base_path_len:base_path_len + 1] not in ('', '/')): raise ValueError('the joined path is located outside of the base path' ' component') return final_path.lstrip('/') @deconstructible class S3BotoStorageFile(File): """ The default file object used by the S3BotoStorage backend. This file implements file streaming using boto's multipart uploading functionality. The file can be opened in read or write mode. This class extends Django's File class. However, the contained data is only the data contained in the current buffer. So you should not access the contained file object directly. You should access the data via this class. Warning: This file *must* be closed using the close() method in order to properly write the file to S3. Be sure to close the file in your application. """ # TODO: Read/Write (rw) mode may be a bit undefined at the moment. Needs testing. # TODO: When Django drops support for Python 2.5, rewrite to use the # BufferedIO streams in the Python 2.6 io module. buffer_size = setting('AWS_S3_FILE_BUFFER_SIZE', 5242880) def __init__(self, name, mode, storage, buffer_size=None): self._storage = storage self.name = name[len(self._storage.location):].lstrip('/') self._mode = mode self.key = storage.bucket.get_key(self._storage._encode_name(name)) if not self.key and 'w' in mode: self.key = storage.bucket.new_key(storage._encode_name(name)) self._is_dirty = False self._file = None self._multipart = None # 5 MB is the minimum part size (if there is more than one part). 
# Amazon allows up to 10,000 parts. The default supports uploads # up to roughly 50 GB. Increase the part size to accommodate # for files larger than this. if buffer_size is not None: self.buffer_size = buffer_size self._write_counter = 0 @property def size(self): return self.key.size def _get_file(self): if self._file is None: self._file = SpooledTemporaryFile( max_size=self._storage.max_memory_size, suffix=".S3BotoStorageFile", dir=setting("FILE_UPLOAD_TEMP_DIR", None) ) if 'r' in self._mode: self._is_dirty = False self.key.get_contents_to_file(self._file) self._file.seek(0) if self._storage.gzip and self.key.content_encoding == 'gzip': self._file = GzipFile(mode=self._mode, fileobj=self._file) return self._file def _set_file(self, value): self._file = value file = property(_get_file, _set_file) def read(self, *args, **kwargs): if 'r' not in self._mode: raise AttributeError("File was not opened in read mode.") return super(S3BotoStorageFile, self).read(*args, **kwargs) def write(self, content, *args, **kwargs): if 'w' not in self._mode: raise AttributeError("File was not opened in write mode.") self._is_dirty = True if self._multipart is None: provider = self.key.bucket.connection.provider upload_headers = { provider.acl_header: self._storage.default_acl } upload_headers.update({'Content-Type': mimetypes.guess_type(self.key.name)[0] or self._storage.key_class.DefaultContentType}) upload_headers.update(self._storage.headers) self._multipart = self._storage.bucket.initiate_multipart_upload( self.key.name, headers=upload_headers, reduced_redundancy=self._storage.reduced_redundancy, encrypt_key=self._storage.encryption, ) if self.buffer_size <= self._buffer_file_size: self._flush_write_buffer() return super(S3BotoStorageFile, self).write(force_bytes(content), *args, **kwargs) @property def _buffer_file_size(self): pos = self.file.tell() self.file.seek(0, os.SEEK_END) length = self.file.tell() self.file.seek(pos) return length def _flush_write_buffer(self): """ Flushes the write buffer. """ if self._buffer_file_size: self._write_counter += 1 self.file.seek(0) headers = self._storage.headers.copy() self._multipart.upload_part_from_file( self.file, self._write_counter, headers=headers) def close(self): if self._is_dirty: self._flush_write_buffer() self._multipart.complete_upload() else: if not self._multipart is None: self._multipart.cancel_upload() self.key.close() if self._file is not None: self._file.close() self._file = None @deconstructible class S3BotoStorage(Storage): """ Amazon Simple Storage Service using Boto This storage backend supports opening files in read or write mode and supports streaming(buffering) data in chunks to S3 when writing. 
""" connection_class = S3Connection connection_response_error = S3ResponseError file_class = S3BotoStorageFile key_class = S3Key # used for looking up the access and secret key from env vars access_key_names = ['AWS_S3_ACCESS_KEY_ID', 'AWS_ACCESS_KEY_ID'] secret_key_names = ['AWS_S3_SECRET_ACCESS_KEY', 'AWS_SECRET_ACCESS_KEY'] access_key = setting('AWS_S3_ACCESS_KEY_ID', setting('AWS_ACCESS_KEY_ID')) secret_key = setting('AWS_S3_SECRET_ACCESS_KEY', setting('AWS_SECRET_ACCESS_KEY')) file_overwrite = setting('AWS_S3_FILE_OVERWRITE', True) headers = setting('AWS_HEADERS', {}) bucket_name = setting('AWS_STORAGE_BUCKET_NAME') auto_create_bucket = setting('AWS_AUTO_CREATE_BUCKET', False) default_acl = setting('AWS_DEFAULT_ACL', 'public-read') bucket_acl = setting('AWS_BUCKET_ACL', default_acl) querystring_auth = setting('AWS_QUERYSTRING_AUTH', True) querystring_expire = setting('AWS_QUERYSTRING_EXPIRE', 3600) reduced_redundancy = setting('AWS_REDUCED_REDUNDANCY', False) location = setting('AWS_LOCATION', '') encryption = setting('AWS_S3_ENCRYPTION', False) custom_domain = setting('AWS_S3_CUSTOM_DOMAIN') calling_format = setting('AWS_S3_CALLING_FORMAT', SubdomainCallingFormat()) secure_urls = setting('AWS_S3_SECURE_URLS', True) file_name_charset = setting('AWS_S3_FILE_NAME_CHARSET', 'utf-8') gzip = setting('AWS_IS_GZIPPED', False) preload_metadata = setting('AWS_PRELOAD_METADATA', False) gzip_content_types = setting('GZIP_CONTENT_TYPES', ( 'text/css', 'text/javascript', 'application/javascript', 'application/x-javascript', 'image/svg+xml', )) url_protocol = setting('AWS_S3_URL_PROTOCOL', 'http:') host = setting('AWS_S3_HOST', S3Connection.DefaultHost) use_ssl = setting('AWS_S3_USE_SSL', True) port = setting('AWS_S3_PORT', None) proxy = setting('AWS_S3_PROXY_HOST', None) proxy_port = setting('AWS_S3_PROXY_PORT', None) # The max amount of memory a returned file can take up before being # rolled over into a temporary file on disk. Default is 0: Do not roll over. max_memory_size = setting('AWS_S3_MAX_MEMORY_SIZE', 0) def __init__(self, acl=None, bucket=None, **settings): # check if some of the settings we've provided as class attributes # need to be overwritten with values passed in here for name, value in settings.items(): if hasattr(self, name): setattr(self, name, value) # For backward-compatibility of old differing parameter names if acl is not None: self.default_acl = acl if bucket is not None: self.bucket_name = bucket self.location = (self.location or '').lstrip('/') # Backward-compatibility: given the anteriority of the SECURE_URL setting # we fall back to https if specified in order to avoid the construction # of unsecure urls. if self.secure_urls: self.url_protocol = 'https:' self._entries = {} self._bucket = None self._connection = None if not self.access_key and not self.secret_key: self.access_key, self.secret_key = self._get_access_keys() @property def connection(self): if self._connection is None: self._connection = self.connection_class( self.access_key, self.secret_key, is_secure=self.use_ssl, calling_format=self.calling_format, host=self.host, port=self.port, proxy=self.proxy, proxy_port=self.proxy_port ) return self._connection @property def bucket(self): """ Get the current bucket. If there is no current bucket object create it. """ if self._bucket is None: self._bucket = self._get_or_create_bucket(self.bucket_name) return self._bucket @property def entries(self): """ Get the locally cached files for the bucket. 
""" if self.preload_metadata and not self._entries: self._entries = dict((self._decode_name(entry.key), entry) for entry in self.bucket.list(prefix=self.location)) return self._entries def _get_access_keys(self): """ Gets the access keys to use when accessing S3. If none are provided to the class in the constructor or in the settings then get them from the environment variables. """ def lookup_env(names): for name in names: value = os.environ.get(name) if value: return value access_key = self.access_key or lookup_env(self.access_key_names) secret_key = self.secret_key or lookup_env(self.secret_key_names) return access_key, secret_key def _get_or_create_bucket(self, name): """ Retrieves a bucket if it exists, otherwise creates it. """ try: return self.connection.get_bucket(name, validate=self.auto_create_bucket) except self.connection_response_error: if self.auto_create_bucket: bucket = self.connection.create_bucket(name) bucket.set_acl(self.bucket_acl) return bucket raise ImproperlyConfigured("Bucket %s does not exist. Buckets " "can be automatically created by " "setting AWS_AUTO_CREATE_BUCKET to " "``True``." % name) def _clean_name(self, name): """ Cleans the name so that Windows style paths work """ # Normalize Windows style paths clean_name = posixpath.normpath(name).replace('\\', '/') # os.path.normpath() can strip trailing slashes so we implement # a workaround here. if name.endswith('/') and not clean_name.endswith('/'): # Add a trailing slash as it was stripped. return clean_name + '/' else: return clean_name def _normalize_name(self, name): """ Normalizes the name so that paths like /path/to/ignored/../something.txt work. We check to make sure that the path pointed to is not outside the directory specified by the LOCATION setting. """ try: return safe_join(self.location, name) except ValueError: raise SuspiciousOperation("Attempted access to '%s' denied." % name) def _encode_name(self, name): return smart_str(name, encoding=self.file_name_charset) def _decode_name(self, name): return force_text(name, encoding=self.file_name_charset) def _compress_content(self, content): """Gzip a given string content.""" zbuf = BytesIO() # The GZIP header has a modification time attribute (see http://www.zlib.org/rfc-gzip.html) # This means each time a file is compressed it changes even if the other contents don't change # For S3 this defeats detection of changes using MD5 sums on gzipped files # Fixing the mtime at 0.0 at compression time avoids this problem zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf, mtime=0.0) try: zfile.write(force_bytes(content.read())) finally: zfile.close() zbuf.seek(0) content.file = zbuf content.seek(0) return content def _open(self, name, mode='rb'): name = self._normalize_name(self._clean_name(name)) f = self.file_class(name, mode, self) if not f.key: raise IOError('File does not exist: %s' % name) return f def _save(self, name, content): cleaned_name = self._clean_name(name) name = self._normalize_name(cleaned_name) headers = self.headers.copy() _type, encoding = mimetypes.guess_type(name) content_type = getattr(content, 'content_type', _type or self.key_class.DefaultContentType) # setting the content_type in the key object is not enough. 
headers.update({'Content-Type': content_type}) if self.gzip and content_type in self.gzip_content_types: content = self._compress_content(content) headers.update({'Content-Encoding': 'gzip'}) elif encoding: # If the content already has a particular encoding, set it headers.update({'Content-Encoding': encoding}) content.name = cleaned_name encoded_name = self._encode_name(name) key = self.bucket.get_key(encoded_name) if not key: key = self.bucket.new_key(encoded_name) if self.preload_metadata: self._entries[encoded_name] = key key.last_modified = datetime.utcnow().strftime(ISO8601) key.set_metadata('Content-Type', content_type) self._save_content(key, content, headers=headers) return cleaned_name def _save_content(self, key, content, headers): # only pass backwards incompatible arguments if they vary from the default kwargs = {} if self.encryption: kwargs['encrypt_key'] = self.encryption key.set_contents_from_file(content, headers=headers, policy=self.default_acl, reduced_redundancy=self.reduced_redundancy, rewind=True, **kwargs) def delete(self, name): name = self._normalize_name(self._clean_name(name)) self.bucket.delete_key(self._encode_name(name)) def exists(self, name): if not name: # root element aka the bucket try: self.bucket return True except ImproperlyConfigured: return False name = self._normalize_name(self._clean_name(name)) if self.entries: return name in self.entries k = self.bucket.new_key(self._encode_name(name)) return k.exists() def listdir(self, name): name = self._normalize_name(self._clean_name(name)) # for the bucket.list and logic below name needs to end in / # But for the root path "" we leave it as an empty string if name and not name.endswith('/'): name += '/' dirlist = self.bucket.list(self._encode_name(name)) files = [] dirs = set() base_parts = name.split("/")[:-1] for item in dirlist: parts = item.name.split("/") parts = parts[len(base_parts):] if len(parts) == 1: # File files.append(parts[0]) elif len(parts) > 1: # Directory dirs.add(parts[0]) return list(dirs), files def size(self, name): name = self._normalize_name(self._clean_name(name)) if self.entries: entry = self.entries.get(name) if entry: return entry.size return 0 return self.bucket.get_key(self._encode_name(name)).size def modified_time(self, name): name = self._normalize_name(self._clean_name(name)) entry = self.entries.get(name) # only call self.bucket.get_key() if the key is not found # in the preloaded metadata. if entry is None: entry = self.bucket.get_key(self._encode_name(name)) # Parse the last_modified string to a local datetime object. return parse_ts(entry.last_modified) def url(self, name, headers=None, response_headers=None, expire=None): # Preserve the trailing slash after normalizing the path. name = self._normalize_name(self._clean_name(name)) if self.custom_domain: return "%s//%s/%s" % (self.url_protocol, self.custom_domain, filepath_to_uri(name)) if expire is None: expire = self.querystring_expire return self.connection.generate_url( expire, method='GET', bucket=self.bucket.name, key=self._encode_name(name), headers=headers, query_auth=self.querystring_auth, force_http=not self.secure_urls, response_headers=response_headers, ) def get_available_name(self, name, max_length=None): """ Overwrite existing file with the same name. """ if self.file_overwrite: name = self._clean_name(name) return name return super(S3BotoStorage, self).get_available_name(name, max_length)
mit
4,176,623,037,583,207,400
38.030651
137
0.605232
false
4.096923
false
false
false
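The `safe_join()` helper above guards against path traversal before any S3 key is touched. Below is a simplified, self-contained re-implementation of that check for illustration only; the real backend additionally coerces text and preserves trailing slashes.

try:
    from urllib.parse import urljoin   # Python 3
except ImportError:
    from urlparse import urljoin       # Python 2, as used via six in the backend above

def is_inside_base(base, path):
    base = base.rstrip("/")
    final = urljoin(base + "/", path)
    return final.startswith(base) and final[len(base):len(base) + 1] in ("", "/")

print(is_inside_base("media", "photos/cat.jpg"))    # True
print(is_inside_base("media", "../../etc/passwd"))  # False -> SuspiciousOperation in the backend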
Smetterleen/Neopets-Python-API
neopapi/explore/world/island/TrainingSchool.py
1
6309
from neopapi.explore.world.island.Exceptions import UnknownStatException,\ PetNotFoundException, PetNotOnCourseException, PetAlreadyOnCourseException,\ StatTooHighException from neopapi.core.browse import register_page from neopapi.core.browse.Browser import BROWSER import re from datetime import timedelta """ This module provides the API for the Mystery Island Training school """ register_page('island/training.phtml', ['island/training.phtml?type=status', 'island/training.phtml?type=courses']) register_page('island/training.phtml?type=status', ['island/training.phtml?type=status', 'island/training.phtml?type=courses']) register_page('island/training.phtml?type=courses', ['island/training.phtml?type=status', 'island/training.phtml?type=courses']) register_page('island/process_training.phtml') # Stats to train STATS = ['Level', 'Endurance', 'Strength', 'Defence', 'Agility'] LEVEL, HP, STRENGTH, DEFENCE, MOVEMENT = STATS # Training statusses IDLE, AWAITING_PAYMENT, TRAINING, FINISHED = 1, 2, 3, 4 def get_status(pet_name): ''' Get the current status of the given pet in the island training school in the form of a dictionary ''' page = BROWSER.goto('island/training.phtml?type=status', force_refresh=True) pet_td = page.find('td', text=re.compile(pet_name + '.*')) if pet_td is None: raise PetNotFoundException(pet_name) infos = pet_td.find_parent('tr').find_next('tr').find_all('b') info = {} info['level'] = int(infos[0].text) info['strength'] = int(infos[1].text) info['defence'] = int(infos[2].text) info['movement'] = int(infos[3].text) info['current_hp'] = int(infos[4].text.split(' / ')[0]) info['hp'] = int(infos[4].text.split(' / ')[1]) return info def get_course_status(pet_name): page = BROWSER.goto('island/training.phtml?type=status', force_refresh=True) pet_td = page.find('td', text=re.compile(pet_name + '.*')) if pet_td is None: raise PetNotFoundException(pet_name) status_td = pet_td.find_parent('tr').find_next_sibling('tr').find_all('td')[1] if status_td.text == 'Course Finished!': return FINISHED elif 'This course has not been paid for yet' in status_td.text: return AWAITING_PAYMENT elif 'Time till course finishes' in status_td.text: return TRAINING return IDLE def get_course_time_remaining(pet_name): page = BROWSER.goto('island/training.phtml?type=status', force_refresh=True) status_td = page.find('td', text=re.compile(pet_name + '.*')).find_parent('tr').find_next_sibling('tr').find_all('td')[1] if 'Time till course finishes' not in status_td.text: raise PetNotOnCourseException(pet_name) time_parts = status_td.find('b').text.split(',') hours = int(time_parts[0].replace('hrs', '').strip()) minutes = int(time_parts[1].replace('minutes', '').strip()) seconds = int(time_parts[2].replace('seconds', '').strip()) return timedelta(hours=hours, minutes=minutes, seconds=seconds) def start_course(pet_name, stat): ''' This method starts a course for the given pet in the given stat ''' if not stat in STATS: raise UnknownStatException(stat) page = BROWSER.goto('island/training.phtml?type=courses') if page.find('select', {'name': 'pet_name'}).find('option', value=pet_name) is None: raise PetNotFoundException(pet_name) post_dict = {'course_type' : stat, 'pet_name' : pet_name, 'type' : 'start'} result_page = BROWSER.post('island/process_training.phtml', post_dict) if 'That pet is already doing a course' in result_page.text: BROWSER.back() raise PetAlreadyOnCourseException(pet_name) if 'No statistic can go above twice your pet' in result_page.text or 'Endurance can not go above three times your pet\'s 
level' in result_page.text: BROWSER.back() raise StatTooHighException(pet_name) # TODO: check if everything went all right return result_page def get_course_cost(pet_name): ''' This method checks if the given pet is currently enrolled in a course that still needs to be payed at the given school. If this is the case, it will return an array of item names that are needed to pay for the course. Otherwise it returns None. ''' page = BROWSER.goto('island/training.phtml?type=status') pet_td = page.find('td', text=re.compile(pet_name + '.*')) if pet_td is None: raise PetNotFoundException(pet_name) status_td = pet_td.find_parent('tr').find_next_sibling('tr').find_all('td')[1] if not 'This course has not been paid for yet' in status_td.text: raise PetNotOnCourseException(pet_name) return [tag.text for tag in status_td.find('p').find_all('b')] def pay_course(pet_name): ''' This method tries to pay the current course of the given pet. ''' page = BROWSER.goto('island/training.phtml?type=status') pet_td = page.find('td', text=re.compile(pet_name + '.*')) if pet_td is None: raise PetNotFoundException(pet_name) status_td = pet_td.find_parent('tr').find_next_sibling('tr').find_all('td')[1] if not 'This course has not been paid for yet' in status_td.text: raise PetNotOnCourseException(pet_name) BROWSER._get('island/process_training.phtml?type=pay&pet_name=' + pet_name) return get_course_status(pet_name) def finish_course(pet_name): ''' This method finishes the current course of the given pet if it is finished ''' page = BROWSER.goto('island/training.phtml?type=status', force_refresh=True) pet_td = page.find('td', text=re.compile(pet_name + '.*')) if pet_td is None: raise PetNotFoundException(pet_name) status_td = pet_td.find_parent('tr').find_next_sibling('tr').find_all('td')[1] if not 'Course Finished!' in status_td.text: raise PetNotOnCourseException(pet_name) post_dict = {'pet_name': pet_name, 'type': 'complete'} result_page = BROWSER.post('island/process_training.phtml', post_dict) # TODO: check if everything went all right return result_page
gpl-3.0
-1,034,167,616,519,363,800
37.242424
152
0.656839
false
3.343402
false
false
false
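An illustrative usage sketch for the training-school API above. It assumes the `neopapi` package is importable and that `BROWSER` already holds a logged-in session; the pet name is hypothetical.

from neopapi.explore.world.island import TrainingSchool
from neopapi.explore.world.island.Exceptions import (
    PetAlreadyOnCourseException, StatTooHighException)

pet = "my_pet_name"   # hypothetical
try:
    TrainingSchool.start_course(pet, TrainingSchool.STRENGTH)
except PetAlreadyOnCourseException:
    print(TrainingSchool.get_course_time_remaining(pet))
except StatTooHighException:
    print("stat is capped relative to the pet's level")
else:
    print("payment due: %s" % TrainingSchool.get_course_cost(pet))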
vigilo/vigiconf
src/vigilo/vigiconf/lib/server/base.py
1
11398
# -*- coding: utf-8 -*- # Copyright (C) 2007-2020 CS GROUP - France # License: GNU GPL v2 <http://www.gnu.org/licenses/gpl-2.0.html> """ Ce module contient la classe de base pour un serveur Vigilo: L{Server}. """ from __future__ import absolute_import import os import shutil import glob import re from vigilo.common.conf import settings from vigilo.models import tables from vigilo.models.session import DBSession from vigilo.common.logging import get_logger LOGGER = get_logger(__name__) from vigilo.common.gettext import translate _ = translate(__name__) from vigilo.vigiconf import conf from vigilo.vigiconf.lib import VigiConfError from vigilo.vigiconf.lib.systemcommand import SystemCommand, SystemCommandError class ServerError(VigiConfError): """Exception concernant un objet L{Server}""" def __init__(self, value, iServerName = ''): super(ServerError, self).__init__(value) self.value = value self.mServer = iServerName def __str__(self): _srvStr = "" if( len(self.mServer)>0): _srvStr = " on server %s" % (self.mServer) return repr("ServerError : %s%s" % (self.value, _srvStr)) class Server(object): """ Un serveur Vigilo. @ivar name: nom du serveur (DNS) @type name: C{str} @ivar revisions: révisions des configurations déployées sur ce serveur. @type revisions: C{dict} """ def __init__(self, name): self.name = name self._rev_filename = os.path.join( settings["vigiconf"].get("libdir"), "revisions" , "%s.revisions" % name) self.revisions = {"conf": None, "deployed": None, "installed": None, "previous": None, } def getName(self): """@return: L{name}""" return self.name def needsDeployment(self): """ Teste si le serveur nécessite un déploiement. @rtype: C{bool} """ return self.revisions["conf"] != self.revisions["deployed"] def needsRestart(self): """ Teste si le serveur nécessite un redémarrage des applications. @rtype: C{bool} """ return self.revisions["deployed"] != self.revisions["installed"] # external references def getBaseDir(self): # pylint: disable-msg=R0201 """ @return: Répertoire de base pour les déploiements. @rtype: C{str} """ return os.path.join(settings["vigiconf"].get("libdir"), "deploy") def createCommand(self, iCommand): """ @note: À réimplémenter dans les sous-classes. @param iCommand: commande à exécuter. @type iCommand: C{str} @return: L'instance de la commande @rtype: L{SystemCommand<lib.systemcommand.SystemCommand>} """ c = SystemCommand(iCommand) c.simulate = self.is_simulation() return c def is_simulation(self): """ @return: État du mode simulation @rtype: C{bool} """ simulate = False try: simulate = settings["vigiconf"].as_bool("simulate") except KeyError: pass return simulate # methods def switch_directories(self): """ Archive le répertoire contenant les anciennes configurations, et active les nouvelles, à l'aide de C{vigiconf-local}. """ cmd = ["vigiconf-local", "activate-conf"] _command = self.createCommand(cmd) try: _command.execute() except SystemCommandError as e: raise ServerError(_("Can't activate the configuration on " "%(server)s. COMMAND \"%(cmd)s\" FAILED. " "REASON: %(reason)s") % { 'server': self.getName(), 'cmd': " ".join(cmd), 'reason': e.value, }, self.getName()) LOGGER.debug("Switched directories on %s", self.name) def tarConf(self): """ I{Tarre} les fichiers de configuration, avant déploiement. 
""" cmd = ["tar", "-C", os.path.join(self.getBaseDir(), self.getName()), "-cvf", os.path.join(self.getBaseDir(), "%s.tar" % self.getName()), "."] cmd = SystemCommand(cmd) try: cmd.execute() except SystemCommandError as e: raise ServerError(_("Can't tar config for server " "%(server)s: %(error)s") % { 'server': self.getName(), 'error': e.value, }) def deployTar(self): raise NotImplementedError def deployFiles(self): """ Copie tous les fichiers de configuration. """ self.tarConf() self.deployTar() LOGGER.info(_("%s : deployment successful."), self.getName()) def _copy(self, source, destination): """ Un simple wrapper pour shutil.copyfile. @param source: source @type source: C{str} @param destination: destination @type destination: C{str} """ try: os.makedirs(os.path.dirname(destination)) except OSError: pass try: shutil.copyfile(source, destination) except Exception as e: raise ServerError(_("Cannot copy files (%(from)s to %(to)s): " "%(error)s.") % { 'from': source, 'to': destination, 'error': e, }, self.getName()) def getValidationDir(self): return os.path.join(self.getBaseDir(), self.getName(), "validation") def insertValidationDir(self): """ Prepare le répertoire avec les scripts de validation. """ validation_dir = self.getValidationDir() if not os.path.exists(validation_dir): os.makedirs(validation_dir) validation_scripts = os.path.join(conf.CODEDIR, "validation", "*.sh") for validation_script in glob.glob(validation_scripts): shutil.copy(validation_script, validation_dir) def deploy(self): # insert the "validation" directory in the deployment directory self.insertValidationDir() # now, the deployment directory is complete. self.deployFiles() def set_revision(self, rev): # update local revision files self.revisions["conf"] = rev self.revisions["deployed"] = rev self.write_revisions() cmd = self.createCommand(["vigiconf-local", "set-revision", str(rev)]) cmd.execute() def update_revisions(self): cmd = self.createCommand(["vigiconf-local", "get-revisions"]) cmd.execute() rev_re = re.compile("^\s*(\w+)\s+(\d+)\s*$") revisions = {"new": 0, "prod": 0, "old": 0} for line in cmd.getResult().split("\n"): rev_match = rev_re.match(line) if not rev_match: continue directory = rev_match.group(1) revision = rev_match.group(2) revisions[directory] = int(revision) self.revisions["deployed"] = revisions["new"] self.revisions["installed"] = revisions["prod"] self.revisions["previous"] = revisions["old"] def write_revisions(self): """ Écrit la révision SVN dans le fichier d'état. 
""" directory = os.path.dirname(self._rev_filename) if not os.path.exists(directory): os.makedirs(directory) try: _file = open(self._rev_filename, 'wb') _file.write("Revision: %d\n" % self.revisions["conf"]) _file.close() except Exception as e: # pylint: disable-msg=W0703 LOGGER.exception(_("Cannot write the revision file: %s"), e) def get_state_text(self, last_revision): self.update_revisions() self.revisions["conf"] = last_revision state = ( _("Server %(server)s:\n" " deployed: %(deployed)d\n" " installed: %(installed)d\n" " previous: %(previous)d" ) % {"server": self.name, "deployed": self.revisions["deployed"], "installed": self.revisions["installed"], "previous": self.revisions["previous"], } ) if self.needsDeployment() or self.needsRestart(): todo = [] if self.needsDeployment(): todo.append(_("should be deployed")) if self.needsRestart(): todo.append(_("should restart")) state += "\n -> %s" % ", ".join(todo) if not self.is_enabled(): state += "\n " + _("disabled").upper() return state def is_enabled(self): """ @return: L'état d'activation du serveur (C{True} pour actif, C{False} pour inactif) """ server_db = tables.VigiloServer.by_vigiloserver_name( unicode(self.name)) if server_db is None: # pas en base, donc pas désactivé (peut-être qu'il vient # d'être ajouté) return True if server_db.disabled: return False else: return True def disable(self): """ Désactive ce serveur Vigilo """ vserver = tables.VigiloServer.by_vigiloserver_name(unicode(self.name)) if vserver is None: raise VigiConfError(_("The Vigilo server %s does not exist") % self.name) if vserver.disabled: raise VigiConfError(_("The Vigilo server %s is already disabled") % self.name) vserver.disabled = True DBSession.flush() def enable(self): """ Active ce serveur Vigilo """ vserver = tables.VigiloServer.by_vigiloserver_name(unicode(self.name)) if vserver is None: raise VigiConfError(_("The Vigilo server %s does not exist") % self.name) if not vserver.disabled: raise VigiConfError(_("The Vigilo server %s is already enabled") % self.name) # On efface les associations précédentes prev_ventil = DBSession.query( tables.Ventilation.idapp, tables.Ventilation.idhost ).filter( tables.Ventilation.idvigiloserver == vserver.idvigiloserver ).all() for idapp, idhost in prev_ventil: temp_ventils = DBSession.query(tables.Ventilation ).filter( tables.Ventilation.idapp == idapp ).filter( tables.Ventilation.idhost == idhost ).filter( tables.Ventilation.idvigiloserver != vserver.idvigiloserver ).all() for temp_ventil in temp_ventils: DBSession.delete(temp_ventil) vserver.disabled = False DBSession.flush() # vim:set expandtab tabstop=4 shiftwidth=4:
gpl-2.0
4,204,519,866,043,522,000
33.550152
79
0.539456
false
4.062545
true
false
false
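A tiny standalone illustration of the decision logic behind `needsDeployment()` / `needsRestart()` and `get_state_text()` above; the revision numbers are made up.

revisions = {"conf": 42, "deployed": 41, "installed": 40, "previous": 39}

todo = []
if revisions["conf"] != revisions["deployed"]:
    todo.append("should be deployed")
if revisions["deployed"] != revisions["installed"]:
    todo.append("should restart")
print(", ".join(todo) if todo else "up to date")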
h4ck3rm1k3/hacker-public-radio-publisher
setup.py
1
1139
""" setup """ from setuptools import setup, find_packages setup( name = "HackerPublicRadioPublisher", version = "0.1", description = "Python Uploader for Hacker Public Radio", long_description=u''' A set of scripts to manage the creation and uploading of shows into HPR ''', platforms = "Debian GNU/Linux", author = "James Michael DuPont", author_email = "jamesmikedupont@gmail.com", license = "GNU GPLv3", url = "github.com/h4ck3rm1k3/hacker-public-radio-publisher", packages = find_packages(), package_data = { '': ['*.txt', '*.flac', '*.html'], }, install_requires = [ 'nose', 'ftputil>=2.8', 'internetarchive>=0.4.4', 'Jinja>=1.2', 'PyYAML>=3.10', 'docopt>=0.6.1', 'pytest>=2.3.4', 'jsonpatch>=1.1', 'requests>=2.0.0', # 'requests>=1.2.0', 'py>=1.4.14', 'jsonpointer>=1.1', #'audiotools', #not working with pip, # get code from : https://github.com/tuffy/python-audio-tools.git ], test_suite = 'nose.collector' )
gpl-3.0
755,017,810,238,400,400
24.311111
75
0.545215
false
3.282421
false
false
false
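A small, hedged sketch of how one of the pins declared above can be inspected programmatically; it assumes the third-party `packaging` distribution is available (it is not a dependency of this project).

from packaging.requirements import Requirement

req = Requirement("ftputil>=2.8")              # taken verbatim from install_requires above
print("%s %s" % (req.name, req.specifier))     # ftputil >=2.8
print(req.specifier.contains("2.8"))           # True
print(req.specifier.contains("2.7"))           # False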
Azure/azure-sdk-for-python
sdk/core/azure-core/azure/core/_match_conditions.py
1
1506
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from enum import Enum


class MatchConditions(Enum):
    """An enum to describe match conditions.
    """
    Unconditionally = 1
    IfNotModified = 2
    IfModified = 3
    IfPresent = 4
    IfMissing = 5
mit
8,286,881,614,508,012,000
42.028571
78
0.679283
false
4.811502
false
false
false
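One plausible way client code can translate these enum members into HTTP precondition headers, shown purely for illustration; the mapping below is an assumption and is not quoted from the Azure SDK internals.

from azure.core import MatchConditions

def precondition_headers(match_condition, etag=None):
    # hypothetical helper: maps a MatchConditions member to request headers
    if match_condition == MatchConditions.IfNotModified:
        return {"If-Match": etag}
    if match_condition == MatchConditions.IfModified:
        return {"If-None-Match": etag}
    if match_condition == MatchConditions.IfPresent:
        return {"If-Match": "*"}
    if match_condition == MatchConditions.IfMissing:
        return {"If-None-Match": "*"}
    return {}                                   # Unconditionally: no precondition sent

print(precondition_headers(MatchConditions.IfNotModified, etag='"0x8D4BCC2E4835CD0"'))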
punitvanjani/test1
api/interface.py
1
7869
from errors import invalid_operation, no_records, no_communication from models import Properties from debugger import debug_msg import serial table = ( 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440, 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841, 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40, 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41, 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641, 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040, 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240, 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441, 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41, 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41, 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40, 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041, 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240, 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441, 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41, 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41, 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640, 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241, 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440, 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841, 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40, 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41, 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 ) def calcString(st, crc=0xFFFF): """Given a hex string and starting CRC, Calc a final CRC-16 """ for ch in st: crc = table[(crc ^ ord(ch)) & 0xFF] ^ (crc >> 8) # after calculation, interchange LSB and MSB crc1 = crc & 0xFF00 crc1 = crc1 >> 8 crc2 = crc & 0x00FF crc2 = crc2 << 8 crc = crc2 ^ crc1 return crc def convert(int_value): encoded = format(int_value, 'x') length = len(encoded) encoded = encoded.zfill(length+length%2) return encoded.decode('hex') def bit_from_string(string, index): i, j = divmod(index, 8) if ord(string[i]) & (1 << j): return 1 else: return 0 def webswitch(endpoint,expected_status): status = -1 errors = "" # Fetch the mode of communication and Baud Rate for Webswitch wbs_comm = Properties.query.filter_by(key='WbSwtComm').first() if wbs_comm == None: errors = no_records('interface.webswitch.properties','WbSwtComm') status = -1 return (status,errors) wbb_comm = Properties.query.filter_by(key='WbSwtBaud').first() if wbb_comm == None: errors = no_records('interface.webswitch.properties','WbSwtBaud') status = -1 return (status,errors) # Establish communication try: serusb = serial.Serial(wbs_comm.value, int(wbb_comm.value)) except: errors = no_communication() debug_msg("No communication", wbs_comm.value, int(wbb_comm.value)) status = -1 return (status,errors) # Check the type of endpointtype, if it is switch proceed 
if endpoint.endpoint_type==1000 or endpoint.endpoint_type==1002 or endpoint.endpoint_type==1004 or endpoint.endpoint_type==1005 or endpoint.endpoint_type==1006 or endpoint.endpoint_type==1007 or endpoint.endpoint_type==1008 or endpoint.endpoint_type==1009 or endpoint.endpoint_type==1010 or endpoint.endpoint_type==1011 or endpoint.endpoint_type==1012 or endpoint.endpoint_type==1013 or endpoint.endpoint_type==1014 or endpoint.endpoint_type==1015 or endpoint.endpoint_type==1016 or endpoint.endpoint_type==1017 or endpoint.endpoint_type==1018 or endpoint.endpoint_type==1019: if (expected_status == 1): action_id = 255 elif (expected_status == 0): action_id = 0 # Form the outbound communication string to write coil : st0 = Slave ID, st1 = Function code for modbus write coil, st3 = device id/endpoint id, st4 = expected Status (converted), st6 = crc code this is 16 bit code st0 = convert(endpoint.internal_nod_id) st1 = convert(5) st2 = convert(0) st3 = convert(endpoint.internal_end_id) st4 = convert(action_id) st5 = convert(0) st6 = convert(calcString(st0+st1+st2+st3+st4+st5)) serusb.close() serusb.open() debug_msg("WS outbound", int(st0.encode('hex'), 16),int(st1.encode('hex'), 16),int(st2.encode('hex'), 16),int(st3.encode('hex'), 16),int(st4.encode('hex'), 16),int(st5.encode('hex'), 16),int(st0.encode('hex'), 16),int(st6.encode('hex'), 16)) print (st0,st1,st2,st3,st4,st5,st6) serusb.flushInput() serusb.write(st0+st1+st2+st3+st4+st5+st6) serusb.timeout=2 try: read_val = serusb.read() # Wait forever for anything for i in range(1,100000): # This dummy loop is added so that we can complete serial buffer pass data_left = serusb.inWaiting() # Get the number of characters ready to be read read_val += serusb.read(size=data_left) # Do the read and combine it with the first character serusb.close() except: data_left = 0 read_val = "" serusb.close() try: debug_msg( "WS inbound:", int(read_val[0].encode('hex'), 16), int(read_val[1].encode('hex'), 16), int(read_val[2].encode('hex'), 16), int(read_val[3].encode('hex'), 16), int(read_val[4].encode('hex'), 16), int(read_val[5].encode('hex'), 16), int(read_val[6].encode('hex'), 16), int(read_val[7].encode('hex'), 16))#, int(read_val[8].encode('hex'), 16), int(read_val[9].encode('hex'), 16), int(read_val[10].encode('hex'), 16), int(read_val[11].encode('hex'), 16), int(read_val[12].encode('hex'), 16), int(read_val[13].encode('hex'), 16)) except: pass if(data_left != 0 and data_left >= 4): ws_bit = int(read_val[4].encode('hex'), 16) print "ws_bit", ws_bit if ws_bit == 255: status = 1 elif ws_bit == 0: status = 0 else: status = -1 else: status = -1 errors = "" # Check the type of endpointtype, if it is dimmer proceed elif endpoint.endpoint_type == 1001 or endpoint.endpoint_type == 1020: status = expected_status return str(status),errors def touchswitch(endpoint,expected_status): debug_msg("test") errors = "" print "touchswitch called2" status = expected_status return str(status),errors def acremote(endpoint,expected_status): errors = "" status = expected_status return str(status),errors def tvremote(endpoint,expected_status): errors = "" status = expected_status return str(status),errors def settopbox(endpoint,expected_status): errors = "" status = expected_status return str(status),errors
mit
-4,152,577,362,719,511,000
44.294118
580
0.634007
false
2.593606
false
false
false
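The lookup table above implements CRC-16 with the reflected Modbus polynomial. A bitwise equivalent is sketched below; it should match `calcString()` before that function swaps the two result bytes for wire order. The sample frame is a hypothetical write-single-coil request.

def crc16_modbus(data):
    crc = 0xFFFF
    for byte in bytearray(data):
        crc ^= byte
        for _ in range(8):
            if crc & 1:
                crc = (crc >> 1) ^ 0xA001   # reflected polynomial behind the table above
            else:
                crc >>= 1
    return crc

frame = bytearray([0x01, 0x05, 0x00, 0x02, 0xFF, 0x00])   # slave 1, function 5, coil 2, ON
crc = crc16_modbus(frame)
print("%02x %02x" % (crc & 0xFF, crc >> 8))                # low byte is transmitted first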
qwergram/data-structures
src/dll.py
1
3676
# -*- coding: utf-8 -*- """Doubl linked List implementation.""" class Node(object): """Creates a node object.""" def __init__(self, value, next, prev): """Initalize Node Object.""" self.value = value self.next = next self.prev = prev class DoublyLinkedList(object): """Define a double pointered list.""" # It was quite difficult trying to solve this problem, so I got some help # with my logic from the following site: # http://ls.pwd.io/2014/08/singly-and-doubly-linked-lists-in-python/ head = None tail = None def __init__(self, values): """Accept a list of values and generate a chain of Nodes using those values.""" if isinstance(values, list): for value in values: self.append(value) else: raise TypeError("Please package your item into a list!") def append(self, value): """Append a value to the tail of the linked list.""" new_node = Node(value, None, None) if self.head is None: self.head = self.tail = new_node else: new_node.prev = self.tail new_node.next = None self.tail.next = new_node self.tail = new_node def insert(self, value): """Insert a value to the head of the linked list.""" new_node = Node(value, None, None) if self.head is None: self.head = self.tail = new_node else: new_node.next = self.head new_node.prev = None self.head.prev = new_node self.head = new_node def pop(self): """Remove the head of the chain and return the Node.""" if self.head is None: raise IndexError("Cannot execute on an empty list!") elif self.head.next is None: old_head = self.head self.head = self.tail = None return old_head else: old_head = self.head new_head = self.head.next new_head.prev = None self.head = new_head old_head.next = None old_head.prev = None return old_head def shift(self): """Remove the tail of the chain and return the Node.""" if self.head is None: raise IndexError("Cannot execute an empty list!") elif self.head.next is None: old_head = self.head self.head = self.tail = None return old_head else: old_tail = self.tail new_tail = self.tail.prev new_tail.next = None self.tail = new_tail old_tail.next = None old_tail.prev = None return old_tail def remove(self, value): """Remove the specified item from the node chain and rebind the Nodes again.""" if self.tail is not None and self.tail.value == value: self.shift() elif self.head is not None and self.head.value == value: self.pop() else: current_node = self.head previous_node = None while current_node is not None: if current_node.value == value: if previous_node is not None: previous_node.next = current_node.next previous_node.next.prev = previous_node else: self.head = current_node.next break previous_node = current_node current_node = current_node.next else: raise ValueError("Item was not found in list!")
mit
8,665,013,767,160,872,000
32.724771
87
0.533188
false
4.334906
false
false
false
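A usage sketch for the list implementation above, assuming the module is importable as `dll`; the comments show the expected contents after each call.

from dll import DoublyLinkedList

chain = DoublyLinkedList([1, 2, 3])
chain.append(4)                 # 1 2 3 4
chain.insert(0)                 # 0 1 2 3 4
chain.remove(2)                 # 0 1 3 4
print(chain.pop().value)        # 0, taken from the head
print(chain.shift().value)      # 4, taken from the tail

node = chain.head               # walk what is left: 1, 3
while node is not None:
    print(node.value)
    node = node.next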
sam-m888/gprime
gprime/plugins/rel/rel_pl.py
1
32078
# -*- coding: utf-8 -*- # # gPrime - A web-based genealogy program # # Copyright (C) 2003-2005 Donald N. Allingham # Copyright (C) 2008 Brian G. Matherly # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Rewritten in 2008 for 3.x version by Łukasz Rymarczyk # Written in 2007 by Piotr Czubaszek, largely based on rel_de.py by Alex Roitman. # PL: Po objaśnienia oznaczania relacji zobacz relationship.py # EN: For more information see relationship.py # """ Polish-specific definitions of relationships. """ #------------------------------------------------------------------------- # # Gprime modules # #------------------------------------------------------------------------- from gprime.lib import Person import gprime.relationship #------------------------------------------------------------------------- # określa liczebnik porządkowy _level_name = [ "pierwszego", "drugiego", "trzeciego", "czwartego", "piątego", "szóstego", "siódmego", "ósmego", "dziewiątego", "dziesiątego", "jedenastego", "dwunastego","trzynastego", "czternastego", "piętnastego", "szesnastego", "siedemnastego", "osiemnastego","dziewiętnastego", "dwudziestego", ] _father_level = [ "", "ojciec", "dziadek", "pradziadek", "prapradziadek", "praprapradziadek", "prapraprapradziadek", "praprapraprapradziadek", "prapraprapraprapradziadek", "praprapraprapraprapradziadek", "prapraprapraprapraprapradziadek", ] _mother_level = [ "", "matka", "babcia", "prababcia", "praprababcia", "prapraprababcia", "praprapraprababcia", "prapraprapraprababcia", "praprapraprapraprababcia", "prapraprapraprapraprababcia", "praprapraprapraprapraprababcia", ] _son_level = [ "", "syn", "wnuk", "prawnuk", "praprawnuk", "prapraprauwnuk", "praprapraprauwnuk", "prapraprapraprawnuk", "praprapraprapraprawnuk", "prapraprapraprapraprawnuk", "praprapraprapraprapraprawnuk", ] _daughter_level = [ "", "córka", "wnuczka", "prawnuczka", "praprawnuczka", "prapraprauwnuczka", "praprapraprauwnuczka", "prapraprapraprawnuczka", "praprapraprapraprawnuczka", "prapraprapraprapraprawnuczka", "praprapraprapraprapraprawnuczka", ] _sister_level_of_male = [ "", "siostra", "ciotka stryjeczna", "babcia stryjeczna", "prababcia stryjeczna", "praprababcia stryjeczna", "prapraprababcia stryjeczna", "praprapraprababcia stryjeczna", "prapraprapraprababcia stryjeczna", "praprapraprapraprababcia stryjeczna", "prapraprapraprapraprababcia stryjeczna", "praprapraprapraprapraprababcia stryjeczna", ] _sister_level_of_female = [ "", "siostra", "ciotka", "babcia cioteczna", "prababcia cioteczna", "praprababcia cioteczna", "prapraprababcia cioteczna", "praprapraprababcia cioteczna", "prapraprapraprababcia cioteczna", "praprapraprapraprababcia cioteczna", "prapraprapraprapraprababcia cioteczna", "praprapraprapraprapraprababcia cioteczna", ] _brother_level_of_male = [ "", "brat", "stryj", "dziadek stryjeczny", "pradziadek stryjeczny", "prapradziadek stryjeczny", 
"praprapradziadek stryjeczny", "prapraprapradziadek stryjeczny", "praprapraprapradziadek stryjeczny", "prapraprapraprapradziadek stryjeczny", "praprapraprapraprapradziadek stryjeczny", "prapraprapraprapraprapradziadek stryjeczny", ] _brother_level_of_female = [ "", "brat", "wuj", "dziadek cioteczny", "pradziadek cioteczny", "prapradziadek cioteczny", "praprapradziadek cioteczny", "prapraprapradziadek cioteczny", "praprapraprapradziadek cioteczny", "prapraprapraprapradziadek cioteczny", "praprapraprapraprapradziadek cioteczny", "prapraprapraprapraprapradziadek cioteczny", ] _nephew_level_of_brothers_son = [ "", "bratanek", "syn bratanka", "wnuk bratanka", "prawnuk bratanka", "praprawnuk bratanka", "prapraprawnuk bratanka", "praprapraprawnuk bratanka", "prapraprapraprawnuk bratanka", "praprapraprapraprawnuk bratanka", "prapraprapraprapraprawnuk bratanka", ] _nephew_level_of_brothers_daughter = [ "", "bratanica", "syn bratanicy", "wnuk bratanicy", "prawnuk bratanicy", "praprawnuk bratanicy", "prapraprawnuk bratanicy", "praprapraprawnuk bratanicy", "prapraprapraprawnuk bratanicy", "praprapraprapraprawnuk bratanicy", "prapraprapraprapraprawnuk bratanicy", "praprapraprapraprapraprawnuk bratanicy", ] _nephew_level_of_sisters_son = [ "", "siostrzeniec", "syn siostrzeńca", "wnuk siostrzeńca", "prawnuk siostrzeńca", "praprawnuk siostrzeńca", "prapraprawnuk siostrzeńca", "praprapraprawnuk siostrzeńca", "prapraprapraprawnuk siostrzeńca", "praprapraprapraprawnuk siostrzeńca", "prapraprapraprapraprawnuk siostrzeńca", ] _nephew_level_of_sisters_daughter = [ "", "siostrzenica", "syn siostrzenicy", "wnuk siostrzenicy", "prawnuk siostrzenicy", "praprawnuk siostrzenicy", "prapraprawnuk siostrzenicy", "praprapraprawnuk siostrzenicy", "prapraprapraprawnuk siostrzenicy", "praprapraprapraprawnuk siostrzenicy", "prapraprapraprapraprawnuk siostrzenicy", ] _niece_level_of_brothers_son = [ "", "bratanica", "córka bratanka", "wnuczka bratanka", "prawnuczka bratanka", "praprawnuczka bratanka", "prapraprawnuczka bratanka", "praprapraprawnuczka bratanka", "prapraprapraprawnuczka bratanka", "praprapraprapraprawnuczka bratanka", ] _niece_level_of_brothers_daughter = [ "", "bratanica", "córka bratanicy", "wnuczka bratanicy", "prawnuczka bratanicy", "praprawnuczka bratanicy", "prapraprawnuczka bratanicy", "praprapraprawnuczka bratanicy", "prapraprapraprawnuczka bratanicy", "praprapraprapraprawnuczka bratanicy", ] _niece_level_of_sisters_son = [ "", "siostrzenica", "córka siostrzeńca", "wnuczka siostrzeńca", "prawnuczka siostrzeńca", "praprawnuczka siostrzeńca", "prapraprawnuczka siostrzeńca", "praprapraprawnuczka siostrzeńca", "prapraprapraprawnuczka siostrzeńca", "praprapraprapraprawnuczka siostrzeńca", ] _niece_level_of_sisters_daughter = [ "", "siostrzenica", "córka siostrzenicy", "wnuczka siostrzenicy", "prawnuczka siostrzenicy", "praprawnuczka siostrzenicy", "prapraprawnuczka siostrzenicy", "praprapraprawnuczka siostrzenicy", "prapraprapraprawnuczka siostrzenicy", "praprapraprapraprawnuczka siostrzenicy", ] #------------------------------------------------------------------------- # # # #------------------------------------------------------------------------- class RelationshipCalculator(gramps.gen.relationship.RelationshipCalculator): """ RelationshipCalculator Class """ def __init__(self): gramps.gen.relationship.RelationshipCalculator.__init__(self) def get_son(self, level, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo potomek męski (np. 
syn) jest spokrewniony do danej osoby """ # Określ, czy osoba jest przybraną, czy rodzoną if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrany " # TODO: dodać rozpoznawanie pasierb/pasierbica if level >= 0 and level < len(_son_level): return t_inlaw +_son_level[level] elif level >= len(_son_level) and (level - 1) < len(_level_name): return t_inlaw + \ "potomek męski %s pokolenia" % _level_name[level - 1] else: return t_inlaw + \ "potomek męski w %d pokoleniu" % level def get_daughter(self, level, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo potomek żeński (np. córka) jest spokrewniony do danej osoby """ # Określ, czy osoba jest przybraną, czy rodzoną # + stwórz obie formy (męską i żeńską) if inlaw == '': t_inlaw = "" t_inlawM = "" else: t_inlaw = "przybrana " t_inlawM = "przybrany " # TODO: dodać rozpoznawanie pasierb/pasierbica if level >= 0 and level < len(_daughter_level): return t_inlaw + _daughter_level[level] elif level >= len(_daughter_level) and (level - 1) < len(_level_name): return t_inlawM + \ "potomek żeński %s pokolenia" % _level_name[level - 1] else: return t_inlawM + \ "potomek żeński w %d pokoleniu" % level def get_child_unknown(self, level, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo potomek o nieokreślonej płci jest spokrewniony dodanej osoby """ # Określ, czy osoba jest przybraną, czy rodzoną if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrany " if level == 1: if inlaw == '' : return "dziecko" else: return "przybrane dziecko" elif level >= 1 and (level - 1) < len(_level_name): return t_inlaw + "potomek %s pokolenia" % _level_name[level - 1] else: return t_inlaw + "potomek w %d pokoleniu" % level def get_sword_distaff(self, level, reltocommon, spacebefore = ""): """ PL: Generuje relację po mieczu/po kądzieli EN: Generate relation 'by sword' or 'by distaff', polish specific """ if level <= 1: return "" elif level == 2: # dziadek/babcia if reltocommon[0] == self.REL_FATHER: # ze strony rodzonego ojca return spacebefore + "po mieczu" elif reltocommon[0] == self.REL_MOTHER: # ze strony rodzonej matki return spacebefore + "po kądzieli" else: # relacja inna niż rodzona return "" elif level == 3: # pradziadek/prababcia if (reltocommon[0] == self.REL_FATHER) \ & (reltocommon[1] == self.REL_FATHER): # pradziadek od dziadka ze strony ojca return spacebefore + "podwójnego miecza" elif (reltocommon[0] == self.REL_FATHER) \ & (reltocommon[1] == self.REL_MOTHER): # pradziadek od babci ze strony ojca return spacebefore + "raz po mieczu, dalej po kądzieli" elif (reltocommon[0] == self.REL_MOTHER) \ & (reltocommon[1] == self.REL_FATHER): # pradziadek od dziadka ze strony matki return spacebefore + "raz po kądzieli, dalej po mieczu" elif (reltocommon[0] == self.REL_MOTHER) \ & (reltocommon[1] == self.REL_MOTHER): # pradziadek od babci ze strony matki return spacebefore + "podwójnej kądzieli" else: # relacja inna niż rodzona return "" elif level == 4: # prapradziadek/praprababcia if (reltocommon[0] == self.REL_FATHER) \ & (reltocommon[1] == self.REL_FATHER) \ & (reltocommon[2] == self.REL_FATHER): # tzw. linia męska return spacebefore + "potrójnego miecza" if (reltocommon[0] == self.REL_FATHER) \ & (reltocommon[1] == self.REL_FATHER) \ & (reltocommon[2] == self.REL_FATHER): # tzw. linia żeńska return spacebefore + "potrójnego miecza" else: return "" else: return "" def get_father(self, level, reltocommon, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo przodek męski (np. 
ojciec) jest spokrewniony do danej osoby """ if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrany " if level >= 0 and level < len(_father_level): # Jeśli znasz bezpośrednią nazwę relacji, to ją zastosuj if level == 1: # ojciec return t_inlaw + _father_level[level] elif (level >= 2) & (level <= 4): # dziadek, pradziadek, prapradziadek return t_inlaw + _father_level[level] \ + self.get_sword_distaff(level, reltocommon, ' ') else: return t_inlaw + _father_level[level] elif level >= len(_father_level) and (level - 1) < len(_level_name): # jeśli istnieje liczebnik dla danej liczby return t_inlaw + \ "przodek męski %s pokolenia" % (_level_name[level - 1]) else: # dla pozostałych przypadków wypisz relację liczbowo return t_inlaw + \ "przodek męski w %d pokoleniu" % level def get_mother(self, level, reltocommon, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo przodek żeński (np. matka) jest spokrewniony do danej osoby """ if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrana " if level >= 0 and level < len(_mother_level): # Jeśli znasz bezpośrednią nazwę relacji, to ją zastosuj if level == 1: # matka return t_inlaw + _mother_level[level] elif (level >= 2) & (level <= 4): # babcia, prababcia, praprababcia return t_inlaw + _mother_level[level] \ + self.get_sword_distaff(level, reltocommon, ' ') else: return t_inlaw + _mother_level[level] elif level >= len(_mother_level) and (level - 1) < len(_level_name): # jeśli istnieje liczebnik dla danej liczby return t_inlaw + \ "przodek żeński %s pokolenia" % (_level_name[level - 1]) else: # dla pozostałych przypadków wypisz relację liczbowo return t_inlaw +"przodek żeński w %d pokoleniu" % level def get_parent_unknown(self, level, inlaw=''): """ Podaje tekst zawierający informację, jak bardzo przodek o nieokreślonej płci jest spokrewniony dodanej osoby """ if inlaw == '': t_inlaw = "" else: t_inlaw = "przybrany " if level == 1: return t_inlaw + "rodzic" elif level > 1 and (level - 1) < len(_level_name): if (level >= 2) & (level <= 4): # babcia, prababcia, praprababcia # (albo dziadek, pradziadek, prapradziadek) tmp = t_inlaw +\ "przodek %s pokolenia" % (_level_name[level - 1]) # TODO: try to recognize a gender... return tmp # + self.get_sword_distaff(level, reltocommon, ' ') else: return t_inlaw + \ "przodek %s pokolenia" % (_level_name[level - 1]) else: return t_inlaw +"przodek w %d pokoleniu" % level def get_single_relationship_string(self, Ga, Gb, gender_a, gender_b, reltocommon_a, reltocommon_b, only_birth=True, in_law_a=False, in_law_b=False): """ Provide a string that describes the relationsip between a person, and another person. E.g. "grandparent" or "child". 
""" if only_birth: step = '' else: step = self.STEP if in_law_a or in_law_b : inlaw = self.INLAW else: inlaw = '' # b is the same person as a if Ga == Gb == 0: rel_str = 'ta sama osoba' elif Ga == 0: # b is son/descendant of a if gender_b == Person.MALE: if inlaw and Gb == 1 and not step: rel_str = "zięć" else: rel_str = self.get_son(Gb, inlaw) elif gender_b == Person.FEMALE: if inlaw and Gb == 1 and not step: rel_str = "synowa" else: rel_str = self.get_daughter(Gb, inlaw) else: rel_str = self.get_child_unknown(Gb, inlaw) elif Gb == 0: # b is parent/grand parent of a if gender_b == Person.MALE: if inlaw and Gb == 1 and not step: # TODO: znaleźć odpowiedniki w zależności czy to syn/córka rel_str = "teść" else: rel_str = self.get_father(Ga, reltocommon_a, inlaw) elif gender_b == Person.FEMALE: if inlaw and Gb == 1 and not step: # TODO: znaleźć odpowiedniki w zależności czy to syn/córka rel_str = "teściowa" else: rel_str = self.get_mother(Ga, reltocommon_a, inlaw) else: rel_str = self.get_parent_unknown(Ga, inlaw) elif Ga == Gb == 1: # rodzeństwo if gender_b == Person.MALE: if inlaw and not step: rel_str = "brat przyrodni" else: rel_str = "brat rodzony" elif gender_b == Person.FEMALE: if inlaw and not step: rel_str = "siostra przyrodnia" else: rel_str = "siostra rodzony" else: rel_str = "brat/siostra" elif Gb == 1 and Ga > 1: # Przyjmij, że nie rozróżniamy osób prawnie i nieprawnie przybranych... if Ga == 2: # rodzeństwo rodziców # brat ojca, czyli stryj if (gender_b == Person.MALE) \ & (reltocommon_a[0] == self.REL_FATHER): rel_str = "stryj" # siostra ojca, czyli ciotka ??? elif (gender_b == Person.FEMALE) \ & (reltocommon_a[0] == self.REL_FATHER): rel_str = "ciotka (tzw. stryjna)" # brat matki, czyli wuj/wujek elif (gender_b == Person.MALE) \ & (reltocommon_a[0] == self.REL_MOTHER): rel_str = "wuj (wujek)" # siostra matki, czyli ciotka elif (gender_b == Person.FEMALE) \ & (reltocommon_a[0] == self.REL_MOTHER): rel_str = "ciotka" else: rel_str = "brat lub siostra rodzica" elif Ga == 3: # rodzeństwo dziadków rodziców osoby sprawdzanej # rodzeństwo dziadka po mieczu (ojca ojca) if (reltocommon_a[0] == self.REL_FATHER) \ & (reltocommon_a[1] == self.REL_FATHER): if (gender_b == Person.MALE): rel_str = "dziadek stryjeczny (tzw przestryj, stary stryj)" elif (gender_b == Person.FEMALE): rel_str = "babcia stryjeczna" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" # rodzeństwo babki po mieczu (matki ojca) elif (reltocommon_a[0] == self.REL_FATHER) \ & (reltocommon_a[1] == self.REL_MOTHER): # TODO: Należy sprawdzić, czy w staropolszczyźnie nie ma # dokładniejszych określeń dla tego typu relacji # TODO: EN: Try to check, whether in old polish language # are more specific word for this kind of relation if (gender_b == Person.MALE): rel_str = "dziadek stryjeczny (tzw przestryj, stary stryj)" elif (gender_b == Person.FEMALE): rel_str = "babcia stryjeczna" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" # rodzeństwo dziadka po kądzieli (ojca matki) elif (reltocommon_a[0] == self.REL_MOTHER) \ & (reltocommon_a[1] == self.REL_FATHER): # TODO: Należy sprawdzić, czy w staropolszczyźnie nie ma # dokładniejszych określeń dla tego typu relacji # TODO: EN: Try to check, whether in old polish language # are more specific word for this kind of relation if (gender_b == Person.MALE): rel_str = "dziadek cioteczny (starop. prapociot)" elif (gender_b == Person.FEMALE): rel_str = "babcia cioteczna (starop. 
praciota)" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" # rodzeństwo babki po kądzieli (matki matki) elif (reltocommon_a[0] == self.REL_MOTHER) \ & (reltocommon_a[1] == self.REL_MOTHER): # TODO: Należy sprawdzić, czy w staropolszczyźnie nie ma # dokładniejszych określeń dla tego typu relacji # TODO: EN: Try to check, whether in old polish language # are more specific word for this kind of relation if (gender_b == Person.MALE): rel_str = "dziadek cioteczny (starop. prapociot)" elif (gender_b == Person.FEMALE): rel_str = "babcia cioteczna (starop. praciota)" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" else: if (gender_b == Person.MALE): rel_str = "rodzeństwo dziadka" elif (gender_b == Person.FEMALE): rel_str = "rodzeństwo babci" else: rel_str = "rodzeństwo przodka w 2 pokoleniu" elif Ga > 3: # pradziadkowie... (grandparents) if (gender_b == Person.MALE) \ & (reltocommon_a[0] == self.REL_FATHER): if Ga >= 0 and Ga < len(_brother_level_of_male): rel_str = _brother_level_of_male[Ga] else: rel_str = "rodzeństwo przodka męskiego %d pokolenia" % Ga elif (gender_b == Person.FEMALE) \ & (reltocommon_a[0] == self.REL_FATHER): if Ga >= 0 and Ga < len(_sister_level_of_male): rel_str = _sister_level_of_male[Ga] else: rel_str = "rodzeństwo przodka żeńskiego %d pokolenia" % Ga elif (gender_b == Person.MALE) \ & (reltocommon_a[0] == self.REL_MOTHER): if Ga >= 0 and Ga < len(_brother_level_of_female): rel_str = _brother_level_of_male[Ga] else: rel_str = "rodzeństwo przodka męskiego %d pokolenia" % Ga elif (gender_b == Person.FEMALE) \ & (reltocommon_a[0] == self.REL_MOTHER): if Ga >= 0 and Ga < len(_sister_level_of_female): rel_str = _sister_level_of_male[Ga] else: rel_str = "rodzeństwo przodka żeńskiego %d pokolenia" % Ga else: rel_str = "rodzeństwo przodka %d pokolenia" % Ga else: # A program should never goes there, but... rel_str = "Relacja nie określona" elif Ga ==1 and Gb > 1: # syn brata if (gender_b == Person.MALE) \ & (reltocommon_b[0] == self.REL_FATHER): if Gb < len(_nephew_level_of_brothers_son): rel_str = _nephew_level_of_brothers_son[Gb] else: rel_str = "męski potomek w %d pokoleniu brata" % Gb # córka brata elif (gender_b == Person.FEMALE) \ & (reltocommon_b[0] == self.REL_FATHER): if Gb < len(_nephew_level_of_brothers_daughter): rel_str = _nephew_level_of_brothers_daughter[Gb] else: rel_str = "żeński potomek w %d pokoleniu brata" % Gb # syn siostry if (gender_b == Person.MALE) \ & (reltocommon_b[0] == self.REL_MOTHER): if Gb < len(_nephew_level_of_sisters_son): rel_str = _nephew_level_of_sisters_son[Gb] else: rel_str = "męski potomek w %d pokoleniu brata" % Gb # córka siostry elif (gender_b == Person.FEMALE) \ & (reltocommon_b[0] == self.REL_MOTHER): if Gb < len(_nephew_level_of_sisters_daughter): rel_str = _nephew_level_of_sisters_daughter[Gb] else: rel_str = "żeński potomek w %d pokoleniu brata" % Gb # potomek brata elif (reltocommon_b[0] == self.REL_FATHER): rel_str = "potomek w %d pokoleniu brata" % Gb # potomek brata elif (reltocommon_b[0] == self.REL_MOTHER): rel_str = "potomek w %d pokoleniu brata" % Gb else : rel_str = "potomek w %d pokoleniu rodzeństwa" % Gb elif Ga > 1 and Gb > 1: if (gender_b == Person.MALE): if Ga == 2 and Gb == 2: rel_str = "kuzyn" else: rel_str = "daleki kuzyn (%d. stopień pokrewieństwa)" % (Ga+Gb) elif (gender_b == Person.FEMALE): if Ga == 2 and Gb == 2: rel_str = "kuzynka" else: rel_str = "daleka kuzynka (%d. stopień pokrewieństwa)" % (Ga+Gb) else: if Ga == 2 and Gb == 2: rel_str = "kuzyn(ka)" else: rel_str = "daleki członek rodziny (%d. 
stopień pokrewieństwa)" % (Ga+Gb) else: # A program should never goes there, but... rel_str = "nieokreślony stopień pokrewieństwa" return rel_str def get_plural_relationship_string(self, Ga, Gb, reltocommon_a='', reltocommon_b='', only_birth=True, in_law_a=False, in_law_b=False): """ Generate a text with information, how far away is a group of persons from a main person """ if Ga == Gb == 0: return 'ta sama osoba' if 0 == Ga: if 1 == Gb: return 'Dzieci' if 2 == Gb: return 'Wnuki' if 3 == Gb: return 'Prawnuki' if 4 == Gb: return 'Praprawnuki' return 'Praprapra(n)wnuki' if 0 == Gb: if 1 == Ga: return 'Rodzice' if 2 == Ga: return 'Dziadkowie' if 3 == Ga: return 'Pradziadkowie' if 4 == Ga: return 'Praprapradziadkowie' return 'Praprapra(n)dziadkowie' if 1 == Ga == Gb: return 'Rodzeństwo' if 1 == Gb and Ga > 1: return 'Wujowie/stryjowie i ciocie' if 1 < Gb and 1 == Ga: return 'bratankowie(ice)/siostrzeńcy(nice)' if 1 < Ga and 1 < Gb: return 'dalsza rodzina' return 'relacja nieznana' def get_sibling_relationship_string(self, sib_type, gender_a, gender_b, in_law_a=False, in_law_b=False): if in_law_a or in_law_b : inlaw = self.INLAW else: inlaw = '' if sib_type == self.NORM_SIB: if not inlaw: if gender_b == Person.MALE: rel_str = 'brat (rodzony)' elif gender_b == Person.FEMALE: rel_str = 'siostra (rodzona)' else: rel_str = 'brat lub siostra (rodzeni)' else: if gender_b == Person.MALE: # TODO: znaleźć odpowiednik rel_str = "brat (pasierb)" elif gender_b == Person.FEMALE: # TODO: znaleźć odpowiednik rel_str = "siostra (pasierbica)" else: # TODO: znaleźć odpowiednik rel_str = "brat lub siostra (pasierb/pasierbica)" elif sib_type == self.UNKNOWN_SIB: if not inlaw: if gender_b == Person.MALE: rel_str = 'brat' elif gender_b == Person.FEMALE: rel_str = 'siostra' else: rel_str = 'brat lub siostra' else: if gender_b == Person.MALE: # TODO: znaleźć odpowiednik rel_str = "brat (brat/szwagier)" elif gender_b == Person.FEMALE: # TODO: znaleźć odpowiednik rel_str = "siostra (bratowa/szwagierka)" else: # TODO: znaleźć odpowiednik rel_str = "brat lub siostra (szwagier/szagierka)" elif sib_type == self.HALF_SIB_FATHER: if gender_b == Person.MALE: rel_str = "brat przyrodni" elif gender_b == Person.FEMALE: rel_str = "siostra przyrodnia" else: rel_str = "brat/siostra przyrodni" elif sib_type == self.HALF_SIB_MOTHER: if gender_b == Person.MALE: rel_str = "brat przyrodni" elif gender_b == Person.FEMALE: rel_str = "siostra przyrodnia" else: rel_str = "brat/siostra przyrodni" elif sib_type == self.STEP_SIB: if gender_b == Person.MALE: rel_str = "brat przyrodni" elif gender_b == Person.FEMALE: rel_str = "siostra przyrodnia" else: rel_str = "brat lub siostra przyrodnia" else: rel_str = "nieokreślona relacja rodzeństwa" return rel_str if __name__ == "__main__": # Test function. Call it as follows from the command line (so as to find # imported modules): # export PYTHONPATH=/path/to/gramps/src # python src/plugins/rel/rel_pl.py """TRANSLATORS, copy this if statement at the bottom of your rel_xx.py module, and test your work with: python src/plugins/rel/rel_xx.py """ from gprime.relationship import test RC = RelationshipCalculator() test(RC, True)
gpl-2.0
8,653,816,077,234,441,000
33.513543
92
0.522382
false
3.048421
false
false
false
chrishokamp/maxent-decoder
phrase_table/Phrase_Table.py
1
1704
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import re
from collections import deque, defaultdict


class Phrase_Table:

    # TODO: unit names should actually be objects with sub-fields
    def __init__(self, lines, unit_names):
        self.phrase_table = self.build_phrase_table(lines, unit_names)
        # store the field names in case we need them later
        self.field_names = unit_names

    def build_phrase_table(self, lines, unit_names):
        line_units = [line.split('|||') for line in lines]

        def strip_list(l):
            return deque([u.strip() for u in l])

        lists_of_units = [strip_list(x) for x in line_units]
        phrase_table = defaultdict(list)

        # assume first elem is the key
        for entry in lists_of_units:
            f_phrase = entry.popleft()
            e_phrase = entry.popleft()
            # currently unused
            counts = entry.pop()
            alignment = entry.pop()
            # end unused

            # split each field on whitespace except target -- there should be a name for every field
            flattened = []
            for section in entry:
                flattened = flattened + re.split('\s+', section)
            flattened = [e_phrase] + flattened  #TODO: hack
            e = {k: v for k, v in zip(unit_names, flattened)}
            phrase_table[f_phrase].append(e)
        return phrase_table

    # TODO: will throw error when item isn't found
    def getEntry(self, phrase):
        return self.phrase_table[phrase]

    def contains(self, phrase):
        if self.phrase_table[phrase] != []:
            return True
        else:
            return False

    def getTable(self):
        return self.phrase_table
mit
-8,461,846,650,403,378,000
30.555556
100
0.581573
false
4.125908
false
false
false
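A minimal usage sketch for the Phrase_Table class above, assuming Moses-style phrase-table lines of the form "f ||| e ||| scores ||| alignment ||| counts". The example lines, scores and field names below are invented for illustration and are not part of the repository:

from phrase_table.Phrase_Table import Phrase_Table

# Two hypothetical phrase-table entries for the same source phrase.
lines = [
    "das Haus ||| the house ||| 0.6 0.7 ||| 0-0 1-1 ||| 10 12 8",
    "das Haus ||| the building ||| 0.3 0.2 ||| 0-0 1-1 ||| 10 4 2",
]
# One name per flattened field: the target phrase first, then the whitespace-split scores.
unit_names = ["e_phrase", "p_f_given_e", "p_e_given_f"]

table = Phrase_Table(lines, unit_names)
if table.contains("das Haus"):
    for entry in table.getEntry("das Haus"):
        print(entry["e_phrase"], entry["p_f_given_e"])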
GoberInfinity/ExampleDjango
modeladmin/migrations/0001_initial.py
1
1439
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-13 08:31
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('crudclassviews', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Animal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=140)),
                ('animal', models.CharField(max_length=140)),
                ('dbirth', models.DateField()),
                ('owner', models.ManyToManyField(to='crudclassviews.Person')),
            ],
            options={
                'ordering': ['animal'],
            },
        ),
        migrations.CreateModel(
            name='Size',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('animal_size', models.CharField(choices=[('S', 'Small'), ('M', 'Medium'), ('L', 'Large')], max_length=1)),
            ],
        ),
        migrations.AddField(
            model_name='animal',
            name='size',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='modeladmin.Size'),
        ),
    ]
mit
6,050,514,650,440,598,000
32.465116
123
0.539958
false
4.257396
false
false
false
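For orientation, the models that would produce this initial migration can be reconstructed from the operations above. The sketch below is an inferred modeladmin/models.py, not a file taken from the repository; the import location of Person and the name of the choices tuple are assumptions:

from django.db import models
from crudclassviews.models import Person  # assumed location of the Person model referenced by the M2M field


class Size(models.Model):
    # Choices mirror the migration: ('S', 'Small'), ('M', 'Medium'), ('L', 'Large')
    SIZE_CHOICES = (('S', 'Small'), ('M', 'Medium'), ('L', 'Large'))
    animal_size = models.CharField(max_length=1, choices=SIZE_CHOICES)


class Animal(models.Model):
    name = models.CharField(max_length=140)
    animal = models.CharField(max_length=140)
    dbirth = models.DateField()
    owner = models.ManyToManyField(Person)
    size = models.ForeignKey(Size, on_delete=models.CASCADE)

    class Meta:
        ordering = ['animal']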
openstack/ceilometer
ceilometer/compute/pollsters/disk.py
1
3344
# # Copyright 2012 eNovance <licensing@enovance.com> # Copyright 2012 Red Hat, Inc # Copyright 2014 Cisco Systems, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from ceilometer.compute import pollsters from ceilometer import sample class PerDeviceDiskPollster(pollsters.GenericComputePollster): inspector_method = "inspect_disks" @staticmethod def get_resource_id(instance, stats): return "%s-%s" % (instance.id, stats.device) @staticmethod def get_additional_metadata(instance, stats): return {'disk_name': stats.device} class PerDeviceReadRequestsPollster(PerDeviceDiskPollster): sample_name = 'disk.device.read.requests' sample_unit = 'request' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'read_requests' class PerDeviceReadBytesPollster(PerDeviceDiskPollster): sample_name = 'disk.device.read.bytes' sample_unit = 'B' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'read_bytes' class PerDeviceWriteRequestsPollster(PerDeviceDiskPollster): sample_name = 'disk.device.write.requests' sample_unit = 'request' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'write_requests' class PerDeviceWriteBytesPollster(PerDeviceDiskPollster): sample_name = 'disk.device.write.bytes' sample_unit = 'B' sample_type = sample.TYPE_CUMULATIVE sample_stats_key = 'write_bytes' class PerDeviceDiskLatencyPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_latency' sample_name = 'disk.device.latency' sample_unit = 'ms' sample_stats_key = 'disk_latency' class PerDeviceDiskIOPSPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_iops' sample_name = 'disk.device.iops' sample_unit = 'count/s' sample_stats_key = 'iops_count' class PerDeviceCapacityPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.device.capacity' sample_unit = 'B' sample_stats_key = 'capacity' class PerDeviceAllocationPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.device.allocation' sample_unit = 'B' sample_stats_key = 'allocation' class PerDevicePhysicalPollster(PerDeviceDiskPollster): inspector_method = 'inspect_disk_info' sample_name = 'disk.device.usage' sample_unit = 'B' sample_stats_key = 'physical' class PerDeviceDiskReadLatencyPollster(PerDeviceDiskPollster): sample_name = 'disk.device.read.latency' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'ns' sample_stats_key = 'rd_total_times' class PerDeviceDiskWriteLatencyPollster(PerDeviceDiskPollster): sample_name = 'disk.device.write.latency' sample_type = sample.TYPE_CUMULATIVE sample_unit = 'ns' sample_stats_key = 'wr_total_times'
apache-2.0
8,738,284,241,909,994,000
30.252336
75
0.733553
false
3.422723
false
false
false
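The disk pollsters above are purely declarative: each subclass of PerDeviceDiskPollster only selects an inspector method, a sample name, a unit, a sample type and the stats key to read. Adding another per-device metric therefore takes a handful of lines. The class below is a hypothetical illustration of that pattern, not an existing Ceilometer pollster, and the discard_requests stats key is invented:

from ceilometer.compute.pollsters import disk
from ceilometer import sample


class PerDeviceDiscardRequestsPollster(disk.PerDeviceDiskPollster):
    # Hypothetical cumulative counter of discard (TRIM) requests per device.
    sample_name = 'disk.device.discard.requests'
    sample_unit = 'request'
    sample_type = sample.TYPE_CUMULATIVE
    sample_stats_key = 'discard_requests'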
itucsdb1628/itucsdb1628
suggestion.py
1
3003
import psycopg2 as dbapi2
from flask import request
from flask_login import current_user, login_required, login_user, logout_user
from datetime import date
from dsn_conf import get_dsn

dsn = get_dsn()


def select_suggestions():
    with dbapi2.connect(dsn) as connection:
        try:
            cursor = connection.cursor()
            query = """SELECT SUGGESTION.ID,USERDATA.USERNAME, SUGGESTION.ARTIST,
                       SUGGESTION.SONGNAME,SUGGESTION.RELEASEDATE,SUGGESTION.SUGGESTIONDATE,
                       SUGGESTION.STATU
                       FROM SUGGESTION,USERDATA
                       WHERE( USERDATA.ID = SUGGESTION.USERID)
                       ORDER BY SUGGESTION.STATU DESC"""
            cursor.execute(query)
            return cursor
        except dbapi2.DatabaseError as e:
            connection.rollback()


def select_suggestions_user():
    with dbapi2.connect(dsn) as connection:
        try:
            cursor = connection.cursor()
            # Bind the user id as a query parameter instead of interpolating it into the
            # SQL string, so the statement is not vulnerable to injection.
            query = """SELECT ID,ARTIST,SONGNAME,RELEASEDATE,SUGGESTIONDATE,STATU
                       FROM SUGGESTION
                       WHERE( SUGGESTION.USERID = %s )
                       ORDER BY SUGGESTION.SUGGESTIONDATE"""
            cursor.execute(query, (current_user.id,))
            return cursor
        except dbapi2.DatabaseError as e:
            connection.rollback()


def insert_suggestion(userid, artist, songname, releasedate):
    with dbapi2.connect(dsn) as connection:
        try:
            cursor = connection.cursor()
            query = """INSERT INTO SUGGESTION(USERID,ARTIST,SONGNAME,SUGGESTIONDATE,RELEASEDATE,STATU)
                       VALUES(%s,%s,%s,%s,%s,%s)"""
            cursor.execute(query, (userid, artist, songname, date.today(), releasedate, 2))
            connection.commit()
        except dbapi2.DatabaseError as e:
            connection.rollback()


def delete_suggestion(deleteId):
    with dbapi2.connect(dsn) as connection:
        try:
            cursor = connection.cursor()
            cursor.execute("""DELETE FROM SUGGESTION WHERE ID = %s""", (int(deleteId),))
            connection.commit()
        except dbapi2.DatabaseError as e:
            connection.rollback()


def reject_suggestion(updateId):
    with dbapi2.connect(dsn) as connection:
        try:
            cursor = connection.cursor()
            query = """UPDATE SUGGESTION SET STATU = 0 WHERE ID = %s"""
            cursor.execute(query, (updateId,))
            connection.commit()
        except dbapi2.DatabaseError as e:
            connection.rollback()


def approve_suggestion(updateId):
    with dbapi2.connect(dsn) as connection:
        try:
            cursor = connection.cursor()
            query = """UPDATE SUGGESTION SET STATU = 1 WHERE ID = %s"""
            cursor.execute(query, (updateId,))
            connection.commit()
        except dbapi2.DatabaseError as e:
            connection.rollback()
gpl-3.0
2,680,382,297,286,805,500
36.08642
102
0.589078
false
4.296137
false
false
false
rechner/Taxidi
signature.py
1
14273
#!/usr/bin/env python #*-* coding:utf-8 *-* # signature.py © 2012 Zac Sturgeon and Nathan Lex # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. """ A simple free-drawing area which outputs the drawing as a list of line segments, suitable for capturing signatures to store in a database. The module includes an optimised compression algorithm with a 92% compression ratio. See http://jkltech.net/taxidi/wiki/Signature_Format """ __version__ = '0.1' __all__ = ['SignaturePad'] import wx import math import zlib import base64 directions = [bin(0x7070563)[2:][i:i+3] for i in range(0,27,3)] def encode(lines): if len(lines) == 0: return '0' # check if 3 points are on the same line, in order def ison(a, c, b): within = lambda p, q, r: p <= q <= r or r <= q <= p return ((b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) and (within(a[0], c[0], b[0]) if a[0] != b[0] else within(a[1], c[1], b[1]))) # converts series of lines to 'connect the dots', and looks for single dots strokes = [[lines[0][0:2]]]; dots = [] for line in lines: if line[0:2] != strokes[-1][-1]: if len(strokes[-1]) == 1: dots += strokes.pop() strokes += [[line[0:2]]] if line[2:4] != strokes[-1][-1]: if len(strokes[-1]) > 1 and \ ison(strokes[-1][-2], strokes[-1][-1], line[2:4]): strokes[-1][-1] = line[2:4] else: strokes[-1] += [line[2:4]] if len(strokes[-1]) == 1: dots += strokes.pop() # big endian, most significant first def BEVLI4Enc(num): if num == 0: return '0' * 4 else: temp = -(-int(math.log(num, 2) + 1) // 3) * 3 temp = [bin(num)[2:].zfill(temp)[i:i+3] for i in range(0, temp, 3)] return '1'.join([''] + temp[:-1]) + '0' + temp[-1] # encode dots in binary data = ''.join(map(BEVLI4Enc, [len(dots)] + [i for d in dots for i in d])) # convert series of points to deltas, then convert to binary for stroke in strokes: prev_point = stroke[0] data += ''.join(map(BEVLI4Enc, (len(stroke) - 2,) + prev_point)) for point in stroke[1:]: dx, dy = point[0] - prev_point[0], point[1] - prev_point[1] prev_point = point # format: bit 'is this delta more than 1 pixel?', 3xbits direction # directions: 111 000 001 # 110 # 010 # 101 100 011 isleap = abs(dx) > 1 or abs(dy) > 1 data += ('1' if isleap else '0') + \ directions[cmp(dx, 0) + 1 + (cmp(dy, 0) + 1) * 3] if isleap: if abs(dx): data += BEVLI4Enc(abs(dx)) if abs(dy): data += BEVLI4Enc(abs(dy)) # pad to byte boundry, then convert to binary data = ''.join(map(lambda x: chr(int(x, 2)), \ [data[i:i+8].ljust(8, '0') for i in range(0,len(data),8)])) # base 95 encoder def b95btoa(b): b95 = ''; n = int(('_' + b).encode('hex'), 16) while n > 0: b95 += chr(int(n % 95 + 32)); n /= 95 return b95[::-1] # compress using zlib if it makes it smaller z = zlib.compress(data)[2:-4] if len(z) < len(data): return 'c' + b95btoa(z) else: return 'e' + b95btoa(data) def decode(data): if data[0] == '0': return [] # dewrapper functions def inflate(z): return 
zlib.decompress(z, -zlib.MAX_WBITS) def b64atob(b64): return base64.b64decode(b64 + '=' * (4 - len(b64) % 4)) def b95atob(b95): n = 0; m = 1 for c in b95[::-1]: n += (ord(c) - 32) * m; m *= 95 return hex(n)[4:-1].decode('hex') def unwrap(d): return { 'a': inflate, # zlib compression 'b': lambda x: x, # raw version 1 format 'c': lambda x: inflate(b95atob(x)), # base 95 encoding, zlib compression 'd': lambda x: inflate(b64atob(x)), # base 64 encoding, zlib compression 'e': b95atob, # base 95 encoding, no compression 'f': b64atob # base 64 encoding, no compression }[d[0]](d[1:]) # unwrap, break into groups of 4, and convert to 01 data = ''.join([bin(ord(c))[2:].rjust(8, '0') for c in unwrap(data)]) data = [data[i:i+4] for i in range(0, len(data), 4)] def BEVLI4Dec(arr): temp = [arr.pop(0)] while temp[-1][0] == '1': temp += [arr.pop(0)] return int(''.join([i[1:4] for i in temp]), 2) #decode dots lines = [] for d in range(0, BEVLI4Dec(data)): x, y = BEVLI4Dec(data), BEVLI4Dec(data) lines += [(x, y, x, y)] #decode strokes num_points = BEVLI4Dec(data) while num_points > 0: last_line = (0, 0, BEVLI4Dec(data), BEVLI4Dec(data)) for i in range (0, num_points + 1): isleap = data[0][0] == '1' direction = directions.index(data.pop(0)[1:4]) dx, dy = direction % 3 - 1, direction / 3 - 1 last_line = (last_line[2], last_line[3], last_line[2] + dx * (BEVLI4Dec(data) if isleap and dx != 0 else 1), last_line[3] + dy * (BEVLI4Dec(data) if isleap and dy != 0 else 1)) lines += [last_line] num_points = BEVLI4Dec(data) if len(data) > 0 else 0 return lines class SignaturePad(wx.Window): """Widget for drawing and capturing a signature. Optimised for a size of 500 x 200.""" def __init__(self, parent, signatureLine=True, signatureLineText='Sign Here', signatureLineColour='Grey'): super(SignaturePad, self).__init__(parent, style=wx.NO_FULL_REPAINT_ON_RESIZE) self._initDrawing() self._bindEvents() self._initBuffer() self.signature = [] self.debug = False #Set to true to enable debugging output self.signatureLine = signatureLine self.signatureLineText = signatureLineText self.signatureLineColour = signatureLineColour self.SetMinSize((500, 200)) self.SetMaxSize((500, 200)) def _initDrawing(self): self.SetBackgroundColour('White') self.penThickness = 2 #default pen thickness self.penColour = '#145394' #default colour self.lines = [] self.previousPosition = (0, 0) def _bindEvents(self): for event, handler in [ \ (wx.EVT_LEFT_DOWN, self.onLeftDown), # Start drawing (wx.EVT_LEFT_UP, self.onLeftUp), # Stop drawing (wx.EVT_MOTION, self.onMotion), # Draw (wx.EVT_SIZE, self.onSize), # Prepare for redraw (wx.EVT_IDLE, self.onIdle), # Redraw (wx.EVT_PAINT, self.onPaint), # Refresh (wx.EVT_WINDOW_DESTROY, self.cleanup)]: self.Bind(event, handler) def _initBuffer(self): # Initialize the bitmap used for the display buffer size = self.GetClientSize() self.buffer = wx.EmptyBitmap(size.width, size.height) dc = wx.BufferedDC(None, self.buffer) dc.SetBackground(wx.Brush(self.GetBackgroundColour())) dc.Clear() self.drawLines(dc, *self.lines) self.reInitBuffer = False #set flag def SetPenColour(self, colour): """Sets the active pen colour. 
Returns true if changed.""" if (self.penColour == colour): return False self.penColour = colour return True def SetPenThickness(self, thickness): """Sets the pen thickness.""" self.penThickness = thickness #Event handlers: def onLeftDown(self, event): """Called on left button press (pen down)""" self.currentLine = [] self.previousPosition = event.GetPositionTuple() self.CaptureMouse() def onLeftUp(self, event): """Called on left button release (pen up)""" if self.HasCapture(): self.lines.append((self.penColour, self.penThickness, self.currentLine)) self.currentLine = [] self.ReleaseMouse() def onMotion(self, event): """Called when the mouse moving (pen is being dragged). If the left button is down while dragging, a line is drawn from the last event position to the new one. Coordinates are saved for redrawing and appended to the signature output.""" if event.Dragging() and event.LeftIsDown(): dc = wx.BufferedDC(wx.ClientDC(self), self.buffer) currentPosition = event.GetPositionTuple() lineSegment = self.previousPosition + currentPosition self.signature.append(lineSegment) #Store signature value self.drawLines(dc, (self.penColour, self.penThickness, [lineSegment])) self.currentLine.append(lineSegment) self.previousPosition = currentPosition if self.debug: print self.signature print len(self.signature) def onSize(self, event): """Enables flag to cause a redraw event if the window is resized""" self.reInitBuffer = True def onIdle(self, event): """If the window is resized, the bitmap is recopied to match the new window size. The buffer is re-initialized while idle such that a refresh only occurs once when needed.""" if self.reInitBuffer: self._initBuffer() self.Refresh(False) def onPaint(self, event): """Paints window and signature line when exposed.""" # Create a buffered paint DC. It will create the real # wx.PaintDC and then blit the bitmap to it when dc is # deleted. Since we don't need to draw anything else # here that's all there is to it. dc = wx.BufferedPaintDC(self, self.buffer) #Signature line if self.signatureLine: self.drawLines(dc, (self.signatureLineColour, 2, [(20, 150, 480, 150)])) font = wx.Font(10, wx.FONTFAMILY_SCRIPT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL) dc.SetFont(font) dc.SetTextForeground(self.signatureLineColour) dc.DrawText(self.signatureLineText, 20, 155) def clear(self): self.currentLine = [] self.signature = [] self.reInitBuffer = True self._initBuffer() dc = wx.BufferedDC(wx.ClientDC(self), self.buffer) dc.Clear() self.Refresh() def cleanup(self, event): #for future use return True @staticmethod def drawLines(dc, *lines): ''' drawLines takes a device context (dc) and a list of lines as arguments. Each line is a three-tuple: (colour, thickness, linesegments). linesegments is a list of coordinates: (x1, y1, x2, y2). 
''' dc.BeginDrawing() for colour, thickness, lineSegments in lines: pen = wx.Pen(colour, thickness, wx.SOLID) dc.SetPen(pen) for lineSegment in lineSegments: dc.DrawLine(*lineSegment) dc.EndDrawing() t_CONTROLS_CANCEL = wx.NewEventType() CONTROLS_CANCEL = wx.PyEventBinder(t_CONTROLS_CANCEL, 1) class SignaturePadControls(wx.Panel): def __init__(self, parent=None): super(SignaturePadControls, self).__init__(parent) sizer = wx.BoxSizer(wx.VERTICAL) bsizer = wx.BoxSizer(wx.HORIZONTAL) self.CancelButton = wx.Button(self, wx.ID_CANCEL, size=(-1, 50)) self.ClearButton = wx.Button(self, wx.ID_CLEAR, size=(-1, 50)) self.AcceptButton = wx.Button(self, wx.ID_OK, size=(-1, 50)) bsizer.Add(self.ClearButton, 1, wx.EXPAND | wx.ALL, 5) bsizer.AddStretchSpacer() bsizer.Add(self.CancelButton, 1, wx.EXPAND | wx.ALL, 5) bsizer.Add(self.AcceptButton, 1, wx.EXPAND | wx.ALL, 5) self.sigpad = SignaturePad(self) sizer.Add(bsizer, 0, wx.EXPAND) sizer.Add(self.sigpad, 1, wx.EXPAND) self.SetSizer(sizer) self.CancelButton.Bind(wx.EVT_BUTTON, self.onCancel) self.ClearButton.Bind(wx.EVT_BUTTON, self.onClear) self.AcceptButton.Bind(wx.EVT_BUTTON, self.onAccept) def onClear(self, event): self.sigpad.clear() def onCancel(self, event): evt2 = wx.PyCommandEvent(t_CONTROLS_CANCEL, self.GetId()) self.GetEventHandler().ProcessEvent(evt2) event.Skip() pass def onAccept(self, event): if self.sigpad.signature == []: wx.MessageBox('Signature cannot be blank!', 'Error', wx.OK | wx.ICON_ERROR) else: print self.sigpad.signature encoded = encode(self.sigpad.signature) print decode(encoded) class TestFrame(wx.Frame): def __init__(self, parent=None): super(TestFrame, self).__init__(parent, title="Signature Pad", size=(500,260), style=wx.DEFAULT_FRAME_STYLE^ wx.RESIZE_BORDER) signature = SignaturePadControls(self) signature.Bind(CONTROLS_CANCEL, self.onCancel) self.Centre() def onCancel(self, event): self.Close() if __name__ == '__main__': app = wx.App() frame = TestFrame() frame.Show() app.MainLoop()
gpl-3.0
134,584,395,785,613,250
35.974093
80
0.568316
false
3.552016
false
false
false
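Because encode and decode in signature.py are designed to round-trip a list of line segments, a small smoke test is easy to sketch. This assumes the module is importable as signature and uses only the two documented functions; note that encode may merge collinear segments, so the decoded list draws the same polyline but is not guaranteed to be element-for-element identical to the input:

import signature

# A captured signature is a list of (x1, y1, x2, y2) line segments, as produced by SignaturePad.
strokes = [(10, 20, 11, 20), (11, 20, 12, 21), (12, 21, 14, 23)]

packed = signature.encode(strokes)    # compact base-95 / zlib string suitable for database storage
restored = signature.decode(packed)   # back to a list of 4-tuples

print(packed)
print(restored)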
rdkit/rdkit-orig
Contrib/mmpa/cansmirk.py
2
3001
# Copyright (c) 2012, GlaxoSmithKline Research & Development Ltd. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of GlaxoSmithKline Research & Development Ltd. # nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Created by Jameed Hussain, September 2012 import sys import re from rdkit import Chem from indexing import cansmirk if __name__=='__main__': if (len(sys.argv) >= 2): print "Program that canonicalises an input SMIRKS so its in same format as MMP identification program.\n"; print "USAGE: ./cansmirks.py <file_of_smirks\n"; sys.exit(1) #read the STDIN for line in sys.stdin: line = line.rstrip() line_fields = re.split('\s|,',line) smirks = line_fields[0] if(len(line_fields) == 1): id="" else: id=line_fields[1] lhs,rhs = smirks.split(">>") l = Chem.MolFromSmiles( lhs ) if(l == None): sys.stderr.write("Can't generate mol for: %s\n" % (lhs) ) continue r = Chem.MolFromSmiles( rhs ) if(r == None): sys.stderr.write("Can't generate mol for: %s\n" % (rhs) ) continue clhs = Chem.MolToSmiles( l, isomericSmiles=True ) crhs = Chem.MolToSmiles( r, isomericSmiles=True ) #just need to take care of [*H:1] if(clhs == '[*H:1]'): clhs = '[*:1][H]' if(crhs == '[*H:1]'): crhs = '[*:1][H]' #print clhs #print crhs csmirk,context = cansmirk(clhs,crhs,"") print "%s %s" % (csmirk,id)
bsd-3-clause
-579,565,610,952,190,800
34.305882
107
0.656115
false
3.737235
false
false
false
nmdp-bioinformatics/service-gfe-submission
client-python/setup.py
1
2627
# coding: utf-8 """ Gene Feature Enumeration Service The Gene Feature Enumeration (GFE) Submission service provides an API for converting raw sequence data to GFE. It provides both a RESTful API and a simple user interface for converting raw sequence data to GFE results. Sequences can be submitted one at a time or as a fasta file. This service uses <a href=\"https://github.com/nmdp-bioinformatics/service-feature\">nmdp-bioinformatics/service-feature</a> for encoding the raw sequence data and <a href=\"https://github.com/nmdp-bioinformatics/HSA\">nmdp-bioinformatics/HSA</a> for aligning the raw sequence data. The code is open source, and available on <a href=\"https://github.com/nmdp-bioinformatics/service-gfe-submission\">GitHub</a>.<br><br>Go to <a href=\"http://service-gfe-submission.readthedocs.io\">service-gfe-submission.readthedocs.io</a> for more information OpenAPI spec version: 1.0.7 Contact: mhalagan@nmdp.org Generated by: https://github.com/swagger-api/swagger-codegen.git """ import sys from setuptools import setup, find_packages NAME = "swagger_client" VERSION = "1.0.0" # To install the library, run the following # # python setup.py install # # prerequisite: setuptools # http://pypi.python.org/pypi/setuptools REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"] setup( name=NAME, version=VERSION, description="Gene Feature Enumeration Service", author_email="mhalagan@nmdp.org", url="", keywords=["Swagger", "Gene Feature Enumeration Service"], install_requires=REQUIRES, packages=find_packages(), include_package_data=True, long_description="""\ The Gene Feature Enumeration (GFE) Submission service provides an API for converting raw sequence data to GFE. It provides both a RESTful API and a simple user interface for converting raw sequence data to GFE results. Sequences can be submitted one at a time or as a fasta file. This service uses &lt;a href&#x3D;\&quot;https://github.com/nmdp-bioinformatics/service-feature\&quot;&gt;nmdp-bioinformatics/service-feature&lt;/a&gt; for encoding the raw sequence data and &lt;a href&#x3D;\&quot;https://github.com/nmdp-bioinformatics/HSA\&quot;&gt;nmdp-bioinformatics/HSA&lt;/a&gt; for aligning the raw sequence data. The code is open source, and available on &lt;a href&#x3D;\&quot;https://github.com/nmdp-bioinformatics/service-gfe-submission\&quot;&gt;GitHub&lt;/a&gt;.&lt;br&gt;&lt;br&gt;Go to &lt;a href&#x3D;\&quot;http://service-gfe-submission.readthedocs.io\&quot;&gt;service-gfe-submission.readthedocs.io&lt;/a&gt; for more information """ )
gpl-3.0
1,899,160,545,371,149,600
63.073171
947
0.744956
false
3.191981
false
false
false
superfluidity/RDCL3D
code/lib/nemo/nemo_parser.py
1
1987
import json
import pyaml
import yaml
from lib.util import Util
from lib.parser import Parser
import logging
import traceback
import glob
import os

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('NemoParser')


class NemoParser(Parser):
    """Parser methods for nemo project type """

    def __init__(self):
        super(NemoParser, self).__init__()

    @classmethod
    def importprojectdir(cls, dir_project, file_type):
        """Imports all descriptor files under a given folder

        this method is specific for Nemo project type
        """
        project = {
            'intent': {},
            'nodemodel': {},
            'positions': {}
        }

        for desc_type in project:
            cur_type_path = os.path.join(dir_project, desc_type.upper())
            log.debug(cur_type_path)
            if os.path.isdir(cur_type_path):
                for file in glob.glob(os.path.join(cur_type_path, '*.' + file_type)):
                    if file_type == 'nemo':
                        project[desc_type][os.path.basename(file).split('.')[0]] = Util.openfile(file).read()

        for vertices_file in glob.glob(os.path.join(dir_project, '*.json')):
            if os.path.basename(vertices_file) == 'vertices.json':
                project['positions']['vertices'] = Util.loadjsonfile(vertices_file)

        return project

    @classmethod
    def importprojectfiles(cls, file_dict):
        """Imports descriptors (extracted from the new project POST)

        The keys in the dictionary are the file types
        """
        project = {
            'intent': {},
            'nodemodel': {},
        }

        print "Importing project file"
        for desc_type in project:
            if desc_type in file_dict:
                files_desc_type = file_dict[desc_type]
                for file in files_desc_type:
                    project[desc_type][os.path.splitext(file.name)[0]] = file.read()

        return project
apache-2.0
8,360,074,849,449,196,000
26.985915
109
0.573729
false
4.148225
false
false
false
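A quick usage sketch for NemoParser.importprojectdir, following the directory layout the method itself expects (upper-case INTENT/ and NODEMODEL/ sub-folders holding .nemo files, plus an optional vertices.json with node positions). The project path is made up for illustration, and the sketch uses the same Python 2 print style as the module:

from lib.nemo.nemo_parser import NemoParser

# Hypothetical layout:
#   /tmp/myproject/INTENT/*.nemo
#   /tmp/myproject/NODEMODEL/*.nemo
#   /tmp/myproject/vertices.json   (optional)
project = NemoParser.importprojectdir('/tmp/myproject', 'nemo')

print project['intent'].keys()   # descriptor names found under INTENT/
print project['positions']       # {'vertices': {...}} if vertices.json was present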
rizaon/limbo
limbo/plugins/stock.py
1
1262
"""$<ticker symbol> for a quote on a stock price""" from __future__ import print_function import logging import re try: from urllib import quote except ImportError: from urllib.request import quote from bs4 import BeautifulSoup import requests logger = logging.getLogger(__name__) def stockprice(ticker): url = "https://www.google.com/finance?q={0}" soup = BeautifulSoup(requests.get(url.format(quote(ticker))).text) try: company, ticker = re.findall(u"^(.+?)\xa0\xa0(.+?)\xa0", soup.text, re.M)[0] except IndexError: logging.info("Unable to find stock {0}".format(ticker)) return "" price = soup.select("#price-panel .pr span")[0].text change, pct = soup.select("#price-panel .nwp span")[0].text.split() pct.strip('()') emoji = ":chart_with_upwards_trend:" if change.startswith("+") else ":chart_with_downwards_trend:" return "{0} {1} {2}: {3} {4} {5} {6}".format(emoji, company, ticker, price, change, pct, emoji) def on_message(msg, server): text = msg.get("text", "") matches = re.findall(r"^\$[a-zA-Z]\w{0,3}", text) if not matches: return prices = [stockprice(ticker[1:].encode("utf8")) for ticker in matches] return "\n".join(p for p in prices if p)
mit
8,730,622,750,421,006,000
30.55
102
0.637876
false
3.252577
false
false
false
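To exercise the limbo plugin above outside of Slack, its on_message hook can be called directly. The sketch below fabricates the one field the function reads (msg['text']) and assumes the plugin is importable as limbo.plugins.stock; the ticker is just an example, and the call hits Google Finance live, so treat this as an illustration rather than a unit test:

from limbo.plugins import stock

fake_msg = {"text": "$GOOG"}   # a message starting with $TICKER triggers the plugin
reply = stock.on_message(fake_msg, server=None)

print(reply)   # e.g. ":chart_with_upwards_trend: Alphabet Inc GOOG: ..." or None when nothing matched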
datastreaming/mflow_nodes
mflow_nodes/test_tools/m_generate_test_stream.py
1
2509
from argparse import ArgumentParser

import numpy as np
from types import SimpleNamespace
from mflow_nodes.stream_tools.mflow_forwarder import MFlowForwarder


def generate_frame_data(frame_shape, frame_number):
    """
    Generate a frame that is filled with the frame number value.
    :param frame_shape: Shape of the frame to generate.
    :param frame_number: Number to fill the frame with.
    """
    return np.full(shape=frame_shape, fill_value=frame_number, dtype=np.int32)


def generate_test_array_stream(binding_address="tcp://127.0.0.1:40000", frame_shape=(4, 4), number_of_frames=16):
    """
    Generate an array-1.0 stream of shape [4,4] and the specified number of frames.
    The values for each cell in the frame corresponds to the frame number.
    :param frame_shape: Shape (number of cells) of the frames to send.
    :param number_of_frames: Number of frames to send.
    :param binding_address: Address to bind the stream to.
    """
    print("Preparing to send %d frames of shape %s." % (number_of_frames, str(frame_shape)))

    mflow_forwarder = MFlowForwarder()
    mflow_forwarder.start(binding_address)

    # Test stream is of array type.
    header = {"htype": "array-1.0",
              "type": "int32",
              "shape": list(frame_shape)}

    # Send 16 4x4 frames. The values of each array cell is equal to the frame number.
    for frame_number in range(number_of_frames):
        header["frame"] = frame_number
        data = generate_frame_data(frame_shape, frame_number)

        print("Sending frame %d" % frame_number)

        message = SimpleNamespace()
        message.data = {"header": header, "data": [data]}

        mflow_forwarder.forward(message)

    mflow_forwarder.stop()


if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("binding_address", type=str, help="Binding address for mflow connection.\n"
                                                          "Example: tcp://127.0.0.1:40001")
    parser.add_argument("--n_frames", type=int, default=16, help="Number of frames to generate.")
    parser.add_argument("--frame_size", type=int, default=4, help="Number of values X and Y direction, per frame.")
    input_args = parser.parse_args()

    try:
        generate_test_array_stream(input_args.binding_address,
                                   number_of_frames=input_args.n_frames,
                                   frame_shape=(input_args.frame_size, input_args.frame_size))
    except KeyboardInterrupt:
        print("Terminated by user.")
gpl-3.0
47,031,052,100,983,640
39.467742
115
0.656038
false
3.744776
false
false
false
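The generator above already exposes a command-line interface, but it can also be driven from Python. A one-call sketch with an example local address:

from mflow_nodes.test_tools.m_generate_test_stream import generate_test_array_stream

# Bind an array-1.0 test stream locally and push four 8x8 int32 frames.
generate_test_array_stream("tcp://127.0.0.1:40001", frame_shape=(8, 8), number_of_frames=4)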
SublimeGit/SublimeGit
sgit/checkout.py
1
9251
# coding: utf-8 from functools import partial import sublime from sublime_plugin import WindowCommand, TextCommand from .util import noop from .cmd import GitCmd from .helpers import GitStatusHelper, GitBranchHelper, GitErrorHelper, GitLogHelper, GitRemoteHelper from .helpers import GitTagHelper GIT_BRANCH_EXISTS_MSG = "The branch %s already exists. Do you want to overwrite it?" NO_REMOTES = u"No remotes have been configured. Remotes can be added with the Git: Add Remote command. Do you want to add a remote now?" class GitCheckoutWindowCmd(GitCmd, GitBranchHelper, GitLogHelper, GitErrorHelper): pass class GitCheckoutBranchCommand(WindowCommand, GitCheckoutWindowCmd): """ Check out an existing branch. This command allows you to select a branch from the quick bar to check out. The currently active branch (if any) is marked with an asterisk (*) to the left of its name. """ def run(self): repo = self.get_repo() if not repo: return branches = self.get_branches(repo) choices = [] for current, name in branches: choices.append('%s %s' % ('*' if current else ' ', name)) self.window.show_quick_panel(choices, partial(self.on_done, repo, branches), sublime.MONOSPACE_FONT) def on_done(self, repo, branches, idx): if idx == -1: return current, branch = branches[idx] if current: return exit, stdout, stderr = self.git(['checkout', branch], cwd=repo) if exit == 0: panel = self.window.get_output_panel('git-checkout') panel.run_command('git_panel_write', {'content': stderr}) self.window.run_command('show_panel', {'panel': 'output.git-checkout'}) else: sublime.error_message(self.format_error_message(stderr)) self.window.run_command('git_status', {'refresh_only': True}) class GitCheckoutTagCommand(WindowCommand, GitCheckoutWindowCmd, GitTagHelper): """ Check out a specific tag. This command allows you to check out a specific tag. A list of available tags will be presented in the quick bar. After checkout, you will be in a detached head state. """ def run(self, repo=None, tag=None): repo = repo or self.get_repo() if not repo: return if tag: self.on_tag(repo, tag) else: tags = self.get_tags(repo) if not tags: sublime.error_message("This repo does not contain any tags. Run Git: Add Tag to add one.") return choices = self.format_quick_tags(tags) def on_done(idx): if idx != -1: tag = choices[idx][0] self.on_tag(repo, tag) self.window.show_quick_panel(choices, on_done) def on_tag(self, repo, tag): exit, stdout, stderr = self.git(['checkout', 'tags/%s' % tag], cwd=repo) if exit == 0: panel = self.window.get_output_panel('git-checkout') panel.run_command('git_panel_write', {'content': stderr}) self.window.run_command('show_panel', {'panel': 'output.git-checkout'}) else: sublime.error_message(self.format_error_message(stderr)) self.window.run_command('git_status', {'refresh_only': True}) class GitCheckoutCommitCommand(WindowCommand, GitCheckoutWindowCmd): """ Check out a specific commit. This command allows you to check out a specific commit. The list of commits will be presented in the quick bar, containing the first line of the commit message, the abbreviated sha1, as well as a relative and absolute date in the local timezone. After checkout, you will be in a detached head state. 
""" def run(self): repo = self.get_repo() if not repo: return log = self.get_quick_log(repo) hashes, choices = self.format_quick_log(log) self.window.show_quick_panel(choices, partial(self.on_done, repo, hashes)) def on_done(self, repo, hashes, idx): if idx == -1: return commit = hashes[idx] exit, stdout, stderr = self.git(['checkout', commit], cwd=repo) if exit == 0: panel = self.window.get_output_panel('git-checkout') panel.run_command('git_panel_write', {'content': stderr}) self.window.run_command('show_panel', {'panel': 'output.git-checkout'}) else: sublime.error_message(self.format_error_message(stderr)) self.window.run_command('git_status', {'refresh_only': True}) class GitCheckoutNewBranchCommand(WindowCommand, GitCheckoutWindowCmd): """ Create a new branch from the current HEAD and switch to it. This command will show an input panel allowing you to name your new branch. After giving the branch a name, pressing enter will create the new branch and check it out. Pressing esc will cancel. If a branch with the given name already exists, you will be asked if you want to overwrite the branch. Selecting cancel will exit silently, without making any changes. """ def run(self): repo = self.get_repo() if not repo: return self.window.show_input_panel("Branch:", "", partial(self.on_done, repo), noop, noop) def on_done(self, repo, branch): branch = branch.strip() if not branch: return b = '-b' branches = [n for _, n in self.get_branches(repo)] if branch in branches: if sublime.ok_cancel_dialog(GIT_BRANCH_EXISTS_MSG % branch, 'Overwrite'): b = '-B' else: return exit, stdout, stderr = self.git(['checkout', b, branch], cwd=repo) if exit == 0: panel = self.window.get_output_panel('git-checkout') panel.run_command('git_panel_write', {'content': stderr}) self.window.run_command('show_panel', {'panel': 'output.git-checkout'}) else: sublime.error_message(self.format_error_message(stderr)) self.window.run_command('git_status', {'refresh_only': True}) class GitCheckoutRemoteBranchCommand(WindowCommand, GitCheckoutWindowCmd, GitRemoteHelper): """Checkout a remote branch.""" def run(self, repo=None): repo = self.get_repo() if not repo: return remotes = self.get_remotes(repo) if not remotes: if sublime.ok_cancel_dialog(NO_REMOTES, 'Add Remote'): self.window.run_command('git_remote_add') return choices = self.format_quick_remotes(remotes) self.window.show_quick_panel(choices, partial(self.remote_panel_done, repo, choices)) def remote_panel_done(self, repo, choices, idx): if idx != -1: remote = choices[idx][0] remote_branches = self.get_remote_branches(repo, remote) if not remote_branches: return sublime.error_message("No branches on remote %s" % remote) formatted_remote_branches = self.format_quick_branches(remote_branches) local_branches = [b for _, b in self.get_branches(repo)] remote_only_branches = [b for b in formatted_remote_branches if b[0] not in frozenset(local_branches)] if not remote_only_branches: return sublime.error_message("All remote branches are already present locally") def on_remote(): self.window.show_quick_panel(remote_only_branches, partial(self.remote_branch_panel_done, repo, remote_only_branches)) sublime.set_timeout(on_remote, 50) def remote_branch_panel_done(self, repo, branches, idx): if idx != -1: branch = branches[idx][0] exit, stdout, stderr = self.git(['checkout', branch], cwd=repo) if exit == 0: panel = self.window.get_output_panel('git-checkout') panel.run_command('git_panel_write', {'content': stderr}) self.window.run_command('show_panel', {'panel': 'output.git-checkout'}) 
            else:
                sublime.error_message(self.format_error_message(stderr))

            self.window.run_command('git_status', {'refresh_only': True})


class GitCheckoutCurrentFileCommand(TextCommand, GitCmd, GitStatusHelper):
    """
    Check out the currently open file from git, discarding any local
    modifications to it. After a successful checkout the view is reverted
    so that it shows the checked-out contents.
    """

    def run(self, edit):
        filename = self.view.file_name()
        if not filename:
            sublime.error_message("Cannot checkout an unsaved file.")
            return

        repo = self.get_repo()
        if not repo:
            return

        if not self.file_in_git(repo, filename):
            sublime.error_message("The file %s is not tracked by git." % filename)
            return

        exit, stdout, stderr = self.git(['checkout', '--quiet', '--', filename], cwd=repo)
        if exit == 0:
            sublime.status_message('Checked out %s' % filename)
            view = self.view
            sublime.set_timeout(partial(view.run_command, 'revert'), 50)
        else:
            sublime.error_message('git error: %s' % stderr)
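For quick manual testing, these window commands can be driven from the Sublime Text console; a minimal sketch (the snake_case command names are derived from the class names by sublime_plugin's usual naming convention, so they are an assumption rather than something declared in this file):

# Run from the Sublime Text console (View > Show Console).
import sublime

window = sublime.active_window()
window.run_command('git_checkout_branch')      # pick a local branch from the quick panel
window.run_command('git_checkout_tag')         # pick a tag; leaves a detached HEAD
window.run_command('git_checkout_new_branch')  # prompts for a new branch name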
mit
7,732,864,790,283,200,000
34.856589
136
0.611177
false
3.958494
false
false
false
textclf/data-handler
nlpdatahandlers/base_handler.py
1
6414
import numpy as np import util.misc class DataHandlerException(Exception): pass class BaseDataHandler(object): DATA_ALL = 1 DATA_TRAIN = 2 DATA_VALIDATION = 4 DATA_TEST = 3 def __init__(self, source): self.source = source def get_data(self, type=DATA_ALL): """ Process the data from its source and returns two lists: texts and labels, ready for a classifier to be used """ raise NotImplementedError() @staticmethod def shuffle_data(train_values, labels): combined_lists = zip(train_values, labels) np.random.shuffle(combined_lists) return zip(*combined_lists) @staticmethod def to_word_level_vectors(texts_list, wv_container, words_per_text=None): """ Receives a list of texts. For each text, it converts the text into a list of word vectors given by a vector container (Glove, WordToVec) for direct use as input If words_per_text is specified, each text representation can have as many as words_per_text words. Hence texts will be cut or zero-padded. """ from util.language import tokenize_text tokenized_texts = util.misc.parallel_run(tokenize_text, texts_list) text_wvs_indices = [wv_container.get_indices(text) for text in tokenized_texts] del tokenized_texts text_wvs = [wv_container[text_indices] for text_indices in text_wvs_indices] del text_wvs_indices if words_per_text is not None: text_wvs = BaseDataHandler.__pad_sequence_word_vectors(text_wvs, words_per_text) return text_wvs @staticmethod def __pad_sequence_word_vectors(text_wvs, maxlen=None): """ Given a list of lists of word vectors (this is, wvs for texts), it zero-pads or reduces the number of words up to maxlen if specified. Otherwise, it pads everything up to the maximum text size """ lengths = [len(s) for s in text_wvs] nb_samples = len(text_wvs) if maxlen is None: maxlen = np.max(lengths) wv_dim = text_wvs[0].shape[1] x = np.zeros((nb_samples, maxlen, wv_dim)).astype('float32') for idx, s in enumerate(text_wvs): x[idx, :lengths[idx]] = s[:maxlen] return x @staticmethod def to_char_level_idx(texts_list, char_container, chars_per_word=None, words_per_document=None, prepend=False): """ Receives a list of texts. For each text, it converts the text into a list of indices of a characters for later use in the embedding of a neural network. Texts are padded (or reduced) up to chars_per_word char_container is assumed to be a method that converts characters to indices using a method called get_indices() """ from util.language import tokenize_text texts_list = util.misc.parallel_run(tokenize_text, texts_list) if words_per_document is not None: text_with_indices = [BaseDataHandler.__normalize(char_container.get_indices(txt), chars_per_word, prepend) for txt in texts_list] text_with_indices = BaseDataHandler.__normalize(text_with_indices, size=words_per_document, filler=[0] * chars_per_word) else: text_with_indices = char_container.get_indices(texts_list) return text_with_indices @staticmethod def to_word_level_idx(texts_list, wv_container, words_per_document=None, prepend=False): """ Receives a list of texts. For each text, it converts the text into indices of a word vector container (Glove, WordToVec) for later use in the embedding of a neural network. 
Texts are padded (or reduced) up to words_per_document """ from util.language import tokenize_text texts_list = util.misc.parallel_run(tokenize_text, texts_list) if words_per_document is not None: text_with_indices = BaseDataHandler.__normalize(wv_container.get_indices(texts_list), words_per_document, prepend) else: text_with_indices = wv_container.get_indices(texts_list) return text_with_indices @staticmethod def to_sentence_level_idx(texts_list, sentences_per_paragraph, words_per_sentence, wv_container, prepend=False): """ Receives a list of texts. For each text, it converts the text into sentences and converts the words into indices of a word vector container (Glove, WordToVec) for later use in the embedding of a neural network. Sentences are padded (or reduced) up to words_per_sentence elements. Texts ("paragraphs") are padded (or reduced) up to sentences_per_paragraph If prepend = True, padding is added at the beginning Ex: [[This might be cumbersome. Hopefully not.], [Another text]] to [ [[5, 24, 3, 223], [123, 25, 0, 0]]. [[34, 25, 0, 0], [0, 0, 0, 0] ] using sentences_per_paragraph = 4, words_per_sentence = 4 """ from util.language import parse_paragraph texts_list = util.misc.parallel_run(parse_paragraph, texts_list) text_with_normalized_sentences = [BaseDataHandler.__normalize(review, size=words_per_sentence, prepend=prepend) for review in wv_container.get_indices(texts_list)] text_padded_paragraphs = BaseDataHandler.__normalize(text_with_normalized_sentences, size=sentences_per_paragraph, filler=[0] * words_per_sentence) return text_padded_paragraphs @staticmethod def __normalize(sq, size=30, filler=0, prepend=False): """ Take a list of lists and ensure that they are all of length `sz` Args: ----- e: a non-generator iterable of lists sz: integer, the size that each sublist should be normalized to filler: obj -- what should be added to fill out the size? prepend: should `filler` be added to the front or the back of the list? """ if not prepend: def _normalize(e, sz): return e[:sz] if len(e) >= sz else e + [filler] * (sz - len(e)) else: def _normalize(e, sz): return e[-sz:] if len(e) >= sz else [filler] * (sz - len(e)) + e return [_normalize(e, size) for e in sq]
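To make the padding convention concrete, here is a small standalone sketch that mirrors what the private __normalize helper above does; it is an illustrative re-implementation for this example only, not a call into BaseDataHandler (whose public methods also need the repo's util.language tokenizers and a word-vector container):

# Illustrative re-implementation of the pad/truncate rule used by __normalize.
def normalize(seq, size, filler=0, prepend=False):
    if not prepend:
        return [e[:size] if len(e) >= size else e + [filler] * (size - len(e)) for e in seq]
    return [e[-size:] if len(e) >= size else [filler] * (size - len(e)) + e for e in seq]

sentences = [[5, 24, 3, 223, 17], [123, 25]]       # word indices per sentence
print(normalize(sentences, size=4))                # [[5, 24, 3, 223], [123, 25, 0, 0]]
print(normalize(sentences, size=4, prepend=True))  # [[24, 3, 223, 17], [0, 0, 123, 25]]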
mit
4,222,095,093,282,551,300
42.04698
141
0.631743
false
3.896719
false
false
false
arunkgupta/gramps
gramps/gen/filters/rules/person/_isdescendantfamilyof.py
1
3903
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2002-2006 Donald N. Allingham # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # $Id$ #------------------------------------------------------------------------- # # Standard Python modules # #------------------------------------------------------------------------- from ....ggettext import gettext as _ try: set() except NameError: from sets import Set as set #------------------------------------------------------------------------- # # GRAMPS modules # #------------------------------------------------------------------------- from .. import Rule #------------------------------------------------------------------------- # # IsDescendantFamilyOf # #------------------------------------------------------------------------- class IsDescendantFamilyOf(Rule): """Rule that checks for a person that is a descendant or the spouse of a descendant of a specified person""" labels = [ _('ID:'), _('Inclusive:') ] name = _('Descendant family members of <person>') category = _('Descendant filters') description = _("Matches people that are descendants or the spouse " "of a descendant of a specified person") def prepare(self,db): self.db = db self.matches = set() self.root_person = db.get_person_from_gramps_id(self.list[0]) self.add_matches(self.root_person) try: if int(self.list[1]): inclusive = True else: inclusive = False except IndexError: inclusive = True if not inclusive: self.exclude() def reset(self): self.matches = set() def apply(self,db,person): return person.handle in self.matches def add_matches(self,person): if not person: return # Add self self.matches.add(person.handle) for family_handle in person.get_family_handle_list(): family = self.db.get_family_from_handle(family_handle) if family: # Add every child recursively for child_ref in family.get_child_ref_list(): if child_ref: self.add_matches(self.db.get_person_from_handle(child_ref.ref)) # Add spouse if person.handle == family.get_father_handle(): spouse_handle = family.get_mother_handle() else: spouse_handle = family.get_father_handle() self.matches.add(spouse_handle) def exclude(self): # This removes root person and his/her spouses from the matches set if not self.root_person: return self.matches.remove(self.root_person.handle) for family_handle in self.root_person.get_family_handle_list(): family = self.db.get_family_from_handle(family_handle) if family: if self.root_person.handle == family.get_father_handle(): spouse_handle = family.get_mother_handle() else: spouse_handle = family.get_father_handle() self.matches.remove(spouse_handle)
gpl-2.0
-1,071,615,286,370,999,000
34.481818
87
0.540354
false
4.522596
false
false
false
tommyogden/maxwellbloch
maxwellbloch/tests/test_ob_solve.py
1
8224
# -*- coding: utf-8 -*- """ Unit tests for the OBSolve class. Thomas Ogden <t@ogden.eu> """ import os import unittest import numpy as np from maxwellbloch import ob_solve, t_funcs # Absolute path of tests/json directory, so that tests can be called from # different directories. JSON_DIR = os.path.abspath(os.path.join(__file__, '../', 'json')) JSON_STR_02 = ( '{' ' "atom": {' ' "decays": [' ' { "channels": [[0,1], [1,2]], ' ' "rate": 1.0' ' }' ' ],' ' "energies": [],' ' "fields": [' ' {' ' "coupled_levels": [' ' [0, 1]' ' ],' ' "detuning": 0.0,' ' "detuning_positive": true,' ' "label": "probe",' ' "rabi_freq": 5.0,' ' "rabi_freq_t_args": {},' ' "rabi_freq_t_func": null' ' },' ' {' ' "coupled_levels": [' ' [1, 2]' ' ],' ' "detuning": 0.0,' ' "detuning_positive": false,' ' "label": "coupling",' ' "rabi_freq": 10.0,' ' "rabi_freq_t_args": {},' ' "rabi_freq_t_func": null' ' }' ' ],' ' "num_states": 3' ' },' ' "t_min": 0.0,' ' "t_max": 1.0,' ' "t_steps": 100,' ' "method": "mesolve",' ' "opts": {}' '}' ) class TestSetFieldRabiTFunc(unittest.TestCase): """ Test setting custom Rabi frequency time functions. """ def test_set_field_rabi_t_func_1(self): """ Test that a custom double pulse Rabi freq time functions can be set. """ ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02) two_pulse_t_func = lambda t, args: (t_funcs.gaussian(0)(t, args) + t_funcs.gaussian(1)(t, args)) two_pulse_t_args = {"ampl_0": 1.0, "centre_0": 0.0, "fwhm_0": 0.1, "ampl_1": 2.0, "centre_1": 0.5, "fwhm_1": 0.1, } ob_solve_02.set_field_rabi_freq_t_func(0, two_pulse_t_func) ob_solve_02.set_field_rabi_freq_t_args(0, two_pulse_t_args) field_0 = ob_solve_02.atom.fields[0] self.assertAlmostEqual(field_0.rabi_freq_t_func(0.0, field_0.rabi_freq_t_args), 1.0) self.assertAlmostEqual(field_0.rabi_freq_t_func(0.5, field_0.rabi_freq_t_args), 2.0) self.assertAlmostEqual(field_0.rabi_freq_t_func(1.0, field_0.rabi_freq_t_args), 0.0) class TestSolve(unittest.TestCase): def test_two_level_rabi_oscillations(self): """ Solve the optical Bloch equations for the two-level atom. Notes: See https://en.wikipedia.org/wiki/Rabi_cycle """ RABI_FREQ = 5.0 atom_dict = {"fields": [{"coupled_levels": [[0, 1]], "rabi_freq": RABI_FREQ}], "num_states": 2} obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0, t_steps=100) obs.solve() # Get the populations pop_0 = np.absolute(obs.states_t()[:, 0, 0]) pop_1 = np.absolute(obs.states_t()[:, 1, 1]) # The solution is known, we should have Rabi cycling at the frequency. known_0 = np.cos(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2 known_1 = np.sin(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2 self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-5, atol=1.e-5)) self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-5, atol=1.e-5)) # If you want to take a look # import matplotlib.pyplot as plt # plt.plot(obs.tlist, pop_0) # plt.plot(obs.tlist, known_0, ls='dashed') # plt.plot(obs.tlist, pop_1) # plt.plot(obs.tlist, known_1, ls='dashed') # plt.show() def test_two_level_with_opts(self): """ Same as test_two_level_rabi_oscillations() but with opts set such that the tolerances are lower. The results will be less accurate. 
""" RABI_FREQ = 5.0 atom_dict = {"fields": [{"coupled_levels": [[0, 1]], "rabi_freq": RABI_FREQ}], "num_states": 2, "initial_state": [1., 0.]} obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0, t_steps=100, opts={'atol': 1e-6, 'rtol': 1e-4}) obs.solve() # Get the populations pop_0 = np.absolute(obs.states_t()[:, 0, 0]) pop_1 = np.absolute(obs.states_t()[:, 1, 1]) # The solution is known, we should have Rabi cycling at the frequency. known_0 = np.cos(2.0 * np.pi * RABI_FREQ * obs.tlist / 2.0)**2 known_1 = np.sin(2.0 * np.pi * RABI_FREQ * obs.tlist / 2.0)**2 # Compared with test_two_level_rabi_oscillations() we can only assert # a lower tolerance to the known solution. self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-3, atol=1.e-3)) self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-3, atol=1.e-3)) # If you want to take a look # import matplotlib.pyplot as plt # plt.plot(obs.tlist, pop_0) # plt.plot(obs.tlist, known_0, ls='dashed') # plt.plot(obs.tlist, pop_1) # plt.plot(obs.tlist, known_1, ls='dashed') # plt.show() def test_two_level_with_inital_state(self): """ Same as test_two_level_rabi_oscillations() but with the initial state set so that the population starts in the upper level. """ RABI_FREQ = 5.0 atom_dict = {"fields": [{"coupled_levels": [[0, 1]], "rabi_freq": RABI_FREQ}], "num_states": 2, "initial_state": [0., 1.]} obs = ob_solve.OBSolve(atom=atom_dict, t_min=0.0, t_max=1.0, t_steps=100) obs.solve() # Get the populations pop_0 = np.absolute(obs.states_t()[:, 0, 0]) pop_1 = np.absolute(obs.states_t()[:, 1, 1]) # The solution is as test_two_level_rabi_oscillations() but swapped known_0 = np.sin(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2 known_1 = np.cos(2.0*np.pi*RABI_FREQ*obs.tlist/2.0)**2 self.assertTrue(np.allclose(pop_0, known_0, rtol=1.e-5, atol=1.e-5)) self.assertTrue(np.allclose(pop_1, known_1, rtol=1.e-5, atol=1.e-5)) class TestJSON(unittest.TestCase): def test_to_from_json_str_00(self): ob_solve_00 = ob_solve.OBSolve() ob_solve_01 = ob_solve.OBSolve.from_json_str(ob_solve_00.to_json_str()) self.assertEqual(ob_solve_00.to_json_str(), ob_solve_01.to_json_str()) def test_from_json_str(self): ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02) self.assertEqual(ob_solve_02.t_min, 0.0) self.assertEqual(ob_solve_02.t_max, 1.0) self.assertEqual(ob_solve_02.t_steps, 100) self.assertEqual(ob_solve_02.method, "mesolve") def test_to_from_json_str_03(self): json_path = os.path.join(JSON_DIR, "ob_solve_03.json") obs = ob_solve.OBSolve().from_json(json_path) obs_test = ob_solve.OBSolve.from_json_str(obs.to_json_str()) self.assertEqual(obs.to_json_str(), obs_test.to_json_str()) def test_to_from_json(self): import os filepath = "test_ob_solve_02.json" ob_solve_02 = ob_solve.OBSolve().from_json_str(JSON_STR_02) ob_solve_02.to_json(filepath) ob_solve_03 = ob_solve.OBSolve().from_json(filepath) os.remove(filepath) self.assertEqual(ob_solve_02.to_json_str(), ob_solve_03.to_json_str()) class TestSaveLoad(unittest.TestCase): """ Tests for the OBSolve save and load methods.""" def test_save_load_01(self): """ Solve a basic OBSolve problem. Save the results to file. Set the results in the OBSolve object to null. Load the results from file and check that they match the original values. """ json_path = os.path.join(JSON_DIR, "ob_solve_02.json") ob_solve_02 = ob_solve.OBSolve().from_json(json_path) states_t = ob_solve_02.solve() states_t_loaded = ob_solve_02.solve(recalc=False) self.assertTrue((states_t == states_t_loaded).all())
mit
-7,425,014,225,698,490,000
32.567347
79
0.542437
false
2.892719
true
false
false
chenchiyuan/hawaii
hawaii/apps/weixin/models/apps.py
1
2860
# -*- coding: utf-8 -*- # __author__ = chenchiyuan from __future__ import division, unicode_literals, print_function from django.db import models from libs.models.models import SingletonModel from django.conf import settings from libs.uuids import get_uuid import requests import json class App(SingletonModel): class Meta: app_label = "weixin" db_table = "weixin_app" verbose_name_plural = verbose_name = u"账号设置" name = models.CharField("微信名", max_length=64, default="", blank=True, null=True) app_url = models.CharField("微信回调地址", max_length=256, blank=True, null=True) app_token = models.CharField("微信Token", max_length=64, blank=True, null=True) app_key = models.CharField("app_key", max_length=64, blank=True, null=True) app_id = models.CharField("app_secret", max_length=64, blank=True, null=True) def __unicode__(self): return bool(self.name) and self.name or self.owner.email @property def subscribe_rule(self): subscribe = self.subscribeitem_set.all() if not subscribe.count(): return None else: return subscribe[0].rule def get_app_url(self): return "%s/weixin/callback/" % settings.APP_HOST_NAME def save(self, force_insert=False, force_update=False, using=None): if force_insert and force_update: raise ValueError("Cannot force both insert and updating in model saving.") if not self.app_url: self.app_url = self.get_app_url() if not self.app_token: self.app_token = get_uuid() if self.app_key and self.app_id: self.delete_menus() self.create_menus() super(App, self).save(force_insert, force_update, using) def get_access_token(self): if not any([self.app_key, self.app_id]): raise Exception(u"必须申请app_key和app_secret".encode("utf-8")) url = "https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=%s&secret=%s" \ % (self.app_key, self.app_id) response = requests.get(url) json_data = json.loads(response.content) return json_data['access_token'] def create_menus(self): from hawaii.apps.weixin.models.menus import MenuItem token = self.get_access_token() post_dict = MenuItem.get_menus_by_app(self) headers = {'content-type': 'application/json'} url = "https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s" % token return requests.post(url, data=json.dumps(post_dict, ensure_ascii=False).encode("utf-8"), headers=headers) def delete_menus(self): token = self.get_access_token() url = "https://api.weixin.qq.com/cgi-bin/menu/delete?access_token=%s" % token return requests.get(url)
bsd-3-clause
1,870,618,801,998,368,800
35.636364
114
0.639362
false
3.389423
false
false
false
leleobhz/phonetica
high_experimental/mvuorine/four1.py
1
1236
#!/usr/bin/python
# -*- coding: utf-8 -*-

from math import sin

# Replaces data[1..2*nn] by its discrete Fourier transform, if isign
# is input as 1; or replaces data[1..2*nn] by nn times its inverse
# discrete Fourier transform, if isign is input as -1. data is a
# complex array of length nn or, equivalently, a real array of length
# 2*nn. nn MUST be an integer power of 2 (this is not checked for!).
def four1(data, nn, isign):
	n = nn << 1
	j = 1
	for i in range(1, n, 2):
		if j > i:
			# swap the complex numbers at positions i and j (bit reversal)
			tmp = data[j]
			data[j] = data[i]
			data[i] = tmp
			tmp = data[j+1]
			data[j+1] = data[i+1]
			data[i+1] = tmp
		m = n >> 1
		while m >= 2 and j > m:
			j -= m
			m >>= 1
		j += m
	# Danielson-Lanczos section
	mmax = 2
	while n > mmax:
		istep = mmax << 1
		theta = isign * (6.28318530717959 / mmax)   # 2*pi / mmax
		wtemp = sin(0.5 * theta)
		wpr = -2.0 * wtemp * wtemp
		wpi = sin(theta)
		wr = 1.0
		wi = 0.0
		for m in range(1, mmax, 2):
			for i in range(m, n+1, istep):
				j = i + mmax
				tempr = wr * data[j] - wi * data[j+1]
				tempi = wr * data[j+1] + wi * data[j]
				data[j] = data[i] - tempr
				data[j+1] = data[i+1] - tempi
				data[i] += tempr
				data[i+1] += tempi
			wtemp = wr
			wr = wtemp * wpr - wi * wpi + wr
			wi = wi * wpr + wtemp * wpi + wi
		mmax = istep
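A quick sanity check is to transform a unit impulse, whose spectrum is flat; a minimal sketch assuming this file is importable as four1.py and remembering the Numerical Recipes packing (1-based indexing, real and imaginary parts interleaved, data[0] unused):

# DFT of a unit impulse: every bin should come out as approximately (1 + 0j).
from four1 import four1

nn = 4                        # number of complex samples, must be a power of 2
data = [0.0] * (2 * nn + 1)   # 1-based, interleaved re/im, so 2*nn + 1 slots
data[1] = 1.0                 # x[0] = 1 + 0j, all other samples zero

four1(data, nn, 1)            # forward transform, in place

spectrum = [complex(data[2 * k + 1], data[2 * k + 2]) for k in range(nn)]
print(spectrum)               # ~[(1+0j), (1+0j), (1+0j), (1+0j)]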
gpl-2.0
-3,151,591,414,976,966,000
22.72
72
0.562298
false
2.251366
false
false
false
concordusapps/django-identity
src/identity/saml/views/provider.py
1
5398
# -*- coding: utf-8 -*- """ \file identity/saml/views/provider.py \brief Implements the SAML endpoints for providers. \author Erich Healy (cactuscommander) ErichRHealy@gmail.com \author Ryan Leckey (mehcode) leckey.ryan@gmail.com \copyright Copyright 2012 © Concordus Applications, Inc. All Rights Reserved. """ from .. import models from ...models import Provider, Consumer from ..client import binding from lxml import etree import saml.schema.saml import saml.schema.samlp from saml import schema from django.shortcuts import get_object_or_404, redirect from django.core.urlresolvers import reverse from uuid import uuid4 from urllib import urlencode from django.contrib.auth.decorators import login_required from datetime import datetime, timedelta def sso(request, *args, **kwargs): """Single sign on (SSO).""" # Get ACS profile instance acs = models.Profile.objects.get(slug='acs') # Verify that the provider described in the URL exists provider = get_object_or_404(Provider, slug=kwargs['slug']) # Determine if we are from the login form or not from_login = request.method == 'GET' and len(request.GET) == 1 if not from_login: # Generate request identifier to namespace variables stored in session # storage identifier = uuid4().hex else: # Grab the identifier from the GET paramter passed back from the login # form identifier = request.GET['id'] # Template to pull namespaced items out of the session storage storage = '{}:saml:{{}}'.format(identifier) if not from_login: # Decode and deserialize the message message, state = binding.Binding.receive(request, 'request') xml = etree.XML(message) obj = schema.samlp.AuthenticationRequest.deserialize(xml) # Verify that the issuing consumer is known to identity consumer = get_object_or_404(Consumer, name=obj.issuer.text) else: # Get the consumer from the identifier passed from the login form consumer = get_object_or_404( Consumer, slug=request.session[storage.format('consumer')] ) # Query for a list of services provided by the requester that we know # about; if we cannot find one for ACS, then return a 404 # TODO: Handle the case of more than one acs # NOTE: This is a redundant query; it is merely a sanity check so that # if the service doesn't exist the user won't get any farther # in the authn process. service = get_object_or_404( models.Service, resource=consumer, profile=acs ) if not from_login: # TODO: Perform validation of message pass # Store items in namespaced session storage request.session[storage.format('provider')] = provider.slug request.session[storage.format('consumer')] = consumer.slug request.session[storage.format('message:id')] = obj.id request.session[storage.format('state')] = state if not request.user.is_authenticated(): # Send user off to get authenticated; # redirect to login page return redirect('{}?{}'.format( reverse('login'), urlencode({'id': identifier}) )) else: # Assign subject id to user if not already assigned if 'saml:subject' not in request.session: request.session['saml:subject'] = uuid4().hex # Construct SAML response # FIXME: This should go in `python-saml`, perhaps? 
obj = schema.samlp.Response( issuer=schema.saml.Issuer(provider.name), status=schema.samlp.Status( code=schema.samlp.StatusCode( value=schema.samlp.StatusCode.Value.SUCCESS ) ), assertion=schema.saml.Assertion( issuer=schema.saml.Issuer(provider.name), subject=schema.saml.Subject( id=schema.saml.NameID(request.session['saml:subject']), confirm=schema.saml.SubjectConfirmation( data=schema.saml.SubjectConfirmationData( in_response_to=consumer.name ) ) ), statements=[ schema.saml.AuthenticationStatement( context=schema.saml.AuthenticationContext( reference=schema.saml.AuthenticationContext. Reference.PREVIOUS_SESSION ) ), schema.saml.AttributeStatement( attributes=[ schema.saml.Attribute( name='uid', values=request.user.username ), ] ), ] ) ) # Serialize message to a string message = etree.tostring(schema.samlp.Response.serialize(obj)) print(message) # Send off return binding.Redirect.send( service.get_absolute_url(), message, request.session[storage.format('state')], 'response' ) def slo(request, *args, **kwargs): """Single log on (SLO)""" pass
mit
-1,617,509,357,031,689,700
34.045455
78
0.592181
false
4.558277
false
false
false
CrandellWS/gimp-plugin-helloworld
plug-ins/helloBatchWorldMessages.py
1
2139
#!/usr/bin/env python # # Use with scripts/helloBatchWorldMessages.scm to enable batch mode import gtk from gimpfu import * def helloBatchWorldMessagesFn(messageType) : #dictionary mapping case values to functions see -> https://docs.python.org/faq/design.html#why-isn-t-there-a-switch-or-case-statement-in-python messageOptions = {0:noMessage,1:terminalMessage,2:gtkMessage,3:gimpMessage} messageFunction = messageOptions[messageType] # usage example: messageFunction('Message To Deliver') # gimpMessage(messageType) #function to Deliver nothing def noMessage(msgText) : return def terminalMessage(msgText) : # Using stdout see -> https://en.wikipedia.org/wiki/Standard_streams#Standard_output_.28stdout.29 print msgText # (Unix Terminal Output) Will not work on Windows Based Systems. def gtkMessage(msgText) : # Using gtk to display an info type message see -> http://www.gtk.org/api/2.6/gtk/GtkMessageDialog.html#GtkMessageType message = gtk.MessageDialog(type=gtk.MESSAGE_INFO, buttons=gtk.BUTTONS_OK) message.set_markup(msgText) message.run() message.destroy() def gimpMessage(msgText) : # Using GIMP's interal procedure database see -> http://docs.gimp.org/en/glossary.html#glossary-pdb pdb.gimp_message(msgText) # see -> http://www.gimp.org/docs/python/ register( #name "helloBatchWorldMessagesPlugin", #blurb "Saying Hello Batch World", #help "Saying Hello to the Batch World", #author "William Crandell <william@crandell.ws>", #copyright "William Crandell <william@crandell.ws>", #date "2015", #menupath "Hello Batch World Messages", #imagetypes (use * for all, leave blank for none) "", #params [ (PF_OPTION,"message-options", "Message Options:", 0, [("No Messages"),("Terminal Messages"),("Standard Gimp Messages"),("Pretty GTK Messages")]), ], #results [], #function (to call) helloBatchWorldMessagesFn, #this can be included this way or the menu value can be directly prepended to the menupath menu = "<Toolbox>/Hello/") main()
mit
8,569,564,768,776,103,000
26.779221
153
0.697055
false
3.4224
false
false
false
toenuff/treadmill
tests/scheduler_perf.py
1
3987
"""Performance test for treadmill.scheduler """ import timeit # Disable W0611: Unused import import tests.treadmill_test_deps # pylint: disable=W0611 # XXX(boysson): Test needs update to new Scheduler API # XXX: from treadmill import scheduler def schedule(sched): """Helper function to run the scheduler.""" def _schedule(): """Run the scheduler, print some stats.""" new_placement = 0 evicted = 0 for event in sched.schedule(): if event.node: new_placement = new_placement + 1 else: evicted = evicted + 1 print 'scheduled: ', new_placement, ', evicted: ', evicted interval = timeit.timeit(stmt=_schedule, number=1) print 'time :', interval # XXX(boysson): Test needs update to new Scheduler API # XXX: # XXX: def test_reschedule(nodes_count, app_count, attempts, affinity): # XXX: """Add high priority apps on top of low priority with full capacity. # XXX: """ # XXX: print 'nodes: %s, apps: %s, attempts: %s' % (nodes_count, # XXX: app_count, # XXX: attempts) # XXX: cell = scheduler.Cell(3) # XXX: for idx in xrange(0, nodes_count): # XXX: node = scheduler.Node('node' + str(idx), [48, 48, 48]) # XXX: cell.add_node(node) # XXX: # XXX: alloc = scheduler.Allocation([10, 10, 10]) # XXX: cell.add_allocation('a1', alloc) # XXX: # XXX: sched = scheduler.Scheduler(cell) # XXX: # XXX: for attempt in xrange(0, attempts): # XXX: for app_idx in xrange(0, app_count): # XXX: prio = attempt * 5 + random.randint(0, 5) # XXX: demand = [random.randint(1, 48), # XXX: random.randint(1, 48), # XXX: random.randint(1, 48)] # XXX: name = 'app_%s.%s' % (attempt, app_idx) # XXX: alloc.add(scheduler.Application(name, prio, demand, # XXX: affinity=affinity(app_idx))) # XXX: # XXX: schedule(sched) # XXX(boysson): Test needs update to new Scheduler API # XXX: # XXX: def test_affinity(nodes_count, app_count, affinity_limit): # XXX: """Add more apps than nodes count to test affinity limit algo.""" # XXX: print 'node: %s, apps: %s, affinity_limit %s' % (nodes_count, # XXX: app_count, # XXX: affinity_limit) # XXX: # XXX: cell = scheduler.Cell(3) # XXX: for idx in xrange(0, nodes_count): # XXX: node = scheduler.Node('node' + str(idx), [48, 48, 48]) # XXX: cell.add_node(node) # XXX: # XXX: alloc = scheduler.Allocation([10, 10, 10]) # XXX: cell.add_allocation('a1', alloc) # XXX: for app_idx in xrange(0, app_count): # XXX: name = '1.%s' % (app_idx) # XXX: alloc.add(scheduler.Application(name, 0, [1, 1, 1], # XXX: affinity_limit=affinity_limit, # XXX: affinity='1')) # XXX: name = '2.%s' % (app_idx) # XXX: alloc.add(scheduler.Application(name, 0, [1, 1, 1], # XXX: affinity_limit=affinity_limit, # XXX: affinity='2')) # XXX: name = '3.%s' % (app_idx) # XXX: alloc.add(scheduler.Application(name, 0, [1, 1, 1], # XXX: affinity_limit=affinity_limit, # XXX: affinity='3')) # XXX: # XXX: sched = scheduler.Scheduler(cell) # XXX: schedule(sched) if __name__ == '__main__': pass # XXX: test_reschedule(500, 1000, 5, affinity=lambda idx: None) # XXX: test_reschedule(1000, 1000, 3, affinity=str) # XXX: test_reschedule(1000, 3000, 3, affinity=lambda idx: str(idx % 5)) # XXX: test_affinity(500, 1000, 1)
apache-2.0
-2,284,858,617,805,041,000
38.475248
79
0.520441
false
3.457936
true
false
false
renalreg/radar
radar/api/views/ins.py
1
1973
from radar.api.serializers.ins import InsClinicalPictureSerializer, InsRelapseSerializer from radar.api.views.common import ( PatientObjectDetailView, PatientObjectListView, StringLookupListView, ) from radar.models.ins import DIPSTICK_TYPES, InsClinicalPicture, InsRelapse, KIDNEY_TYPES, REMISSION_TYPES class InsClinicalPictureListView(PatientObjectListView): serializer_class = InsClinicalPictureSerializer model_class = InsClinicalPicture class InsClinicalPictureDetailView(PatientObjectDetailView): serializer_class = InsClinicalPictureSerializer model_class = InsClinicalPicture class InsRelapseListView(PatientObjectListView): serializer_class = InsRelapseSerializer model_class = InsRelapse class InsRelapseDetailView(PatientObjectDetailView): serializer_class = InsRelapseSerializer model_class = InsRelapse class InsKidneyTypeListView(StringLookupListView): items = KIDNEY_TYPES class InsRemissionTypeListView(StringLookupListView): items = REMISSION_TYPES class InsDipstickListView(StringLookupListView): items = DIPSTICK_TYPES def register_views(app): app.add_url_rule( '/ins-clinical-pictures', view_func=InsClinicalPictureListView.as_view('ins_clinical_picture_list')) app.add_url_rule( '/ins-clinical-pictures/<id>', view_func=InsClinicalPictureDetailView.as_view('ins_clinical_picture_detail')) app.add_url_rule('/ins-relapses', view_func=InsRelapseListView.as_view('ins_relapse_list')) app.add_url_rule('/ins-relapses/<id>', view_func=InsRelapseDetailView.as_view('ins_relapse_detail')) app.add_url_rule('/ins-kidney-types', view_func=InsKidneyTypeListView.as_view('ins_kidney_type_list')) app.add_url_rule( '/ins-remission-types', view_func=InsRemissionTypeListView.as_view('ins_remission_type_list')) app.add_url_rule('/ins-dipstick-options', view_func=InsDipstickListView.as_view('ins_dipstick_options'))
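register_views only needs an object exposing Flask's add_url_rule interface; a minimal wiring sketch, with a bare flask.Flask standing in for the real radar API application (an assumption for illustration only):

# Route the INS endpoints onto a Flask app (stand-in for the real radar app).
from flask import Flask
from radar.api.views.ins import register_views

app = Flask(__name__)
register_views(app)

# Registered routes then include, for example:
#   /ins-clinical-pictures   -> InsClinicalPictureListView
#   /ins-relapses/<id>       -> InsRelapseDetailView
#   /ins-kidney-types        -> InsKidneyTypeListView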
agpl-3.0
2,375,016,336,109,830,700
34.872727
108
0.767866
false
3.37265
false
false
false
google/graphicsfuzz
gfauto/gfauto/gflogging.py
1
1632
# -*- coding: utf-8 -*- # Copyright 2019 The GraphicsFuzz Project Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Logging utility for gfauto. Use to log to stdout and/or to a file. Print statements are banned in gfauto. """ from pathlib import Path from typing import List, TextIO from gfauto import util _LOG_TO_STDOUT = True _LOG_TO_STREAM: List[TextIO] = [] def push_stream_for_logging(stream: TextIO) -> None: _LOG_TO_STREAM.append(stream) def pop_stream_for_logging() -> None: _LOG_TO_STREAM.pop() def log(message: str, skip_newline: bool = False) -> None: if _LOG_TO_STDOUT: if not skip_newline: print(message, flush=True) # noqa: T001 else: print(message, end="", flush=True) # noqa: T001 for stream in _LOG_TO_STREAM: stream.write(message) if not skip_newline: stream.write("\n") stream.flush() def log_a_file(log_file: Path) -> None: log(f"Logging the contents of {str(log_file)}") try: log(util.file_read_text(log_file)) except IOError: log(f"Failed to read {str(log_file)}")
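In practice a file stream is pushed while a unit of work runs and popped afterwards, so output is mirrored to both stdout and the file; a small usage sketch assuming the gfauto package is installed:

# Tee gfauto log output to a file as well as stdout for the duration of a task.
from pathlib import Path

from gfauto import gflogging

log_path = Path("run.log")
with open(str(log_path), "w", encoding="utf-8") as stream:
    gflogging.push_stream_for_logging(stream)
    try:
        gflogging.log("starting run")
        gflogging.log("progress: ", skip_newline=True)
        gflogging.log("done")
    finally:
        gflogging.pop_stream_for_logging()

gflogging.log_a_file(log_path)  # replays the saved log back through log()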
apache-2.0
5,014,625,191,469,446,000
27.631579
77
0.672794
false
3.464968
false
false
false
TheMOOCAgency/edx-platform
lms/djangoapps/atp_task/api_helper.py
1
12618
import json import logging import hashlib from django.utils.translation import ugettext as _ from util.db import outer_atomic from celery.result import AsyncResult from celery.states import READY_STATES, SUCCESS, FAILURE, REVOKED from courseware.module_render import get_xqueue_callback_url_prefix from atp_task.models import tmaTask, PROGRESS from datetime import datetime, timedelta log = logging.getLogger(__name__) class AlreadyRunningError(Exception): """Exception indicating that a background task is already running""" pass def _task_is_running(course_id, task_type, task_key): """Checks if a particular task is already running""" running_tasks = tmaTask.objects.filter( course_id=course_id, task_type=task_type, task_key=task_key ) # exclude states that are "ready" (i.e. not "running", e.g. failure, success, revoked): for state in READY_STATES: running_tasks = running_tasks.exclude(task_state=state) return len(running_tasks) > 0 def _task_is_outdated(course_id, task_type, task_key): """Check if task is running for more than 15 minutes""" nowtime=datetime.now() - timedelta(minutes=10) task_outdated=False try: running_task = tmaTask.objects.get( course_id=course_id, task_type=task_type, task_key=task_key, task_state="QUEUING", created__lte=nowtime ) task_outdated=True except: pass return task_outdated def set_task_to_failure(course_id, task_type, task_key): """"Sets specific task status to failure""" running_task = tmaTask.objects.get( course_id=course_id, task_type=task_type, task_key=task_key, task_state="QUEUING" ) running_task.task_state="FAILURE" running_task.save() def _reserve_task(course_id, task_type, task_key, task_input, requester): """ Creates a database entry to indicate that a task is in progress. Throws AlreadyRunningError if the task is already in progress. Includes the creation of an arbitrary value for task_id, to be submitted with the task call to celery. Note that there is a chance of a race condition here, when two users try to run the same task at almost exactly the same time. One user could be after the check and before the create when the second user gets to the check. At that point, both users are able to run their tasks simultaneously. This is deemed a small enough risk to not put in further safeguards. """ # if _task_is_running(course_id, task_type, task_key): # log.info("RESERVE TASK : Duplicate task found for task_type {} and task_key {}".format(task_type,task_key)) # if _task_is_outdated(course_id,task_type,task_key): # set_task_to_failure(course_id,task_type,task_key) # else : # raise AlreadyRunningError("requested task is already running") if _task_is_running(course_id, task_type, task_key): log.warning("Duplicate task found for task_type %s and task_key %s", task_type, task_key) raise AlreadyRunningError("requested task is already running") try: most_recent_id = tmaTask.objects.latest('id').id except tmaTask.DoesNotExist: most_recent_id = "None found" finally: log.warning( "No duplicate tasks found: task_type %s, task_key %s, and most recent task_id = %s", task_type, task_key, most_recent_id ) # Create log entry now, so that future requests will know it's running. return tmaTask.create(course_id, task_type, task_key, task_input, requester) def _get_xmodule_instance_args(request, task_id): """ Calculate parameters needed for instantiating xmodule instances. The `request_info` will be passed to a tracking log function, to provide information about the source of the task request. 
The `xqueue_callback_url_prefix` is used to permit old-style xqueue callbacks directly to the appropriate module in the LMS. The `task_id` is also passed to the tracking log function. """ request_info = {'username': request.user.username, 'user_id': request.user.id, 'ip': request.META['REMOTE_ADDR'], 'agent': request.META.get('HTTP_USER_AGENT', '').decode('latin1'), 'host': request.META['SERVER_NAME'], } xmodule_instance_args = {'xqueue_callback_url_prefix': get_xqueue_callback_url_prefix(request), 'request_info': request_info, 'task_id': task_id, } return xmodule_instance_args def _update_instructor_task(instructor_task, task_result): """ Updates and possibly saves a InstructorTask entry based on a task Result. Used when updated status is requested. The `instructor_task` that is passed in is updated in-place, but is usually not saved. In general, tasks that have finished (either with success or failure) should have their entries updated by the task itself, so are not updated here. Tasks that are still running are not updated and saved while they run. The one exception to the no-save rule are tasks that are in a "revoked" state. This may mean that the task never had the opportunity to update the InstructorTask entry. Tasks that are in progress and have subtasks doing the processing do not look to the task's AsyncResult object. When subtasks are running, the InstructorTask object itself is updated with the subtasks' progress, not any AsyncResult object. In this case, the InstructorTask is not updated at all. Calculates json to store in "task_output" field of the `instructor_task`, as well as updating the task_state. For a successful task, the json contains the output of the task result. For a failed task, the json contains "exception", "message", and "traceback" keys. A revoked task just has a "message" stating it was revoked. """ # Pull values out of the result object as close to each other as possible. # If we wait and check the values later, the values for the state and result # are more likely to have changed. Pull the state out first, and # then code assuming that the result may not exactly match the state. task_id = task_result.task_id result_state = task_result.state returned_result = task_result.result result_traceback = task_result.traceback # Assume we don't always save the InstructorTask entry if we don't have to, # but that in most cases we will update the InstructorTask in-place with its # current progress. entry_needs_updating = True entry_needs_saving = False task_output = None if instructor_task.task_state == PROGRESS and len(instructor_task.subtasks) > 0: # This happens when running subtasks: the result object is marked with SUCCESS, # meaning that the subtasks have successfully been defined. However, the InstructorTask # will be marked as in PROGRESS, until the last subtask completes and marks it as SUCCESS. # We want to ignore the parent SUCCESS if subtasks are still running, and just trust the # contents of the InstructorTask. entry_needs_updating = False elif result_state in [PROGRESS, SUCCESS]: # construct a status message directly from the task result's result: # it needs to go back with the entry passed in. 
log.info("background task (%s), state %s: result: %s", task_id, result_state, returned_result) task_output = InstructorTask.create_output_for_success(returned_result) elif result_state == FAILURE: # on failure, the result's result contains the exception that caused the failure exception = returned_result traceback = result_traceback if result_traceback is not None else '' log.warning("background task (%s) failed: %s %s", task_id, returned_result, traceback) task_output = tmaTask.create_output_for_failure(exception, result_traceback) elif result_state == REVOKED: # on revocation, the result's result doesn't contain anything # but we cannot rely on the worker thread to set this status, # so we set it here. entry_needs_saving = True log.warning("background task (%s) revoked.", task_id) task_output = tmaTask.create_output_for_revoked() # save progress and state into the entry, even if it's not being saved: # when celery is run in "ALWAYS_EAGER" mode, progress needs to go back # with the entry passed in. if entry_needs_updating: instructor_task.task_state = result_state if task_output is not None: instructor_task.task_output = task_output if entry_needs_saving: instructor_task.save() def get_updated_instructor_task(task_id): """ Returns InstructorTask object corresponding to a given `task_id`. If the InstructorTask thinks the task is still running, then the task's result is checked to return an updated state and output. """ # First check if the task_id is known try: instructor_task = tmaTask.objects.get(task_id=task_id) except InstructorTask.DoesNotExist: log.warning("query for InstructorTask status failed: task_id=(%s) not found", task_id) return None # if the task is not already known to be done, then we need to query # the underlying task's result object: if instructor_task.task_state not in READY_STATES: result = AsyncResult(task_id) _update_instructor_task(instructor_task, result) return instructor_task def get_status_from_instructor_task(instructor_task): """ Get the status for a given InstructorTask entry. Returns a dict, with the following keys: 'task_id': id assigned by LMS and used by celery. 'task_state': state of task as stored in celery's result store. 'in_progress': boolean indicating if task is still running. 'task_progress': dict containing progress information. This includes: 'attempted': number of attempts made 'succeeded': number of attempts that "succeeded" 'total': number of possible subtasks to attempt 'action_name': user-visible verb to use in status messages. Should be past-tense. 'duration_ms': how long the task has (or had) been running. 'exception': name of exception class raised in failed tasks. 'message': returned for failed and revoked tasks. 'traceback': optional, returned if task failed and produced a traceback. """ status = {} if instructor_task is not None: # status basic information matching what's stored in InstructorTask: status['task_id'] = instructor_task.task_id status['task_state'] = instructor_task.task_state status['in_progress'] = instructor_task.task_state not in READY_STATES if instructor_task.task_output is not None: status['task_progress'] = json.loads(instructor_task.task_output) return status def submit_task(request, task_type, task_class, course_key, task_input, task_key): """ Helper method to submit a task. Reserves the requested task, based on the `course_key`, `task_type`, and `task_key`, checking to see if the task is already running. The `task_input` is also passed so that it can be stored in the resulting InstructorTask entry. 
Arguments are extracted from the `request` provided by the originating server request. Then the task is submitted to run asynchronously, using the specified `task_class` and using the task_id constructed for it. Cannot be inside an atomic block. `AlreadyRunningError` is raised if the task is already running. """ with outer_atomic(): # check to see if task is already running, and reserve it otherwise: instructor_task = _reserve_task(course_key, task_type, task_key, task_input, request.user) # make sure all data has been committed before handing off task to celery. task_id = instructor_task.task_id task_args = [instructor_task.id, _get_xmodule_instance_args(request, task_id)] task_class.apply_async(task_args, task_id=task_id) return instructor_task
agpl-3.0
-2,607,304,293,360,572,400
44.560886
116
0.669837
false
4.114118
false
false
false
breandan/java-algebra-system
examples/polypower.py
1
1095
# # jython examples for jas. # $Id$ # from java.lang import System from java.lang import Integer from jas import PolyRing, ZZ, QQ, GF, ZM # sparse polynomial powers #r = Ring( "Mod 1152921504606846883 (x,y,z) G" ); #r = Ring( "Rat(x,y,z) G" ); #r = Ring( "C(x,y,z) G" ); #r = Ring( "Z(x,y,z) L" ); r = PolyRing( ZZ(), "(x,y,z)", PolyRing.lex ); print "Ring: " + str(r); print; ps = """ ( ( 1 + x^2147483647 + y^2147483647 + z^2147483647 ) ( 1 + x + y + z ) ( 10000000001 + 10000000001 x + 10000000001 y + 10000000001 z ) ) """; f = r.ideal( ps ); print "Ideal: " + str(f); print; pset = f.pset; print "pset:", pset; print; plist = pset.list; print "plist:", plist; print; p = plist[0]; #p = plist[2]; print "p:", p; print; q = p; for i in range(1,20): q = q.multiply(p); print "q:", q.length(); print; q1 = q.sum( r.ring.getONE() ); #print "q1:", q1; print "q1:", q1.length(); print; t = System.currentTimeMillis(); q2 = q.multiply(q1); t = System.currentTimeMillis() - t; print "q2:", q2.length(); print "t:",t; print; print "Integer.MAX_VALUE = ", Integer.MAX_VALUE;
gpl-2.0
5,131,868,069,438,154,000
15.102941
64
0.588128
false
2.257732
false
false
false
zhinaonet/sqlmap-z
lib/parse/cmdline.py
1
45290
#!/usr/bin/env python """ Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ import os import re import shlex import sys from optparse import OptionError from optparse import OptionGroup from optparse import OptionParser from optparse import SUPPRESS_HELP from lib.core.common import checkDeprecatedOptions from lib.core.common import checkSystemEncoding from lib.core.common import dataToStdout from lib.core.common import expandMnemonics from lib.core.common import getUnicode from lib.core.data import cmdLineOptions from lib.core.data import conf from lib.core.data import logger from lib.core.defaults import defaults from lib.core.enums import AUTOCOMPLETE_TYPE from lib.core.exception import SqlmapShellQuitException from lib.core.exception import SqlmapSyntaxException from lib.core.settings import BASIC_HELP_ITEMS from lib.core.settings import DUMMY_URL from lib.core.settings import IS_WIN from lib.core.settings import MAX_HELP_OPTION_LENGTH from lib.core.settings import VERSION_STRING from lib.core.shell import autoCompletion from lib.core.shell import clearHistory from lib.core.shell import loadHistory from lib.core.shell import saveHistory def cmdLineParser(argv=None): """ This function parses the command line parameters and arguments """ if not argv: argv = sys.argv checkSystemEncoding() # Reference: https://stackoverflow.com/a/4012683 (Note: previously used "...sys.getfilesystemencoding() or UNICODE_ENCODING") _ = getUnicode(os.path.basename(argv[0]), encoding=sys.stdin.encoding) usage = "%s%s [options]" % ("python " if not IS_WIN else "", \ "\"%s\"" % _ if " " in _ else _) parser = OptionParser(usage=usage) try: parser.add_option("--hh", dest="advancedHelp", action="store_true", help="Show advanced help message and exit") parser.add_option("--version", dest="showVersion", action="store_true", help="Show program's version number and exit") parser.add_option("-v", dest="verbose", type="int", help="Verbosity level: 0-6 (default %d)" % defaults.verbose) # Target options target = OptionGroup(parser, "Target", "At least one of these " "options has to be provided to define the target(s)") target.add_option("-d", dest="direct", help="Connection string " "for direct database connection") target.add_option("-u", "--url", dest="url", help="Target URL (e.g. \"http://www.site.com/vuln.php?id=1\")") target.add_option("-l", dest="logFile", help="Parse target(s) from Burp " "or WebScarab proxy log file") target.add_option("-x", dest="sitemapUrl", help="Parse target(s) from remote sitemap(.xml) file") target.add_option("-m", dest="bulkFile", help="Scan multiple targets given " "in a textual file ") target.add_option("-r", dest="requestFile", help="Load HTTP request from a file") target.add_option("-g", dest="googleDork", help="Process Google dork results as target URLs") target.add_option("-c", dest="configFile", help="Load options from a configuration INI file") # Request options request = OptionGroup(parser, "Request", "These options can be used " "to specify how to connect to the target URL") request.add_option("--method", dest="method", help="Force usage of given HTTP method (e.g. 
PUT)") request.add_option("--data", dest="data", help="Data string to be sent through POST") request.add_option("--param-del", dest="paramDel", help="Character used for splitting parameter values") request.add_option("--cookie", dest="cookie", help="HTTP Cookie header value") request.add_option("--cookie-del", dest="cookieDel", help="Character used for splitting cookie values") request.add_option("--load-cookies", dest="loadCookies", help="File containing cookies in Netscape/wget format") request.add_option("--drop-set-cookie", dest="dropSetCookie", action="store_true", help="Ignore Set-Cookie header from response") request.add_option("--user-agent", dest="agent", help="HTTP User-Agent header value") request.add_option("--random-agent", dest="randomAgent", action="store_true", help="Use randomly selected HTTP User-Agent header value") request.add_option("--host", dest="host", help="HTTP Host header value") request.add_option("--referer", dest="referer", help="HTTP Referer header value") request.add_option("-H", "--header", dest="header", help="Extra header (e.g. \"X-Forwarded-For: 127.0.0.1\")") request.add_option("--headers", dest="headers", help="Extra headers (e.g. \"Accept-Language: fr\\nETag: 123\")") request.add_option("--auth-type", dest="authType", help="HTTP authentication type " "(Basic, Digest, NTLM or PKI)") request.add_option("--auth-cred", dest="authCred", help="HTTP authentication credentials " "(name:password)") request.add_option("--auth-file", dest="authFile", help="HTTP authentication PEM cert/private key file") request.add_option("--ignore-401", dest="ignore401", action="store_true", help="Ignore HTTP Error 401 (Unauthorized)") request.add_option("--ignore-proxy", dest="ignoreProxy", action="store_true", help="Ignore system default proxy settings") request.add_option("--ignore-redirects", dest="ignoreRedirects", action="store_true", help="Ignore redirection attempts") request.add_option("--ignore-timeouts", dest="ignoreTimeouts", action="store_true", help="Ignore connection timeouts") request.add_option("--proxy", dest="proxy", help="Use a proxy to connect to the target URL") request.add_option("--proxy-cred", dest="proxyCred", help="Proxy authentication credentials " "(name:password)") request.add_option("--proxy-file", dest="proxyFile", help="Load proxy list from a file") request.add_option("--tor", dest="tor", action="store_true", help="Use Tor anonymity network") request.add_option("--tor-port", dest="torPort", help="Set Tor proxy port other than default") request.add_option("--tor-type", dest="torType", help="Set Tor proxy type (HTTP, SOCKS4 or SOCKS5 (default))") request.add_option("--check-tor", dest="checkTor", action="store_true", help="Check to see if Tor is used properly") request.add_option("--delay", dest="delay", type="float", help="Delay in seconds between each HTTP request") request.add_option("--timeout", dest="timeout", type="float", help="Seconds to wait before timeout connection " "(default %d)" % defaults.timeout) request.add_option("--retries", dest="retries", type="int", help="Retries when the connection timeouts " "(default %d)" % defaults.retries) request.add_option("--randomize", dest="rParam", help="Randomly change value for given parameter(s)") request.add_option("--safe-url", dest="safeUrl", help="URL address to visit frequently during testing") request.add_option("--safe-post", dest="safePost", help="POST data to send to a safe URL") request.add_option("--safe-req", dest="safeReqFile", help="Load safe HTTP request from a file") 
request.add_option("--safe-freq", dest="safeFreq", type="int", help="Test requests between two visits to a given safe URL") request.add_option("--skip-urlencode", dest="skipUrlEncode", action="store_true", help="Skip URL encoding of payload data") request.add_option("--csrf-token", dest="csrfToken", help="Parameter used to hold anti-CSRF token") request.add_option("--csrf-url", dest="csrfUrl", help="URL address to visit to extract anti-CSRF token") request.add_option("--force-ssl", dest="forceSSL", action="store_true", help="Force usage of SSL/HTTPS") request.add_option("--hpp", dest="hpp", action="store_true", help="Use HTTP parameter pollution method") request.add_option("--eval", dest="evalCode", help="Evaluate provided Python code before the request (e.g. \"import hashlib;id2=hashlib.md5(id).hexdigest()\")") # Optimization options optimization = OptionGroup(parser, "Optimization", "These " "options can be used to optimize the " "performance of sqlmap") optimization.add_option("-o", dest="optimize", action="store_true", help="Turn on all optimization switches") optimization.add_option("--predict-output", dest="predictOutput", action="store_true", help="Predict common queries output") optimization.add_option("--keep-alive", dest="keepAlive", action="store_true", help="Use persistent HTTP(s) connections") optimization.add_option("--null-connection", dest="nullConnection", action="store_true", help="Retrieve page length without actual HTTP response body") optimization.add_option("--threads", dest="threads", type="int", help="Max number of concurrent HTTP(s) " "requests (default %d)" % defaults.threads) # Injection options injection = OptionGroup(parser, "Injection", "These options can be " "used to specify which parameters to test " "for, provide custom injection payloads and " "optional tampering scripts") injection.add_option("-p", dest="testParameter", help="Testable parameter(s)") injection.add_option("--skip", dest="skip", help="Skip testing for given parameter(s)") injection.add_option("--skip-static", dest="skipStatic", action="store_true", help="Skip testing parameters that not appear to be dynamic") injection.add_option("--param-exclude", dest="paramExclude", help="Regexp to exclude parameters from testing (e.g. 
\"ses\")") injection.add_option("--dbms", dest="dbms", help="Force back-end DBMS to this value") injection.add_option("--dbms-cred", dest="dbmsCred", help="DBMS authentication credentials (user:password)") injection.add_option("--os", dest="os", help="Force back-end DBMS operating system " "to this value") injection.add_option("--invalid-bignum", dest="invalidBignum", action="store_true", help="Use big numbers for invalidating values") injection.add_option("--invalid-logical", dest="invalidLogical", action="store_true", help="Use logical operations for invalidating values") injection.add_option("--invalid-string", dest="invalidString", action="store_true", help="Use random strings for invalidating values") injection.add_option("--no-cast", dest="noCast", action="store_true", help="Turn off payload casting mechanism") injection.add_option("--no-escape", dest="noEscape", action="store_true", help="Turn off string escaping mechanism") injection.add_option("--prefix", dest="prefix", help="Injection payload prefix string") injection.add_option("--suffix", dest="suffix", help="Injection payload suffix string") injection.add_option("--tamper", dest="tamper", help="Use given script(s) for tampering injection data") # Detection options detection = OptionGroup(parser, "Detection", "These options can be " "used to customize the detection phase") detection.add_option("--level", dest="level", type="int", help="Level of tests to perform (1-5, " "default %d)" % defaults.level) detection.add_option("--risk", dest="risk", type="int", help="Risk of tests to perform (1-3, " "default %d)" % defaults.risk) detection.add_option("--string", dest="string", help="String to match when " "query is evaluated to True") detection.add_option("--not-string", dest="notString", help="String to match when " "query is evaluated to False") detection.add_option("--regexp", dest="regexp", help="Regexp to match when " "query is evaluated to True") detection.add_option("--code", dest="code", type="int", help="HTTP code to match when " "query is evaluated to True") detection.add_option("--text-only", dest="textOnly", action="store_true", help="Compare pages based only on the textual content") detection.add_option("--titles", dest="titles", action="store_true", help="Compare pages based only on their titles") # Techniques options techniques = OptionGroup(parser, "Techniques", "These options can be " "used to tweak testing of specific SQL " "injection techniques") techniques.add_option("--technique", dest="tech", help="SQL injection techniques to use " "(default \"%s\")" % defaults.tech) techniques.add_option("--time-sec", dest="timeSec", type="int", help="Seconds to delay the DBMS response " "(default %d)" % defaults.timeSec) techniques.add_option("--union-cols", dest="uCols", help="Range of columns to test for UNION query SQL injection") techniques.add_option("--union-char", dest="uChar", help="Character to use for bruteforcing number of columns") techniques.add_option("--union-from", dest="uFrom", help="Table to use in FROM part of UNION query SQL injection") techniques.add_option("--dns-domain", dest="dnsDomain", help="Domain name used for DNS exfiltration attack") techniques.add_option("--second-order", dest="secondOrder", help="Resulting page URL searched for second-order " "response") # Fingerprint options fingerprint = OptionGroup(parser, "Fingerprint") fingerprint.add_option("-f", "--fingerprint", dest="extensiveFp", action="store_true", help="Perform an extensive DBMS version fingerprint") # Enumeration options enumeration 
= OptionGroup(parser, "Enumeration", "These options can " "be used to enumerate the back-end database " "management system information, structure " "and data contained in the tables. Moreover " "you can run your own SQL statements") enumeration.add_option("-a", "--all", dest="getAll", action="store_true", help="Retrieve everything") enumeration.add_option("-b", "--banner", dest="getBanner", action="store_true", help="Retrieve DBMS banner") enumeration.add_option("--current-user", dest="getCurrentUser", action="store_true", help="Retrieve DBMS current user") enumeration.add_option("--current-db", dest="getCurrentDb", action="store_true", help="Retrieve DBMS current database") enumeration.add_option("--hostname", dest="getHostname", action="store_true", help="Retrieve DBMS server hostname") enumeration.add_option("--is-dba", dest="isDba", action="store_true", help="Detect if the DBMS current user is DBA") enumeration.add_option("--users", dest="getUsers", action="store_true", help="Enumerate DBMS users") enumeration.add_option("--passwords", dest="getPasswordHashes", action="store_true", help="Enumerate DBMS users password hashes") enumeration.add_option("--privileges", dest="getPrivileges", action="store_true", help="Enumerate DBMS users privileges") enumeration.add_option("--roles", dest="getRoles", action="store_true", help="Enumerate DBMS users roles") enumeration.add_option("--dbs", dest="getDbs", action="store_true", help="Enumerate DBMS databases") enumeration.add_option("--tables", dest="getTables", action="store_true", help="Enumerate DBMS database tables") enumeration.add_option("--columns", dest="getColumns", action="store_true", help="Enumerate DBMS database table columns") enumeration.add_option("--schema", dest="getSchema", action="store_true", help="Enumerate DBMS schema") enumeration.add_option("--count", dest="getCount", action="store_true", help="Retrieve number of entries for table(s)") enumeration.add_option("--dump", dest="dumpTable", action="store_true", help="Dump DBMS database table entries") enumeration.add_option("--dump-all", dest="dumpAll", action="store_true", help="Dump all DBMS databases tables entries") enumeration.add_option("--search", dest="search", action="store_true", help="Search column(s), table(s) and/or database name(s)") enumeration.add_option("--comments", dest="getComments", action="store_true", help="Retrieve DBMS comments") enumeration.add_option("-D", dest="db", help="DBMS database to enumerate") enumeration.add_option("-T", dest="tbl", help="DBMS database table(s) to enumerate") enumeration.add_option("-C", dest="col", help="DBMS database table column(s) to enumerate") enumeration.add_option("-X", dest="excludeCol", help="DBMS database table column(s) to not enumerate") enumeration.add_option("-U", dest="user", help="DBMS user to enumerate") enumeration.add_option("--exclude-sysdbs", dest="excludeSysDbs", action="store_true", help="Exclude DBMS system databases when " "enumerating tables") enumeration.add_option("--pivot-column", dest="pivotColumn", help="Pivot column name") enumeration.add_option("--where", dest="dumpWhere", help="Use WHERE condition while table dumping") enumeration.add_option("--start", dest="limitStart", type="int", help="First dump table entry to retrieve") enumeration.add_option("--stop", dest="limitStop", type="int", help="Last dump table entry to retrieve") enumeration.add_option("--first", dest="firstChar", type="int", help="First query output word character to retrieve") enumeration.add_option("--last", 
dest="lastChar", type="int", help="Last query output word character to retrieve") enumeration.add_option("--sql-query", dest="query", help="SQL statement to be executed") enumeration.add_option("--sql-shell", dest="sqlShell", action="store_true", help="Prompt for an interactive SQL shell") enumeration.add_option("--sql-file", dest="sqlFile", help="Execute SQL statements from given file(s)") # Brute force options brute = OptionGroup(parser, "Brute force", "These " "options can be used to run brute force " "checks") brute.add_option("--common-tables", dest="commonTables", action="store_true", help="Check existence of common tables") brute.add_option("--common-columns", dest="commonColumns", action="store_true", help="Check existence of common columns") # User-defined function options udf = OptionGroup(parser, "User-defined function injection", "These " "options can be used to create custom user-defined " "functions") udf.add_option("--udf-inject", dest="udfInject", action="store_true", help="Inject custom user-defined functions") udf.add_option("--shared-lib", dest="shLib", help="Local path of the shared library") # File system options filesystem = OptionGroup(parser, "File system access", "These options " "can be used to access the back-end database " "management system underlying file system") filesystem.add_option("--file-read", dest="rFile", help="Read a file from the back-end DBMS " "file system") filesystem.add_option("--file-write", dest="wFile", help="Write a local file on the back-end " "DBMS file system") filesystem.add_option("--file-dest", dest="dFile", help="Back-end DBMS absolute filepath to " "write to") # Takeover options takeover = OptionGroup(parser, "Operating system access", "These " "options can be used to access the back-end " "database management system underlying " "operating system") takeover.add_option("--os-cmd", dest="osCmd", help="Execute an operating system command") takeover.add_option("--os-shell", dest="osShell", action="store_true", help="Prompt for an interactive operating " "system shell") takeover.add_option("--os-pwn", dest="osPwn", action="store_true", help="Prompt for an OOB shell, " "Meterpreter or VNC") takeover.add_option("--os-smbrelay", dest="osSmb", action="store_true", help="One click prompt for an OOB shell, " "Meterpreter or VNC") takeover.add_option("--os-bof", dest="osBof", action="store_true", help="Stored procedure buffer overflow " "exploitation") takeover.add_option("--priv-esc", dest="privEsc", action="store_true", help="Database process user privilege escalation") takeover.add_option("--msf-path", dest="msfPath", help="Local path where Metasploit Framework " "is installed") takeover.add_option("--tmp-path", dest="tmpPath", help="Remote absolute path of temporary files " "directory") # Windows registry options windows = OptionGroup(parser, "Windows registry access", "These " "options can be used to access the back-end " "database management system Windows " "registry") windows.add_option("--reg-read", dest="regRead", action="store_true", help="Read a Windows registry key value") windows.add_option("--reg-add", dest="regAdd", action="store_true", help="Write a Windows registry key value data") windows.add_option("--reg-del", dest="regDel", action="store_true", help="Delete a Windows registry key value") windows.add_option("--reg-key", dest="regKey", help="Windows registry key") windows.add_option("--reg-value", dest="regVal", help="Windows registry key value") windows.add_option("--reg-data", dest="regData", help="Windows registry key 
value data") windows.add_option("--reg-type", dest="regType", help="Windows registry key value type") # General options general = OptionGroup(parser, "General", "These options can be used " "to set some general working parameters") general.add_option("-s", dest="sessionFile", help="Load session from a stored (.sqlite) file") general.add_option("-t", dest="trafficFile", help="Log all HTTP traffic into a " "textual file") general.add_option("--batch", dest="batch", action="store_true", help="Never ask for user input, use the default behaviour") general.add_option("--binary-fields", dest="binaryFields", help="Result fields having binary values (e.g. \"digest\")") general.add_option("--charset", dest="charset", help="Force character encoding used for data retrieval") general.add_option("--check-internet", dest="checkInternet", action="store_true", help="Check Internet connection before assessing the target") general.add_option("--crawl", dest="crawlDepth", type="int", help="Crawl the website starting from the target URL") general.add_option("--crawl-exclude", dest="crawlExclude", help="Regexp to exclude pages from crawling (e.g. \"logout\")") general.add_option("--csv-del", dest="csvDel", help="Delimiting character used in CSV output " "(default \"%s\")" % defaults.csvDel) general.add_option("--dump-format", dest="dumpFormat", help="Format of dumped data (CSV (default), HTML or SQLITE)") general.add_option("--eta", dest="eta", action="store_true", help="Display for each output the estimated time of arrival") general.add_option("--flush-session", dest="flushSession", action="store_true", help="Flush session files for current target") general.add_option("--forms", dest="forms", action="store_true", help="Parse and test forms on target URL") general.add_option("--fresh-queries", dest="freshQueries", action="store_true", help="Ignore query results stored in session file") general.add_option("--har", dest="harFile", help="Log all HTTP traffic into a HAR file") general.add_option("--hex", dest="hexConvert", action="store_true", help="Use DBMS hex function(s) for data retrieval") general.add_option("--output-dir", dest="outputDir", action="store", help="Custom output directory path") general.add_option("--parse-errors", dest="parseErrors", action="store_true", help="Parse and display DBMS error messages from responses") general.add_option("--save", dest="saveConfig", help="Save options to a configuration INI file") general.add_option("--scope", dest="scope", help="Regexp to filter targets from provided proxy log") general.add_option("--test-filter", dest="testFilter", help="Select tests by payloads and/or titles (e.g. ROW)") general.add_option("--test-skip", dest="testSkip", help="Skip tests by payloads and/or titles (e.g. BENCHMARK)") general.add_option("--update", dest="updateAll", action="store_true", help="Update sqlmap") # Miscellaneous options miscellaneous = OptionGroup(parser, "Miscellaneous") miscellaneous.add_option("-z", dest="mnemonics", help="Use short mnemonics (e.g. \"flu,bat,ban,tec=EU\")") miscellaneous.add_option("--alert", dest="alert", help="Run host OS command(s) when SQL injection is found") miscellaneous.add_option("--answers", dest="answers", help="Set question answers (e.g. 
\"quit=N,follow=N\")") miscellaneous.add_option("--beep", dest="beep", action="store_true", help="Beep on question and/or when SQL injection is found") miscellaneous.add_option("--cleanup", dest="cleanup", action="store_true", help="Clean up the DBMS from sqlmap specific " "UDF and tables") miscellaneous.add_option("--dependencies", dest="dependencies", action="store_true", help="Check for missing (non-core) sqlmap dependencies") miscellaneous.add_option("--disable-coloring", dest="disableColoring", action="store_true", help="Disable console output coloring") miscellaneous.add_option("--gpage", dest="googlePage", type="int", help="Use Google dork results from specified page number") miscellaneous.add_option("--identify-waf", dest="identifyWaf", action="store_true", help="Make a thorough testing for a WAF/IPS/IDS protection") miscellaneous.add_option("--mobile", dest="mobile", action="store_true", help="Imitate smartphone through HTTP User-Agent header") miscellaneous.add_option("--offline", dest="offline", action="store_true", help="Work in offline mode (only use session data)") miscellaneous.add_option("--purge-output", dest="purgeOutput", action="store_true", help="Safely remove all content from output directory") miscellaneous.add_option("--skip-waf", dest="skipWaf", action="store_true", help="Skip heuristic detection of WAF/IPS/IDS protection") miscellaneous.add_option("--smart", dest="smart", action="store_true", help="Conduct thorough tests only if positive heuristic(s)") miscellaneous.add_option("--sqlmap-shell", dest="sqlmapShell", action="store_true", help="Prompt for an interactive sqlmap shell") miscellaneous.add_option("--tmp-dir", dest="tmpDir", help="Local directory for storing temporary files") miscellaneous.add_option("--web-root", dest="webRoot", help="Web server document root directory (e.g. 
\"/var/www\")") miscellaneous.add_option("--wizard", dest="wizard", action="store_true", help="Simple wizard interface for beginner users") # Hidden and/or experimental options parser.add_option("--dummy", dest="dummy", action="store_true", help=SUPPRESS_HELP) parser.add_option("--murphy-rate", dest="murphyRate", type="int", help=SUPPRESS_HELP) parser.add_option("--disable-precon", dest="disablePrecon", action="store_true", help=SUPPRESS_HELP) parser.add_option("--disable-stats", dest="disableStats", action="store_true", help=SUPPRESS_HELP) parser.add_option("--profile", dest="profile", action="store_true", help=SUPPRESS_HELP) parser.add_option("--force-dns", dest="forceDns", action="store_true", help=SUPPRESS_HELP) parser.add_option("--force-threads", dest="forceThreads", action="store_true", help=SUPPRESS_HELP) parser.add_option("--smoke-test", dest="smokeTest", action="store_true", help=SUPPRESS_HELP) parser.add_option("--live-test", dest="liveTest", action="store_true", help=SUPPRESS_HELP) parser.add_option("--stop-fail", dest="stopFail", action="store_true", help=SUPPRESS_HELP) parser.add_option("--run-case", dest="runCase", help=SUPPRESS_HELP) # API options parser.add_option("--api", dest="api", action="store_true", help=SUPPRESS_HELP) parser.add_option("--taskid", dest="taskid", help=SUPPRESS_HELP) parser.add_option("--database", dest="database", help=SUPPRESS_HELP) parser.add_option_group(target) parser.add_option_group(request) parser.add_option_group(optimization) parser.add_option_group(injection) parser.add_option_group(detection) parser.add_option_group(techniques) parser.add_option_group(fingerprint) parser.add_option_group(enumeration) parser.add_option_group(brute) parser.add_option_group(udf) parser.add_option_group(filesystem) parser.add_option_group(takeover) parser.add_option_group(windows) parser.add_option_group(general) parser.add_option_group(miscellaneous) # Dirty hack to display longer options without breaking into two lines def _(self, *args): retVal = parser.formatter._format_option_strings(*args) if len(retVal) > MAX_HELP_OPTION_LENGTH: retVal = ("%%.%ds.." 
% (MAX_HELP_OPTION_LENGTH - parser.formatter.indent_increment)) % retVal return retVal parser.formatter._format_option_strings = parser.formatter.format_option_strings parser.formatter.format_option_strings = type(parser.formatter.format_option_strings)(_, parser, type(parser)) # Dirty hack for making a short option '-hh' option = parser.get_option("--hh") option._short_opts = ["-hh"] option._long_opts = [] # Dirty hack for inherent help message of switch '-h' option = parser.get_option("-h") option.help = option.help.capitalize().replace("this help", "basic help") _ = [] prompt = False advancedHelp = True extraHeaders = [] # Reference: https://stackoverflow.com/a/4012683 (Note: previously used "...sys.getfilesystemencoding() or UNICODE_ENCODING") for arg in argv: _.append(getUnicode(arg, encoding=sys.stdin.encoding)) argv = _ checkDeprecatedOptions(argv) prompt = "--sqlmap-shell" in argv if prompt: parser.usage = "" cmdLineOptions.sqlmapShell = True _ = ["x", "q", "exit", "quit", "clear"] for option in parser.option_list: _.extend(option._long_opts) _.extend(option._short_opts) for group in parser.option_groups: for option in group.option_list: _.extend(option._long_opts) _.extend(option._short_opts) autoCompletion(AUTOCOMPLETE_TYPE.SQLMAP, commands=_) while True: command = None try: command = raw_input("sqlmap-shell> ").strip() command = getUnicode(command, encoding=sys.stdin.encoding) except (KeyboardInterrupt, EOFError): print raise SqlmapShellQuitException if not command: continue elif command.lower() == "clear": clearHistory() dataToStdout("[i] history cleared\n") saveHistory(AUTOCOMPLETE_TYPE.SQLMAP) elif command.lower() in ("x", "q", "exit", "quit"): raise SqlmapShellQuitException elif command[0] != '-': dataToStdout("[!] invalid option(s) provided\n") dataToStdout("[i] proper example: '-u http://www.site.com/vuln.php?id=1 --banner'\n") else: saveHistory(AUTOCOMPLETE_TYPE.SQLMAP) loadHistory(AUTOCOMPLETE_TYPE.SQLMAP) break try: for arg in shlex.split(command): argv.append(getUnicode(arg, encoding=sys.stdin.encoding)) except ValueError, ex: raise SqlmapSyntaxException, "something went wrong during command line parsing ('%s')" % ex.message for i in xrange(len(argv)): if argv[i] == "-hh": argv[i] = "-h" elif len(argv[i]) > 1 and all(ord(_) in xrange(0x2018, 0x2020) for _ in ((argv[i].split('=', 1)[-1].strip() or ' ')[0], argv[i][-1])): dataToStdout("[!] copy-pasting illegal (non-console) quote characters from Internet is, well, illegal (%s)\n" % argv[i]) raise SystemExit elif len(argv[i]) > 1 and u"\uff0c" in argv[i].split('=', 1)[-1]: dataToStdout("[!] copy-pasting illegal (non-console) comma characters from Internet is, well, illegal (%s)\n" % argv[i]) raise SystemExit elif re.search(r"\A-\w=.+", argv[i]): dataToStdout("[!] 
potentially miswritten (illegal '=') short option detected ('%s')\n" % argv[i]) raise SystemExit elif argv[i] == "-H": if i + 1 < len(argv): extraHeaders.append(argv[i + 1]) elif re.match(r"\A\d+!\Z", argv[i]) and argv[max(0, i - 1)] == "--threads" or re.match(r"\A--threads.+\d+!\Z", argv[i]): argv[i] = argv[i][:-1] conf.skipThreadCheck = True elif argv[i] == "--version": print VERSION_STRING.split('/')[-1] raise SystemExit elif argv[i] in ("-h", "--help"): advancedHelp = False for group in parser.option_groups[:]: found = False for option in group.option_list: if option.dest not in BASIC_HELP_ITEMS: option.help = SUPPRESS_HELP else: found = True if not found: parser.option_groups.remove(group) for verbosity in (_ for _ in argv if re.search(r"\A\-v+\Z", _)): try: if argv.index(verbosity) == len(argv) - 1 or not argv[argv.index(verbosity) + 1].isdigit(): conf.verbose = verbosity.count('v') + 1 del argv[argv.index(verbosity)] except (IndexError, ValueError): pass try: (args, _) = parser.parse_args(argv) except UnicodeEncodeError, ex: dataToStdout("\n[!] %s\n" % ex.object.encode("unicode-escape")) raise SystemExit except SystemExit: if "-h" in argv and not advancedHelp: dataToStdout("\n[!] to see full list of options run with '-hh'\n") raise if extraHeaders: if not args.headers: args.headers = "" delimiter = "\\n" if "\\n" in args.headers else "\n" args.headers += delimiter + delimiter.join(extraHeaders) # Expand given mnemonic options (e.g. -z "ign,flu,bat") for i in xrange(len(argv) - 1): if argv[i] == "-z": expandMnemonics(argv[i + 1], parser, args) if args.dummy: args.url = args.url or DUMMY_URL if not any((args.direct, args.url, args.logFile, args.bulkFile, args.googleDork, args.configFile, \ args.requestFile, args.updateAll, args.smokeTest, args.liveTest, args.wizard, args.dependencies, \ args.purgeOutput, args.sitemapUrl)): errMsg = "missing a mandatory option (-d, -u, -l, -m, -r, -g, -c, -x, --wizard, --update, --purge-output or --dependencies), " errMsg += "use -h for basic or -hh for advanced help\n" parser.error(errMsg) return args except (OptionError, TypeError), e: parser.error(e) except SystemExit: # Protection against Windows dummy double clicking if IS_WIN: dataToStdout("\nPress Enter to continue...") raw_input() raise debugMsg = "parsing command line" logger.debug(debugMsg)
gpl-3.0
8,767,127,037,642,698,000
44.609265
146
0.520203
false
4.882492
true
false
false
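The content field above is sqlmap's command-line parser, which builds its interface from optparse OptionGroups. Below is a minimal, hypothetical sketch of that grouped-option pattern; the parser usage string, group, and option values shown here are invented for illustration and are not taken from the record.

# Illustrative sketch only: the grouped optparse pattern used throughout the
# cmdlineparser content above. Names and defaults are invented for the example.
from optparse import OptionParser, OptionGroup

parser = OptionParser(usage="example.py [options]")

detection = OptionGroup(parser, "Detection",
                        "Options that customize the detection phase")
detection.add_option("--level", dest="level", type="int", default=1,
                     help="Level of tests to perform (1-5, default 1)")
detection.add_option("--string", dest="string",
                     help="String to match when query is evaluated to True")
parser.add_option_group(detection)

# Parsing a sample argument vector yields a namespace with typed values.
(options, args) = parser.parse_args(["--level", "3"])
print(options.level)  # -> 3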
Amoki/Amoki-Music
endpoints/tests/test_musics.py
1
1477
from utils.testcase import EndpointTestCase
from rest_framework import status
from music.models import Music
from player.models import Room
import sure


class TestMusics(EndpointTestCase):
    def test_get(self):
        # Create a classic music that should be sent by /musics
        Music(
            music_id="a",
            name="a",
            thumbnail="https://a.com",
            total_duration=114,
            duration=114,
            url="https://www.a.com",
            source="youtube",
            room=self.r,
        ).save()
        # Create new room and a new music that should not be sent by /musics
        r2 = Room(name='b', password='b')
        r2.save()
        Music(
            music_id="a",
            name="a",
            thumbnail="https://a.com",
            total_duration=114,
            duration=114,
            url="https://www.a.com",
            source="youtube",
            room=r2,
        ).save()

        response = self.client.get('/musics')
        response.status_code.should.eql(status.HTTP_200_OK)
        response.data.should.have.key('count')
        response.data.should.have.key('next')
        response.data.should.have.key('previous')
        response.data.should.have.key('results')
        response.data['results'].should.be.a(list)
        response.data['results'].should.have.length_of(1)

        self.assertResponseEqualsMusic(response.data['results'][0], Music.objects.get(music_id='a', room=self.r))
mit
-8,897,600,273,422,250,000
29.142857
113
0.572106
false
3.836364
false
false
false
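The test in the record above relies on the `sure` library's fluent assertions (.should.eql, .should.have.key, .should.have.length_of). A minimal sketch of that assertion style on plain built-ins follows, assuming sure's monkey-patching of built-in types is active as it is in the test; the sample data is invented for illustration.

# Illustrative sketch only: `sure`-style assertions as used in the test above.
import sure  # noqa: F401 - importing sure patches `.should` onto objects

data = {"count": 1, "results": [{"music_id": "a"}]}

data.should.have.key("count")            # dict key membership
data["results"].should.be.a(list)        # type check
data["results"].should.have.length_of(1) # length check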
Tasignotas/topographica_mirror
topo/base/cf.py
1
47216
""" ConnectionField and associated classes. This module defines some basic classes of objects used to create simulations of cortical sheets that take input through connection fields that project from other cortical sheets (or laterally from themselves). ConnectionField: Holds a single connection field within a CFProjection. CFProjection: A set of ConnectionFields mapping from a Sheet into a ProjectionSheet. CFSheet: A subclass of ProjectionSheet that provides an interface to the underlying ConnectionFields in any projection of type CFProjection. """ from copy import copy import numpy as np import param from holoviews import Grid, Dimension, ViewMap from holoviews.interface.collector import AttrTree, AttrDict from holoviews.core import BoundingBox, BoundingRegionParameter, Slice import patterngenerator from patterngenerator import PatternGenerator from functionfamily import TransferFn,IdentityTF from functionfamily import LearningFn,Hebbian,IdentityLF from functionfamily import ResponseFn,DotProduct from functionfamily import CoordinateMapperFn,IdentityMF from projection import Projection,ProjectionSheet from sheetview import CFView def simple_vectorize(fn,num_outputs=1,output_type=object,doc=''): """ Wrapper for Numpy.vectorize to make it work properly with different Numpy versions. """ # Numpy.vectorize returns a callable object that applies the given # fn to a list or array. By default, Numpy.vectorize will call # the supplied fn an extra time to determine the output types, # which is a big problem for any function with side effects. # Supplying arguments is supposed to avoid the problem, but as of # Numpy 1.6.1 (and apparently since at least 1.1.1) this feature # was broken: # # $ ./topographica -c "def f(x): print x" -c "import numpy" -c "numpy.vectorize(f,otypes=numpy.sctype2char(object)*1)([3,4])" # 3 # 3 # 4 # # Numpy 1.7.0 seems to fix the problem: # $ ./topographica -c "def f(x): print x" -c "import numpy" -c "numpy.vectorize(f,otypes=numpy.sctype2char(object)*1)([3,4])" # 3 # 4 # # To make it work with all versions of Numpy, we use # numpy.vectorize as-is for versions > 1.7.0, and a nasty hack for # previous versions. # Simple Numpy 1.7.0 version: if int(np.version.version[0]) >= 1 and int(np.version.version[2]) >= 7: return np.vectorize(fn,otypes=np.sctype2char(output_type)*num_outputs, doc=doc) # Otherwise, we have to mess with Numpy's internal data structures to make it work. vfn = np.vectorize(fn,doc=doc) vfn.nout=num_outputs # number of outputs of fn output_typecode = np.sctype2char(output_type) vfn.otypes=output_typecode*num_outputs # typecodes of outputs of fn import inspect try: fn_code = fn.func_code if hasattr(fn,'func_code') else fn.__call__.func_code except: raise TypeError("Couldn't find code of %s"%fn) fn_args = inspect.getargs(fn_code)[0] extra = 1 if fn_args[0]=='self' else 0 vfn.lastcallargs=len(fn_args)-extra # num args of fn return vfn #: Specified explicitly when creating weights matrix - required #: for optimized C functions. weight_type = np.float32 class NullCFError(ValueError): """ Error thrown when trying to create an empty CF. """ def __init__(self,x,y,input,rows,cols): ValueError.__init__(self,"ConnectionField at (%s,%s) (input_sheet=%s) has a zero-sized weights matrix (%s,%s); you may need to supply a larger bounds_template or increase the density of the sheet."%(x,y,input,rows,cols)) class ConnectionField(object): """ A set of weights on one input Sheet. 
Each ConnectionField contributes to the activity of one unit on the output sheet, and is normally used as part of a Projection including many other ConnectionFields. """ __slots__ = ['weights','input_sheet_slice','mask', '_has_norm_total','_norm_total'] def __get_norm_total(self): """ Return the stored norm_value, if any, or else the current sum of the weights. See the norm_total property for more details. """ # The actual value is cached in _norm_total. if self._has_norm_total[0]>0: return self._norm_total[0] else: # CEBALERT: what was I playing with for this before? return np.sum(np.abs(self.weights),dtype=np.float64) def __set_norm_total(self,new_norm_total): """ Set an explicit value to be returned by norm_total. See the norm_total property for more details. """ self._has_norm_total[0] = 1 self._norm_total[0] = new_norm_total def __del_norm_total(self): """ Delete any cached norm_total that may have been set. See the norm_total property for more details. """ self._has_norm_total[0] = 0 # CB: Accessing norm_total as a property from the C code takes # about 2% of run time for 90 iterations of lissom_oo_or. (As of # r8139, using floating-point simulation time.) norm_total = property(__get_norm_total,__set_norm_total,__del_norm_total, """ The norm_total property returns a value useful in computing a sum-based weight normalization. By default, the value returned is simply the current sum of the connection weights. However, another value can be substituted by setting norm_total explicitly, and this cached value will then be returned instead. This mechanism has two main purposes. First, it allows a learning function to cache the sum value for an output function to use later without computation, which can result in significant time savings. Second, the extra level of indirection allows the sum value to be manipulated before it is used, to implement operations like joint normalization across corresponding CFs in multiple Projections. Apart from such cases, norm_total can be ignored. Note that every person who uses a class that sets or gets norm_total must be very careful to ensure that stale values will never be accessed. A good way to do this is to make sure that the value is only set just before it will be used, and deleted as soon as it has been accessed. WARNING: Any c-optimized code can bypass this property and access directly _has_norm_total, _norm_total """) def get_bounds(self,input_sheet): return self.input_sheet_slice.compute_bounds(input_sheet) # Class attribute to switch to legacy weight generation if False independent_weight_generation = True # CEBALERT: # template and mask: usually created ONCE by CFProjection and # specified as a Slice and array (respectively). Otherwise, # can be specified as BoundingBox and patterngenerator. # Note that BoundingBox() is ok for a default even though it's # mutable because we copy it inside init. Constant() is ok too # because mask and weights_generator are not modified. def __init__(self,input_sheet,x=0.0,y=0.0,template=BoundingBox(radius=0.1), weights_generator=patterngenerator.Constant(), mask=patterngenerator.Constant(), output_fns=None,min_matrix_radius=1, label=None): """ Create weights at the specified (x,y) location on the specified input_sheet. The supplied template (if a BoundingRegion) is converted to a Slice, moved to the specified (x,y) location, and then the weights pattern is drawn inside by the weights_generator. 
Note that if the appropriate template Slice is already known, then it can be passed in instead of a BoundingRegion template. This slice will then be used directly, instead of converting the template into a Slice. The supplied template object itself will not be modified (it is copied before use). The mask allows the weights to be limited to being non-zero in a subset of the rectangular weights area. The actual mask used is a view of the given mask created by cropping to the boundaries of the input_sheet, so that the weights all correspond to actual locations in the input sheet. For instance, if a circular pattern of weights is desired, the mask should have a disk-shaped pattern of elements with value 1, surrounded by elements with the value 0. If the CF extends over the edge of the input sheet then the weights will actually be half-moon (or similar) rather than circular. """ #print "Create CF",input_sheet.name,x,y,"template=",template,"wg=",weights_generator,"m=",mask,"ofs=",output_fns,"min r=",min_matrix_radius template = copy(template) if not isinstance(template,Slice): template = Slice(template,input_sheet,force_odd=True, min_matrix_radius=min_matrix_radius) # Note: if passed in, mask is shared between CFs (but not if created here) if not hasattr(mask,'view'): mask = _create_mask(mask,template.compute_bounds(input_sheet), # CEBALERT: it's not really worth adding more ALERTs on this # topic, but...there's no way for the CF to control autosize # and threshold. input_sheet,True,0.5) # CB: has to be set for C code. Can't be initialized at the # class level, or it would become a read-only class attribute # (because it's a slot: # http://docs.python.org/reference/datamodel.html). Can we # somehow avoid having to think about _has_norm_total in the # python code? Could the C code initialize this value? self._has_norm_total=np.array([0],dtype=np.int32) self._norm_total=np.array([0.0],dtype=np.float64) if output_fns is None: output_fns = [] # CEBALERT: now even more confusing; weights_slice is # different from input_sheet_slice. At least need to rename. weights_slice = self._create_input_sheet_slice(input_sheet,x,y,template,min_matrix_radius) # CBNOTE: this would be clearer (but not perfect, and probably slower) # m = mask_template[self.weights_slice()] self.mask = weights_slice.submatrix(mask) # view of original mask self.mask = np.array(self.mask,copy=1) # CEBALERT: why is this necessary? # (without it, optimized learning function creates artifacts in CFs at # left and right edges of sheet, at some densities) # CBENHANCEMENT: might want to do something about a size # that's specified (right now the size is assumed to be that # of the bounds) # shouldn't be extra computation of boundingbox because it's gone from Slice.__init__; could avoid extra lookups by getting straight from slice pattern_params = dict(x=x,y=y,bounds=self.get_bounds(input_sheet), xdensity=input_sheet.xdensity, ydensity=input_sheet.ydensity, mask=self.mask) controlled_weights = (param.Dynamic.time_dependent and isinstance(param.Dynamic.time_fn, param.Time) and self.independent_weight_generation) if controlled_weights: with param.Dynamic.time_fn as t: t(0) # Initialize weights at time zero. # Controls random streams name = "%s_CF (%.5f, %.5f)" % ('' if label is None else label, x,y) w = weights_generator(**dict(pattern_params, name=name)) else: w = weights_generator(**pattern_params) # CEBALERT: unnecessary copy! Pass type to PG & have it draw # in that. 
(Should be simple, except making it work for all # the PG subclasses that override array creation in various # ways (producing or using inconsistent types) turned out to # be too painful.) self.weights = w.astype(weight_type) # CEBHACKALERT: the system of masking through multiplication # by 0 works for now, while the output_fns are all # multiplicative. But in the long run we need a better way to # apply the mask. The same applies anywhere the mask is used, # including in learningfn/. We should investigate masked # arrays (from numpy). for of in output_fns: of(self.weights) # CB: can this be renamed to something better? def _create_input_sheet_slice(self,input_sheet,x,y,template,min_matrix_radius): """ Create the input_sheet_slice, which provides the appropriate Slice for this CF on the input_sheet (as well as providing this CF's exact bounds). Also creates the weights_slice, which provides the Slice for this weights matrix (in case it must be cropped at an edge). """ # copy required because the template gets modified here but # needs to be used again input_sheet_slice = copy(template) input_sheet_slice.positionedcrop(x,y,input_sheet) input_sheet_slice.crop_to_sheet(input_sheet) # weights matrix cannot have a zero-sized dimension (could # happen at this stage because of cropping) nrows,ncols = input_sheet_slice.shape_on_sheet() if nrows<1 or ncols<1: raise NullCFError(x,y,input_sheet,nrows,ncols) self.input_sheet_slice = input_sheet_slice # not copied because we don't use again template.positionlesscrop(x,y,input_sheet) return template # CEBALERT: unnecessary method; can use something like # activity[cf.input_sheet_slice()] def get_input_matrix(self, activity): # CBNOTE: again, this might be clearer (but probably slower): # activity[self.input_sheet_slice()] return self.input_sheet_slice.submatrix(activity) class CFPResponseFn(param.Parameterized): """ Map an input activity matrix into an output matrix using the CFs in a CFProjection. Objects in this hierarchy of callable function objects compute a response matrix when given an input pattern and a set of ConnectionField objects. Typically used as part of the activation function for a neuron, computing activation for one Projection. Objects in this class must support being called as a function with the arguments specified below, and are assumed to modify the activity matrix in place. """ __abstract=True def __call__(self, iterator, input_activity, activity, strength, **params): raise NotImplementedError class CFPRF_Plugin(CFPResponseFn): """ Generic large-scale response function based on a simple single-CF function. Applies the single_cf_fn to each CF in turn. For the default single_cf_fn of DotProduct(), does a basic dot product of each CF with the corresponding slice of the input array. This function is likely to be slow to run, but it is easy to extend with any arbitrary single-CF response function. The single_cf_fn must be a function f(X,W) that takes two identically shaped matrices X (the input) and W (the ConnectionField weights) and computes a scalar activation value based on those weights. 
""" single_cf_fn = param.ClassSelector(ResponseFn,default=DotProduct(), doc="Accepts a ResponseFn that will be applied to each CF individually.") def __call__(self, iterator, input_activity, activity, strength): single_cf_fn = self.single_cf_fn for cf,i in iterator(): X = cf.input_sheet_slice.submatrix(input_activity) activity.flat[i] = single_cf_fn(X,cf.weights) activity *= strength class CFPLearningFn(param.Parameterized): """ Compute new CFs for a CFProjection based on input and output activity values. Objects in this hierarchy of callable function objects compute a new set of CFs when given input and output patterns and a set of ConnectionField objects. Used for updating the weights of one CFProjection. Objects in this class must support being called as a function with the arguments specified below. """ __abstract = True def constant_sum_connection_rate(self,n_units,learning_rate): """ Return the learning rate for a single connection assuming that the total rate is to be divided evenly among all the units in the connection field. """ return float(learning_rate)/n_units # JABALERT: Should the learning_rate be a parameter of this object instead of an argument? def __call__(self, iterator, input_activity, output_activity, learning_rate, **params): """ Apply this learning function to the given set of ConnectionFields, and input and output activities, using the given learning_rate. """ raise NotImplementedError class CFPLF_Identity(CFPLearningFn): """CFLearningFunction performing no learning.""" single_cf_fn = param.ClassSelector(LearningFn,default=IdentityLF(),constant=True) def __call__(self, iterator, input_activity, output_activity, learning_rate, **params): pass class CFPLF_Plugin(CFPLearningFn): """CFPLearningFunction applying the specified single_cf_fn to each CF.""" single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(), doc="Accepts a LearningFn that will be applied to each CF individually.") def __call__(self, iterator, input_activity, output_activity, learning_rate, **params): """Apply the specified single_cf_fn to every CF.""" single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate) # avoid evaluating these references each time in the loop single_cf_fn = self.single_cf_fn for cf,i in iterator(): single_cf_fn(cf.get_input_matrix(input_activity), output_activity.flat[i], cf.weights, single_connection_learning_rate) cf.weights *= cf.mask class CFPOutputFn(param.Parameterized): """ Type for an object that applies some operation (typically something like normalization) to all CFs in a CFProjection for which the specified mask (typically the activity at the destination of this projection) is nonzero. """ __abstract = True def __call__(self, iterator, **params): """Operate on each CF for which the mask is nonzero.""" raise NotImplementedError class CFPOF_Plugin(CFPOutputFn): """ Applies the specified single_cf_fn to each CF in the CFProjection for which the mask is nonzero. """ single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(), doc="Accepts a TransferFn that will be applied to each CF individually.") def __call__(self, iterator, **params): if type(self.single_cf_fn) is not IdentityTF: single_cf_fn = self.single_cf_fn for cf,i in iterator(): single_cf_fn(cf.weights) del cf.norm_total class CFPOF_Identity(CFPOutputFn): """ CFPOutputFn that leaves the CFs unchanged. Must never be changed or subclassed, because it might never be called. (I.e., it could simply be tested for and skipped.) 
""" single_cf_fn = param.ClassSelector(TransferFn,default=IdentityTF(),constant=True) def __call__(self, iterator, **params): pass # CB: need to make usage of 'src' and 'input_sheet' consistent between # ConnectionField and CFProjection (i.e. pick one of them). class CFProjection(Projection): """ A projection composed of ConnectionFields from a Sheet into a ProjectionSheet. CFProjection computes its activity using a response_fn of type CFPResponseFn (typically a CF-aware version of mdot) and output_fns (typically none). The initial contents of the ConnectionFields mapping from the input Sheet into the target ProjectionSheet are controlled by the weights_generator, cf_shape, and weights_output_fn parameters, while the location of the ConnectionField is controlled by the coord_mapper parameter. Any subclass has to implement the interface activate(self,input_activity) that computes the response from the input and stores it in the activity array. """ response_fn = param.ClassSelector(CFPResponseFn, default=CFPRF_Plugin(), doc='Function for computing the Projection response to an input pattern.') cf_type = param.Parameter(default=ConnectionField,constant=True, doc="Type of ConnectionField to use when creating individual CFs.") # JPHACKALERT: Not all support for null CFs has been implemented. # CF plotting and C-optimized CFPxF_ functions need # to be fixed to support null CFs without crashing. allow_null_cfs = param.Boolean(default=False, doc="Whether or not the projection can have entirely empty CFs") nominal_bounds_template = BoundingRegionParameter( default=BoundingBox(radius=0.1),doc=""" Bounds defining the Sheet area covered by a prototypical ConnectionField. The true bounds will differ depending on the density (see create_slice_template()).""") weights_generator = param.ClassSelector(PatternGenerator, default=patterngenerator.Constant(),constant=True, doc="Generate initial weights values.") cf_shape = param.ClassSelector(PatternGenerator, default=patterngenerator.Constant(),constant=True, doc="Mask pattern to define the shape of the connection fields.") same_cf_shape_for_all_cfs = param.Boolean(default=True,doc=""" Whether or not to share a single cf_shape mask for all CFs. If True, the cf_shape is evaluated only once and shared for all CFs, which saves computation time and memory. If False, the cf_shape is evaluated once for each CF, allowing each to have its own shape.""") learning_fn = param.ClassSelector(CFPLearningFn, default=CFPLF_Plugin(), doc='Function for computing changes to the weights based on one activation step.') # JABALERT: Shouldn't learning_rate be owned by the learning_fn? learning_rate = param.Number(default=0.0,softbounds=(0,100),doc=""" Amount of learning at each step for this projection, specified in units that are independent of the density of each Sheet.""") weights_output_fns = param.HookList(default=[CFPOF_Plugin()], class_=CFPOutputFn, doc='Functions applied to each CF after learning.') strength = param.Number(default=1.0,doc=""" Global multiplicative scaling applied to the Activity of this Sheet.""") coord_mapper = param.ClassSelector(CoordinateMapperFn, default=IdentityMF(), doc='Function to map a projected coordinate into the target sheet.') # CEBALERT: this is temporary (allows c++ matching in certain # cases). We will allow the user to override the mask size, but # by offering a scaling parameter. 
autosize_mask = param.Boolean( default=True,constant=True,precedence=-1,doc=""" Topographica sets the mask size so that it is the same as the connection field's size, unless this parameter is False - in which case the user-specified size of the cf_shape is used. In normal usage of Topographica, this parameter should remain True.""") mask_threshold = param.Number(default=0.5,constant=True,doc=""" If a unit is above this value in the cf_shape mask, it is included; otherwise it is excluded from the mask.""") apply_output_fns_init=param.Boolean(default=True,doc=""" Whether to apply the output function to connection fields (e.g. for normalization) when the CFs are first created.""") min_matrix_radius = param.Integer(default=1,bounds=(0,None),doc=""" Enforced minimum for radius of weights matrix. The default of 1 gives a minimum matrix of 3x3. 0 would allow a 1x1 matrix.""") hash_format = param.String(default="{name}-{src}-{dest}", doc=""" Format string to determine the hash value used to initialize random weight generation. Format keys available include {name} {src} and {dest}.""") precedence = param.Number(default=0.8) def __init__(self,initialize_cfs=True,**params): """ Initialize the Projection with a set of cf_type objects (typically ConnectionFields), each located at the location in the source sheet corresponding to the unit in the target sheet. The cf_type objects are stored in the 'cfs' array. The nominal_bounds_template specified may be altered: the bounds must be fitted to the Sheet's matrix, and the weights matrix must have odd dimensions. These altered bounds are passed to the individual connection fields. A mask for the weights matrix is constructed. The shape is specified by cf_shape; the size defaults to the size of the nominal_bounds_template. """ super(CFProjection,self).__init__(**params) self.weights_generator.set_dynamic_time_fn(None,sublistattr='generators') # get the actual bounds_template by adjusting a copy of the # nominal_bounds_template to ensure an odd slice, and to be # cropped to sheet if necessary self._slice_template = Slice(copy(self.nominal_bounds_template), self.src,force_odd=True, min_matrix_radius=self.min_matrix_radius) self.bounds_template = self._slice_template.compute_bounds(self.src) self.mask_template = _create_mask(self.cf_shape,self.bounds_template, self.src,self.autosize_mask, self.mask_threshold) self.n_units = self._calc_n_units() if initialize_cfs: self._create_cfs() if self.apply_output_fns_init: self.apply_learn_output_fns(active_units_mask=False) ### JCALERT! We might want to change the default value of the ### input value to self.src.activity; but it fails, raising a ### type error. It probably has to be clarified why this is ### happening self.input_buffer = None self.activity = np.array(self.dest.activity) if 'cfs' not in self.dest.views: self.dest.views.CFs = AttrTree() self.dest.views.CFs[self.name] = self._cf_grid() def _cf_grid(self, shape=None, **kwargs): "Create ProjectionGrid with the correct metadata." grid = Grid({}) grid.metadata = AttrDict(timestamp=self.src.simulation.time(), info=self.name, proj_src_name=self.src.name, proj_dest_name=self.dest.name, **kwargs) return grid def _generate_coords(self): X,Y = self.dest.sheetcoords_of_idx_grid() vectorized_coord_mapper = simple_vectorize(self.coord_mapper, num_outputs=2, # CB: could switch to float32? output_type=float) return vectorized_coord_mapper(X,Y) # CB: should be _initialize_cfs() since we already have 'initialize_cfs' flag? 
def _create_cfs(self): vectorized_create_cf = simple_vectorize(self._create_cf) self.cfs = vectorized_create_cf(*self._generate_coords()) self.flatcfs = list(self.cfs.flat) def _create_cf(self,x,y): """ Create a ConnectionField at x,y in the src sheet. """ # (to restore would need to have an r,c counter) # self.debug("Creating CF(%d,%d) from src (%.3f,%.3f) to dest (%.3f,%.3f)"%(r,c,x_cf,y_cf,x,y)) label = self.hash_format.format(name=self.name, src=self.src.name, dest=self.dest.name) name = "%s_CF (%.5f, %.5f)" % ('' if label is None else label, x,y) try: if self.same_cf_shape_for_all_cfs: mask_template = self.mask_template else: mask_template = _create_mask(self.cf_shape, self.bounds_template, self.src,self.autosize_mask, self.mask_threshold, name=name) CF = self.cf_type(self.src, x=x, y=y, template=self._slice_template, weights_generator=self.weights_generator, mask=mask_template, min_matrix_radius=self.min_matrix_radius, label = label) except NullCFError: if self.allow_null_cfs: CF = None else: raise return CF def _calc_n_units(self): """Return the number of unmasked units in a typical ConnectionField.""" return min(len(self.mask_template.ravel().nonzero()[0]), # CEBALERT: if the mask_template is bigger than the # src sheet (e.g. conn radius bigger than src # radius), return the size of the source sheet self.src.shape[0]*self.src.shape[1]) def cf(self,r,c): """Return the specified ConnectionField""" # CB: should we offer convenience cf(x,y) (i.e. sheetcoords) method instead? self.warning("CFProjection.cf(r,c) is deprecated: use cfs[r,c] instead") return self.cfs[r,c] def cf_bounds(self,r,c): """Return the bounds of the specified ConnectionField.""" return self.cfs[r,c].get_bounds(self.src) def grid(self, rows=11, cols=11, lbrt=None, situated=False, **kwargs): xdensity, ydensity = self.dest.xdensity, self.dest.ydensity l, b, r, t = self.dest.bounds.lbrt() half_x_unit = ((r-l) / xdensity) / 2. half_y_unit = ((t-b) / ydensity) / 2. if lbrt is None: l, b, r, t = (l+half_x_unit, b+half_y_unit, r-half_x_unit, t-half_y_unit) else: l, b = self.dest.closest_cell_center(lbrt[0], lbrt[1]) r, t = self.dest.closest_cell_center(lbrt[2], lbrt[3]) x, y = np.meshgrid(np.linspace(l, r, cols), np.linspace(b, t, rows)) coords = zip(x.flat, y.flat) grid_items = {} for x, y in coords: grid_items[x, y] = self.view(x, y, situated=situated, **kwargs) grid = Grid(grid_items, label='CFs', title=' '.join([self.dest.name, self.name, '{label}'])) grid.metadata = AttrDict(info=self.name, proj_src_name=self.src.name, proj_dest_name=self.dest.name, timestamp=self.src.simulation.time(), **kwargs) return grid def view(self, sheet_x, sheet_y, timestamp=None, situated=False, **kwargs): """ Return a single connection field Matrix, for the unit located nearest to sheet coordinate (sheet_x,sheet_y). 
""" if timestamp is None: timestamp = self.src.simulation.time() time_dim = Dimension("Time", type=param.Dynamic.time_fn.time_type) (r, c) = self.dest.sheet2matrixidx(sheet_x, sheet_y) cf = self.cfs[r, c] r1, r2, c1, c2 = cf.input_sheet_slice situated_shape = self.src.activity.shape situated_bounds = self.src.bounds roi_bounds = cf.get_bounds(self.src) if situated: matrix_data = np.zeros(situated_shape, dtype=np.float64) matrix_data[r1:r2, c1:c2] = cf.weights.copy() bounds = situated_bounds else: matrix_data = cf.weights.copy() bounds = roi_bounds sv = CFView(matrix_data, bounds, situated_bounds=situated_bounds, input_sheet_slice=(r1, r2, c1, c2), roi_bounds=roi_bounds, label=self.name+ " CF Weights", value='CF Weight') sv.metadata=AttrDict(timestamp=timestamp) viewmap = ViewMap((timestamp, sv), dimensions=[time_dim]) viewmap.metadata = AttrDict(coords=(sheet_x, sheet_y), dest_name=self.dest.name, precedence=self.src.precedence, proj_name=self.name, src_name=self.src.name, row_precedence=self.src.row_precedence, timestamp=timestamp, **kwargs) return viewmap def get_view(self, sheet_x, sheet_y, timestamp=None): self.warning("Deprecated, call 'view' method instead.") return self.view(sheet_x, sheet_y, timestamp) def activate(self,input_activity): """Activate using the specified response_fn and output_fn.""" if self.input_fns: input_activity = input_activity.copy() for iaf in self.input_fns: iaf(input_activity) self.input_buffer = input_activity self.activity *=0.0 self.response_fn(CFIter(self), input_activity, self.activity, self.strength) for of in self.output_fns: of(self.activity) # CEBALERT: should add active_units_mask to match # apply_learn_output_fns. def learn(self): """ For a CFProjection, learn consists of calling the learning_fn. """ # Learning is performed if the input_buffer has already been set, # i.e. there is an input to the Projection. if self.input_buffer is not None: self.learning_fn(CFIter(self),self.input_buffer,self.dest.activity,self.learning_rate) # CEBALERT: called 'learn' output fns here, but called 'weights' output fns # elsewhere (mostly). Change all to 'learn'? def apply_learn_output_fns(self,active_units_mask=True): """ Apply the weights_output_fns to each unit. If active_units_mask is True, inactive units will be skipped. """ for of in self.weights_output_fns: of(CFIter(self,active_units_mask=active_units_mask)) # CEBALERT: see gc alert in simulation.__new__ def _cleanup(self): for cf in self.cfs.flat: # cf could be None or maybe something else if hasattr(cf,'input_sheet'): cf.input_sheet=None if hasattr(cf,'input_sheet_slice'): cf.input_sheet_slice=None if hasattr(cf,'weights_slice'): cf.weights_slice=None def n_bytes(self): # Could also count the input_sheet_slice rows,cols=self.cfs.shape return super(CFProjection,self).n_bytes() + \ sum([cf.weights.nbytes + cf.mask.nbytes for cf,i in CFIter(self,ignore_sheet_mask=True)()]) def n_conns(self): # Counts non-masked values, if mask is available; otherwise counts # weights as connections if nonzero rows,cols=self.cfs.shape return np.sum([len((cf.mask if cf.mask is not None else cf.weights).ravel().nonzero()[0]) for cf,i in CFIter(self)()]) # CEB: have not yet decided proper location for this method # JAB: should it be in PatternGenerator? def _create_mask(shape,bounds_template,sheet, autosize=True,threshold=0.5, name='Mask'): """ Create the mask (see ConnectionField.__init__()). 
""" # Calculate the size & aspect_ratio of the mask if appropriate; # mask size set to be that of the weights matrix if hasattr(shape, 'size') and autosize: l,b,r,t = bounds_template.lbrt() shape.size = t-b shape.aspect_ratio = (r-l)/shape.size # Center mask to matrixidx center center_r,center_c = sheet.sheet2matrixidx(0,0) center_x,center_y = sheet.matrixidx2sheet(center_r,center_c) kwargs = dict(name=name, x=center_x,y=center_y, bounds=bounds_template, xdensity=sheet.xdensity, ydensity=sheet.ydensity) if isinstance(param.Dynamic.time_fn, param.Time): with param.Dynamic.time_fn as t: t(0) # Initialize masks at time 0 mask = shape(**kwargs) else: mask = shape(**kwargs) mask = np.where(mask>=threshold,mask,0.0) # CB: unnecessary copy (same as for weights) return mask.astype(weight_type) class CFIter(object): """ Iterator to walk through all ConnectionFields of all neurons in the destination Sheet of the given CFProjection. Each iteration yields the tuple (cf,i) where cf is the ConnectionField at position i in the projection's flatcfs list. If active_units_mask is True, inactive units will be skipped. If ignore_sheet_mask is True, even units excluded by the sheet mask will be included. """ # CB: as noted elsewhere, rename active_units_mask (to e.g. # ignore_inactive_units). def __init__(self,cfprojection,active_units_mask=False,ignore_sheet_mask=False): self.flatcfs = cfprojection.flatcfs self.activity = cfprojection.dest.activity self.mask = cfprojection.dest.mask self.cf_type = cfprojection.cf_type self.proj_n_units = cfprojection.n_units self.allow_skip_non_responding_units = cfprojection.dest.allow_skip_non_responding_units self.active_units_mask = active_units_mask self.ignore_sheet_mask = ignore_sheet_mask def __nomask(self): # return an array indicating all units should be processed # dtype for C functions. # could just be flat. return np.ones(self.activity.shape,dtype=self.activity.dtype) # CEBALERT: make _ def get_sheet_mask(self): if not self.ignore_sheet_mask: return self.mask.data else: return self.__nomask() # CEBALERT: make _ (and probably drop '_mask'). def get_active_units_mask(self): if self.allow_skip_non_responding_units and self.active_units_mask: return self.activity else: return self.__nomask() # CEBALERT: rename? def get_overall_mask(self): """ Return an array indicating whether or not each unit should be processed. """ # JPHACKALERT: Should really check for the existence of the # mask, rather than checking its type. This is a hack to # support higher-order projections whose dest is a CF, instead # of a sheet. The right thing to do is refactor so that CF # masks and SheetMasks are subclasses of an abstract Mask # type so that they support the same interfaces. # # CEBALERT: put back when supporting neighborhood masking # (though preferably do what Jeff suggests instead) # if isinstance(self.proj.dest.mask,SheetMask): # return get_active_units_mask() # else: # CB: note that it's faster for our optimized C functions to # combine the masks themselves, rather than using this method. 
sheet_mask = self.get_sheet_mask() active_units_mask = self.get_active_units_mask() return np.logical_and(sheet_mask,active_units_mask) def __call__(self): mask = self.get_overall_mask() for i,cf in enumerate(self.flatcfs): if cf is not None: if mask.flat[i]: yield cf,i # PRALERT: CFIter Alias for backwards compatability with user code # Should be removed before release v1.0 MaskedCFIter = CFIter ### We don't really need this class; its methods could probably be ### moved up to ProjectionSheet, because they may in fact be valid for ### all ProjectionSheets. But we're leaving it here, because it is ### likely to be useful in the future. class CFSheet(ProjectionSheet): """ A ProjectionSheet providing access to the ConnectionFields in its CFProjections. CFSheet is a Sheet built from units indexed by Sheet coordinates (x,y). Each unit can have one or more ConnectionFields on another Sheet (via this sheet's CFProjections). Thus CFSheet is a more concrete version of a ProjectionSheet; a ProjectionSheet does not require that there be units or weights of any kind. Unless you need access to the underlying ConnectionFields for visualization or analysis, CFSheet and ProjectionSheet are interchangeable. """ measure_maps = param.Boolean(True,doc=""" Whether to include this Sheet when measuring various maps to create SheetViews.""") precedence = param.Number(0.5) def update_unit_view(self,x,y,proj_name=''): """ Creates the list of UnitView objects for a particular unit in this CFSheet. (There is one UnitView for each Projection to this CFSheet). Each UnitView is then added to the sheet_views of its source sheet. It returns the list of all UnitViews for the given unit. """ for p in self.in_connections: if not isinstance(p,CFProjection): self.debug("Skipping non-CFProjection "+p.name) elif proj_name == '' or p.name==proj_name: v = p.view(x, y, self.simulation.time()) cfs = self.views.CFs if p.name not in cfs: cfs[p.name] = p._cf_grid() cfs[p.name][x, y] = v class ResizableCFProjection(CFProjection): """ A CFProjection with resizable weights. """ # Less efficient memory usage than CFProjection because it stores # the (x,y) position of each ConnectionField. def _generate_coords(self): # same as super's, but also stores the coords. # CB: this is storing redundant info because generate_coords() # returns output from mgrid. Might be better to store the 1d x # and y coords, and generate the grids when needed? self.X_cf,self.Y_cf = super(ResizableCFProjection,self)._generate_coords() return self.X_cf,self.Y_cf ### This could be changed into a special __set__ method for ### bounds_template, instead of being a separate function, but ### having it be explicit like this might be clearer. ### ### This implementation is fairly slow, and for some algorithms ### that rely on changing the bounds frequently, it may be worth ### re-implementing it in C. def change_bounds(self, nominal_bounds_template): """ Change the bounding box for all of the ConnectionFields in this Projection. Calls change_bounds() on each ConnectionField. Currently only allows reducing the size, but should be extended to allow increasing as well. 
""" slice_template = Slice(copy(nominal_bounds_template), self.src,force_odd=True, min_matrix_radius=self.min_matrix_radius) bounds_template = slice_template.compute_bounds(self.src) if not self.bounds_template.containsbb_exclusive(bounds_template): if self.bounds_template.containsbb_inclusive(bounds_template): self.debug('Initial and final bounds are the same.') else: self.warning('Unable to change_bounds; currently allows reducing only.') return # it's ok so we can store the bounds and resize the weights mask_template = _create_mask(self.cf_shape,bounds_template,self.src, self.autosize_mask,self.mask_threshold) self.mask_template = mask_template self.n_units = self._calc_n_units() self.nominal_bounds_template = nominal_bounds_template self.bounds_template = bounds_template self._slice_template = slice_template cfs = self.cfs rows,cols = cfs.shape output_fns = [wof.single_cf_fn for wof in self.weights_output_fns] for r in xrange(rows): for c in xrange(cols): xcf,ycf = self.X_cf[0,c],self.Y_cf[r,0] # CB: listhack - loop is candidate for replacement by numpy fn self._change_cf_bounds(cfs[r,c],input_sheet=self.src, x=xcf,y=ycf, template=slice_template, mask=mask_template, output_fns=output_fns, min_matrix_radius=self.min_matrix_radius) def change_density(self, new_wt_density): """ Rescales the weight matrix in place, interpolating or resampling as needed. Not yet implemented. """ raise NotImplementedError def _change_cf_bounds(self,cf,input_sheet,x,y,template,mask,output_fns=None,min_matrix_radius=1): """ Change the bounding box for this ConnectionField. Discards weights or adds new (zero) weights as necessary, preserving existing values where possible. Currently only supports reducing the size, not increasing, but should be extended to support increasing as well. Note that the supplied template will be modified, so if you're also using them elsewhere you should pass copies. """ if output_fns is None: output_fns = [] # CEBALERT: re-write to allow arbitrary resizing or1,or2,oc1,oc2 = cf.input_sheet_slice weights_slice = cf._create_input_sheet_slice(input_sheet,x,y,copy(template),min_matrix_radius) r1,r2,c1,c2 = cf.input_sheet_slice if not (r1 == or1 and r2 == or2 and c1 == oc1 and c2 == oc2): # CB: note that it's faster to copy (i.e. replacing copy=1 with copy=0 # below slows down change_bounds(). cf.weights = np.array(cf.weights[r1-or1:r2-or1,c1-oc1:c2-oc1],copy=1) # (so the obvious choice, # cf.weights=cf.weights[r1-or1:r2-or1,c1-oc1:c2-oc1], # is also slower). cf.mask = weights_slice.submatrix(mask) cf.mask = np.array(cf.mask,copy=1) # CB: why's this necessary? # (see ALERT in __init__) cf.weights *= cf.mask for of in output_fns: of(cf.weights) del cf.norm_total
bsd-3-clause
-7,709,981,377,851,790,000
39.950564
228
0.631862
false
4.057403
false
false
false
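# --- Hedged usage sketch (not part of the original record above) ---
# Minimal illustration of how the CFIter class from the topographica file
# above is consumed. `proj` is a hypothetical, already-built CFProjection;
# CFIter.__call__ yields (cf, i) pairs only for units whose combined
# sheet/activity mask entry is nonzero and whose ConnectionField is not None.

def total_cf_weight(proj):
    """Sum the weights of every ConnectionField visited by CFIter."""
    total = 0.0
    for cf, i in CFIter(proj)():
        total += cf.weights.sum()
    return total

# Passing ignore_sheet_mask=True would also visit units that the SheetMask
# excludes:  CFIter(proj, ignore_sheet_mask=True)()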
compas-dev/compas
src/compas_plotters/core/helpers.py
1
5374
from numpy import asarray from numpy import argmax from numpy import argmin from numpy import zeros from mpl_toolkits.mplot3d.art3d import Poly3DCollection from compas_plotters.core.utilities import assert_axes_dimension __all__ = [ 'Axes2D', 'Axes3D', 'Bounds', 'Box', 'Cloud2D', 'Cloud3D', 'Hull', ] class Axes2D(object): """Definition of a 2D Axes object. Parameters ---------- origin : tuple or list X and Y coordinates for the origin. vectors : list The X and Y axes. Attributes ---------- origin : tuple or list X and Y coordinates for the origin. vectors : list The X and Y axes. """ def __init__(self, origin, vectors): """Initializes the Axes2D object""" self.origin = asarray(origin) self.vectors = asarray(vectors) def plot(self, axes): """Plots the axes object Parameters ---------- axes : object The matplotlib axes object. """ assert_axes_dimension(axes, 2) o = self.origin xy = self.vectors axes.plot( [o[0, 0], o[0, 0] + xy[0, 0]], [o[0, 1], o[0, 1] + xy[0, 1]], 'r-' ) axes.plot( [o[0, 0], o[0, 0] + xy[1, 0]], [o[0, 1], o[0, 1] + xy[1, 1]], 'g-' ) class Axes3D(object): """Definition of a 3D Axes object. Parameters ---------- origin : tuple or list X, Y and Z coordinates for the origin. vectors : list The X, Y and Z axes. Attributes ---------- origin : tuple or list X, Y and Z coordinates for the origin. vectors : list The X, Y and Z axes. """ def __init__(self, origin, vectors, colors=None): """Initializes the Axes3D object""" self.origin = asarray(origin) self.vectors = asarray(vectors) if not colors: colors = ('r', 'g', 'b') self.colors = colors def plot(self, axes): """Plots the axes object Parameters ---------- axes : object The matplotlib axes object. """ assert_axes_dimension(axes, 3) o = self.origin xyz = self.vectors axes.plot( [o[0, 0], o[0, 0] + xyz[0, 0]], [o[0, 1], o[0, 1] + xyz[0, 1]], [o[0, 2], o[0, 2] + xyz[0, 2]], '{0}-'.format(self.colors[0]), linewidth=3 ) axes.plot( [o[0, 0], o[0, 0] + xyz[1, 0]], [o[0, 1], o[0, 1] + xyz[1, 1]], [o[0, 2], o[0, 2] + xyz[1, 2]], '{0}-'.format(self.colors[1]), linewidth=3 ) axes.plot( [o[0, 0], o[0, 0] + xyz[2, 0]], [o[0, 1], o[0, 1] + xyz[2, 1]], [o[0, 2], o[0, 2] + xyz[2, 2]], '{0}-'.format(self.colors[2]), linewidth=3 ) class Bounds(object): """""" def __init__(self, points): self.points = asarray(points) def plot(self, axes): assert_axes_dimension(axes, 3) xmin, ymin, zmin = argmin(self.points, axis=0) xmax, ymax, zmax = argmax(self.points, axis=0) xspan = self.points[xmax, 0] - self.points[xmin, 0] yspan = self.points[ymax, 1] - self.points[ymin, 1] zspan = self.points[zmax, 2] - self.points[zmin, 2] span = max(xspan, yspan, zspan) axes.plot([self.points[xmin, 0]], [self.points[ymin, 1]], [self.points[zmin, 2]], 'w') axes.plot([self.points[xmin, 0] + span], [self.points[ymin, 1] + span], [self.points[zmin, 2] + span], 'w') class Box(object): """""" def __init__(self, corners): self.corners = corners self.faces = [[0, 1, 2, 3], [4, 7, 6, 5], [1, 5, 6, 2], [0, 4, 5, 1], [0, 3, 7, 4], [2, 6, 7, 3]] def plot(self, axes): assert_axes_dimension(axes, 3) rec = [[self.corners[index] for index in face] for face in self.faces] rec_coll = Poly3DCollection(rec) rec_coll.set_facecolors([(1.0, 0.0, 0.0) for face in self.faces]) rec_coll.set_alpha(0.2) axes.add_collection3d(rec_coll) class Cloud2D(object): """""" def __init__(self, cloud): cloud = asarray(cloud) cols = min(2, cloud.shape[1]) self.cloud = zeros((cloud.shape[0], 2)) self.cloud[:, :cols] = cloud[:, :cols] def plot(self, axes): x = self.cloud[:, 0] y = self.cloud[:, 1] axes.plot(x, y, 
'o', color=(1.0, 1.0, 1.0)) class Cloud3D(object): """""" def __init__(self, cloud): cloud = asarray(cloud) cols = min(3, cloud.shape[1]) self.cloud = zeros((cloud.shape[0], 3)) self.cloud[:, :cols] = cloud[:, :cols] def plot(self, axes): x = self.cloud[:, 0] y = self.cloud[:, 1] z = self.cloud[:, 2] axes.plot(x, y, z, 'o', color=(0.7, 0.7, 0.7)) class Hull(object): """""" def __init__(self, hull): self.vertices = hull.points self.faces = hull.simplices def plot(self, axes): tri = [[self.vertices[index] for index in face] for face in self.faces] tri_coll = Poly3DCollection(tri) tri_coll.set_facecolors([(0.0, 1.0, 0.0) for face in self.faces]) axes.add_collection3d(tri_coll)
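# --- Hedged usage sketch (not part of the original file) ---
# Minimal illustration of how Axes2D and Cloud2D draw onto a regular
# matplotlib Axes. It assumes matplotlib is installed and that
# assert_axes_dimension() accepts a standard 2D Axes created with
# plt.subplots(); the coordinates below are arbitrary sample data.
# Note that Axes2D indexes its origin as a 2D array, hence [[0.0, 0.0]].

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
Axes2D(origin=[[0.0, 0.0]], vectors=[[1.0, 0.0], [0.0, 1.0]]).plot(ax)
Cloud2D([[0.2, 0.3], [0.7, 0.1], [0.4, 0.9]]).plot(ax)
plt.show()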
mit
-5,616,462,484,940,110,000
25.736318
115
0.499628
false
3.216038
false
false
false
kgblll/libresoft-gymkhana
GIC/Channels/Items/PlaceItem.py
2
1046
#!/usr/bin/env python # Copyright (C) 2009 GSyC/LibreSoft # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # Authors : Roberto Calvo <rocapal@gsyc.es> import sys from GenericItem import * class PlaceItem (GenericItem): def __init__ (self): self.categories = None self.address = None self.phone = None self.city = None self.region = None self.country = None
gpl-2.0
-9,093,844,593,012,902,000
27.27027
76
0.73805
false
3.696113
false
false
false
TzuChieh/Photon-v2
scripts/SDL_Interface/PythonGenerator/PythonGenerator.py
1
5358
from InterfaceGenerator import InterfaceGenerator from SDLInterface import SDLInterface from .PythonClass import PythonClass from .PythonMethod import PythonMethod from . import pysdl_base import copy import inspect from string import capwords import datetime class PythonGenerator(InterfaceGenerator): def __init__(self): super().__init__() self.interfaces = [] def add_interface(self, sdl_interface: SDLInterface): self.interfaces.append(copy.deepcopy(sdl_interface)) def generate(self, output_directory): if not self.resolve_interface_extension(): print("warning: cannot resolve interface extension, suggestions: ") print("1. check for typo") print("2. is the extended target actually exist") print("3. may be possible cyclic extensions") return file = open(output_directory + "pysdl.py", "w+") file.write( "# ========================================\n" "# NOTE: THIS FILE CONTAINS GENERATED CODE \n" "# DO NOT MODIFY \n" "# ========================================\n") file.write("# last generated: %s \n\n" % datetime.datetime.now()) file.write(inspect.getsource(pysdl_base)) file.write("\n\n") file.write(PythonGenerator.gen_reference_data_classes()) file.write("\n\n") for interface in self.interfaces: file.write(PythonGenerator.gen_interface_classes(interface)) file.close() def name(self): return "python" def resolve_interface_extension(self): resolved_interfaces = {} unresolved_interfaces = [] for interface in self.interfaces: if interface.is_extending(): unresolved_interfaces.append(interface) else: resolved_interfaces[interface.get_full_type_name()] = interface while unresolved_interfaces: has_progress = False for interface in unresolved_interfaces: target_name = interface.get_extended_full_type_name() extended_interface = resolved_interfaces.get(target_name, None) if extended_interface is None: continue else: interface.extend(extended_interface) resolved_interfaces[interface.get_full_type_name()] = interface unresolved_interfaces.remove(interface) has_progress = True if not has_progress: return False return True @classmethod def gen_reference_data_classes(cls): reference_types = SDLInterface.get_reference_types() code = "" for type_name in reference_types: class_name = capwords(type_name, "-").replace("-", "") clazz = PythonClass("SDL" + class_name) clazz.set_inherited_class_name("SDLReference") init_method = PythonMethod("__init__") init_method.add_input("ref_name", default_value="\"\"") init_method.add_content_line("super().__init__(\"%s\", ref_name)" % type_name) clazz.add_method(init_method) code += clazz.gen_code() return code @classmethod def gen_interface_classes(cls, sdl_interface: SDLInterface): class_base_name = cls.gen_class_name(sdl_interface) code = "" # generating creator code if sdl_interface.has_creator() and not sdl_interface.creator.is_blueprint: clazz = PythonClass(class_base_name + "Creator") if sdl_interface.is_world(): clazz.set_inherited_class_name("SDLCreatorCommand") else: clazz.set_inherited_class_name("SDLCoreCommand") clazz.add_default_init() # overriding get_full_type full_type_method = PythonMethod("get_full_type") full_type_method.add_content_line("return \"%s\"" % sdl_interface.get_full_type_name()) clazz.add_method(full_type_method) for sdl_input in sdl_interface.creator.inputs: method_name = "set_" method_name += sdl_input.name.replace("-", "_") input_name = sdl_input.name.replace("-", "_") if clazz.has_method(method_name): continue method = PythonMethod(method_name) method.add_input(input_name, expected_type="SDLData") 
method.add_content_line("self.set_input(\"%s\", %s)" % (sdl_input.name, input_name)) clazz.add_method(method) code += clazz.gen_code() # generating executor code for sdl_executor in sdl_interface.executors: name_norm = capwords(sdl_executor.name, "-").replace("-", "") clazz = PythonClass(class_base_name + name_norm) clazz.set_inherited_class_name("SDLExecutorCommand") # overriding get_full_type full_type_method = PythonMethod("get_full_type") full_type_method.add_content_line("return \"%s\"" % sdl_interface.get_full_type_name()) clazz.add_method(full_type_method) # overriding get_name get_name_method = PythonMethod("get_name") get_name_method.add_content_line("return \"%s\"" % sdl_executor.name) clazz.add_method(get_name_method) for sdl_input in sdl_executor.inputs: method_name = "set_" method_name += sdl_input.name.replace("-", "_") input_name = sdl_input.name.replace("-", "_") if clazz.has_method(method_name): continue method = PythonMethod(method_name) method.add_input(input_name, expected_type="SDLData") method.add_content_line("self.set_input(\"%s\", %s)" % (sdl_input.name, input_name)) clazz.add_method(method) code += clazz.gen_code() return code @classmethod def gen_class_name(cls, sdl_interface: SDLInterface): category_norm = capwords(sdl_interface.category_name, "-").replace("-", "") type_norm = capwords(sdl_interface.type_name, "-").replace("-", "") return type_norm + category_norm
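# --- Hedged illustration (not part of the original file) ---
# resolve_interface_extension() above uses a simple fixed-point loop: keep
# resolving interfaces whose extension target is already resolved, and stop
# (reporting failure) as soon as a full pass makes no progress, which also
# catches cycles and missing targets. The sketch below shows the same
# pattern on plain dictionaries, independent of the SDL classes.

def resolve(extends):
    """`extends` maps name -> extended name (or None for base definitions)."""
    resolved = {name for name, target in extends.items() if target is None}
    pending = {name: target for name, target in extends.items() if target is not None}
    while pending:
        progress = [n for n, t in pending.items() if t in resolved]
        if not progress:
            return None  # cyclic extension or unknown target
        for n in progress:
            resolved.add(n)
            del pending[n]
    return resolved

# resolve({'a': None, 'b': 'a', 'c': 'b'}) -> {'a', 'b', 'c'}
# resolve({'x': 'y', 'y': 'x'}) -> None (cyclic extension)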
mit
762,593,232,892,099,800
27.5
90
0.678425
false
3.251214
false
false
false
yacoob/blitzloop
blitzloop/idlescreen.py
1
5014
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2012-2013 Hector Martin "marcan" <hector@marcansoft.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 or version 3. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA import OpenGL.GL as gl import OpenGL.GLU as glu import PIL import math import time from blitzloop import util class ImageTexture(object): def __init__(self, img_file, background=(0,0,0)): self.image = PIL.Image.open(img_file) self.tw = 1 while self.tw < self.width: self.tw *= 2 self.th = 1 while self.th < self.height: self.th *= 2 r, g, b = background self.teximage = PIL.Image.new("RGBA", (self.tw, self.th), (r, g, b, 0)) self.teximage.paste(self.image, (0,0), self.image) self.texid = gl.glGenTextures(1) gl.glBindTexture(gl.GL_TEXTURE_2D, self.texid) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR_MIPMAP_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP) try: blob = self.teximage.tobytes() except AttributeError: blob = self.teximage.tostring() glu.gluBuild2DMipmaps(gl.GL_TEXTURE_2D, 4, self.tw, self.th, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, blob) @property def width(self): return self.image.size[0] @property def height(self): return self.image.size[1] @property def aspect(self): return self.width / self.height def __del__(self): gl.glDeleteTextures(self.texid) def draw(self, x=0, y=0, width=1, height=None, brightness=1.0): if height is None: height = width / self.aspect gl.glEnable(gl.GL_BLEND) gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA); gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self.texid) gl.glEnable(gl.GL_TEXTURE_2D) gl.glBegin(gl.GL_TRIANGLE_STRIP) gl.glColor4f(brightness, brightness, brightness,1) gl.glTexCoord2f(0, self.height / self.th) gl.glVertex2f(x, y) gl.glTexCoord2f(self.width / self.tw, self.height / self.th) gl.glVertex2f(x+width, y) gl.glTexCoord2f(0, 0) gl.glVertex2f(x, y+height) gl.glTexCoord2f(self.width / self.tw, 0) gl.glVertex2f(x+width, y+height) gl.glEnd() gl.glDisable(gl.GL_TEXTURE_2D) class IdleScreen(object): def __init__(self, display): self.display = display self.logo = ImageTexture(util.get_resgfx_path("logo.png"), (0,0,0)) self.tablet = ImageTexture(util.get_resgfx_path("tablet.png"), (0,0,0)) self.hand = ImageTexture( util.get_resgfx_path("hand.png"), (255,255,255)) self.silhouette = ImageTexture( util.get_resgfx_path("silhouette.png"), (0,0,0)) self.reset() def reset(self): self.fade = 0 self.st = time.time() self.closing = False def close(self): self.closing = True def __iter__(self): return self def __next__(self): t = time.time() - self.st self.display.set_aspect(4.0/3.0) if self.closing: self.fade -= 0.015 if self.fade < 0: raise StopIteration() elif self.fade < 1: self.fade = min(1, self.fade + 0.015) gl.glClearColor(0, 0, 0, 1) 
gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) sfac = self.silhouette.aspect / self.display.aspect self.silhouette.draw(x=0, width=sfac) lx = sfac * 0.7 lw = 1.0-lx self.logo.draw(y=(1.0 / self.display.aspect) - (lw / self.logo.aspect) - 0.02, x=lx, width=lw) tx = lx + lw/2 - 0.1 ty = 0.2 self.tablet.draw(x=tx, y=ty, width=0.2) d = math.sin(t / 0.5 * math.pi) * 0.02 self.hand.draw(x=tx + 0.1 - 0.6 * d, y=ty - 0.09 + d, width=0.1) self.display.set_aspect(None) gl.glBegin(gl.GL_TRIANGLE_STRIP) gl.glColor4f(0, 0, 0, 1-self.fade) gl.glVertex2f(0, 0) gl.glVertex2f(0, 1) gl.glVertex2f(1, 0) gl.glVertex2f(1, 1) gl.glEnd()
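# --- Hedged illustration (not part of the original file) ---
# ImageTexture above pads the uploaded texture to the next power-of-two
# size (self.tw / self.th) before building mipmaps, since legacy OpenGL
# texturing works best with power-of-two dimensions. The doubling loops
# are equivalent to this helper, shown only to make the calculation explicit.

def next_power_of_two(n):
    size = 1
    while size < n:
        size *= 2
    return size

# next_power_of_two(640) == 1024; next_power_of_two(512) == 512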
gpl-2.0
-4,848,876,136,024,190,000
32.651007
102
0.602712
false
3.048024
false
false
false
arpho/mmasgis5
mmasgis/Filter.py
1
2830
from filterDb import *
#from queriesDb import *


class Filter():
    """
    Defines a set of conditions that a pv must satisfy. The test method,
    applied to a single pv, checks whether the pv meets them. The filter
    logic combines parameters of the same class with OR, while parameters
    of different classes are combined with AND.
    """
    def __repr__(self):
        return "tab:{0},parametri:{1}".format(self.tab, self.parameters)

    def __init__(self, parametri, tab, db, user, activeDb):
        """
        @param parametri: [{int:[int]}]::[{class_id:[parameter_id]}] filter settings
        @param tab: string, key of the filter
        @param db: queriesDb, the same QueriesDb instance used by the savequery and loadQuery queries
        @param user: active user
        @param activeDb: active database
        """
        self.user = user
        self.activeDb = activeDb
        self.db = filterDb(user, activeDb)
        self.Dbquery = db
        self.parameters = parametri
        self.tab = tab

    def unpackParameter(self, d):
        """Returns an "unpacked" version of the filter parameters, i.e. in the
        format [(int,int)]::[(category_id,parameter_id)], so that it is
        compatible with the queriesDb functions.
        @return: [(int,int)]::[(category_id,parameter_id)]
        """
        l = []
        for k in d.iterkeys():
            for i in d[k]:
                l.append((k, i))
        return l

    def save(self, Id):
        """
        Saves the filter parameters to the database, using the query id as a foreign key.
        @param Id: int:: query_id
        """
        # dict used to select the table to operate on, depending on the tab
        insertFunctions = {}
        insertFunctions['marchi'] = self.Dbquery.insertMarchi
        insertFunctions['potenziali'] = self.Dbquery.insertPotenziale
        insertFunctions['parametri'] = self.Dbquery.insertParametri
        # store the parameters in the database
        insertFunctions[self.tab](Id, self.unpackParameter(self.parameters))

    def test(self, Id):
        """
        Runs the test defined by the filter parameters on the pv with pv_id Id.
        @param Id: int:: pv_id
        @return boolean
        """
        return self.checkTab(Id, self.tab, self.parameters)

    def checkTab(self, Id, tab, d):
        """
        Runs the filters belonging to one tab.
        @param Id: int:: pv_id
        @param tab: string, selects which tab's filters to run: 'marchi', 'parametri', 'potenziali'
        @param d: {int:[int]}::{class_id:[parameter_id]}
        @return boolean: the returned value is the AND of the checks for each class
        """
        # True is the neutral value for the AND operation
        b = True
        # dict of the methods used to check each class
        chekers = {}
        chekers['marchi'] = self.db.checkClassBrand
        chekers['parametri'] = self.db.checkClassParameter
        chekers['potenziali'] = self.db.checkClassPotential
        for k in d.iterkeys():
            b = b and chekers[tab](Id, k, d[k])
        return b
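# --- Hedged illustration (not part of the original file) ---
# unpackParameter() above flattens the {class_id: [parameter_id, ...]}
# mapping into the [(class_id, parameter_id), ...] form expected by the
# queriesDb functions. The same transformation on plain dicts (written in
# Python 3 syntax purely for illustration; the class itself targets
# Python 2 and uses iterkeys()):

def unpack(d):
    return [(k, i) for k in d for i in d[k]]

# unpack({1: [10, 11], 2: [20]}) -> [(1, 10), (1, 11), (2, 20)]
# Per the class docstring, parameter ids of the same class are OR-ed,
# while the per-class checks are AND-ed together in checkTab().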
mit
-3,728,216,966,661,245,000
32.938272
125
0.694346
false
2.83
false
false
false
ngageoint/scale
scale/job/test/messages/test_unpublish_jobs.py
1
2904
from __future__ import unicode_literals import datetime import django from django.utils.timezone import now from django.test import TestCase from job.messages.unpublish_jobs import UnpublishJobs from job.models import Job from job.test import utils as job_test_utils from product.models import ProductFile from product.test import utils as product_test_utils class TestUnpublishJobs(TestCase): def setUp(self): django.setup() def test_json(self): """Tests coverting an UnpublishJobs message to and from JSON""" when = now() job_exe_1 = job_test_utils.create_job_exe(status='COMPLETED') job_exe_2 = job_test_utils.create_job_exe(status='COMPLETED') product_1 = product_test_utils.create_product(job_exe=job_exe_1, is_published=True) product_2 = product_test_utils.create_product(job_exe=job_exe_2, is_published=True) # Add jobs to message message = UnpublishJobs() message.when = when if message.can_fit_more(): message.add_job(job_exe_1.job_id) if message.can_fit_more(): message.add_job(job_exe_2.job_id) # Convert message to JSON and back, and then execute message_json_dict = message.to_json() new_message = UnpublishJobs.from_json(message_json_dict) result = new_message.execute() self.assertTrue(result) products = ProductFile.objects.filter(id__in=[product_1.id, product_2.id]) self.assertEqual(len(products), 2) self.assertFalse(products[0].is_published) self.assertEqual(products[0].unpublished, when) self.assertFalse(products[1].is_published) self.assertEqual(products[1].unpublished, when) def test_execute(self): """Tests calling UnpublishJobs.execute() successfully""" when = now() job_exe_1 = job_test_utils.create_job_exe(status='COMPLETED') job_exe_2 = job_test_utils.create_job_exe(status='COMPLETED') product_1 = product_test_utils.create_product(job_exe=job_exe_1, is_published=True) product_2 = product_test_utils.create_product(job_exe=job_exe_2, is_published=True) # Add jobs to message message = UnpublishJobs() message.when = when if message.can_fit_more(): message.add_job(job_exe_1.job_id) if message.can_fit_more(): message.add_job(job_exe_2.job_id) # Execute message result = message.execute() self.assertTrue(result) # Check that products are unpublished products = ProductFile.objects.filter(id__in=[product_1.id, product_2.id]) self.assertEqual(len(products), 2) self.assertFalse(products[0].is_published) self.assertEqual(products[0].unpublished, when) self.assertFalse(products[1].is_published) self.assertEqual(products[1].unpublished, when)
apache-2.0
-1,915,926,533,011,269,000
36.230769
91
0.661157
false
3.616438
true
false
false
bjodah/symodesys
examples/_UNFINISHED_stability.py
1
1857
#!/usr/bin/env python # -*- coding: utf-8 -*- # External imports import numpy as np from sympy import sin from sympy import exp as e # Package imports from symodesys import SimpleFirstOrderODESystem from symodesys.convenience import numeric_vs_analytic """ Product of early morning flight, not a single one is working properly. Need to fix this when not half asleep """ class Sin(SimpleFirstOrderODESystem): depv_tokens = 'u', @property def expressions(self): u = self['u'] return {u: sin(u)} def analytic_u(self, indep_vals, y0, params, t0): return -np.cos(indep_vals) + y0['u'] analytic_sol = {'u': analytic_u} class Nonlinear(SimpleFirstOrderODESystem): """ From Kiusaalas p. 255 ex 7.5, not working correctly atm. """ depv_tokens = 'u', param_tokens = 'lambda_u', @property def expressions(self): u, l = self['u'], self['lambda_u'] return {u: 3*u-4*e(-u)} def analytic_u(self, indep_vals, y0, params, t0): return (y0['u']-1) * np.exp(params['lambda_u']*indep_vals) + np.exp(-indep_vals) analytic_sol = {'u': analytic_u} class Nonlinear2(SimpleFirstOrderODESystem): """ From Kiusaalas p. 248 ex 7.2, not working correctly atm. """ depv_tokens = 'u', 'up' @property def expressions(self): u, up, t = self['u'], self['up'], self.indepv return {u: up, up: -0.1*u-t} def analytic_u(self, indep_vals, y0, params, t0): return 100*indep_vals - 5*indep_vals**2 + 990*(np.exp(-0.1*indep_vals)-1) analytic_sol = {'u': analytic_u} if __name__ == '__main__': numeric_vs_analytic(Nonlinear2, {'u': 0.0, 'up': 1.0}, {}, 0.0, 10.0, N=30) numeric_vs_analytic(Sin, {'u': 1.0}, {}, 0.0, 10.0, N=30) numeric_vs_analytic(Nonlinear, {'u': 1.0}, {'lambda_u': 0.2}, 0.0, 10.0, N=30)
bsd-2-clause
7,513,807,921,356,201,000
25.15493
88
0.6042
false
2.843798
false
false
false
linkinwong/adaboost-short-text
text-stress/src/tools/arff_generator.py
1
2521
__author__ = 'linlin' import os import logging import pdb logger = logging.getLogger('main_module') def GetWordsDic(file_path): file = open(file_path, 'r') for line in file: if ";;;;" in line: break dict = {} for line in file: if len(line) != 0: dict[line.strip()]= 'exist' return dict def GetSentimentScore(filepath, pos_dic, neg_dic): neg_sum = 0 pos_sum = 0 file = open(filepath, 'r') for line in file: if len(line) > 1: two_word_list = line.split() word = two_word_list[-1].strip() if word in pos_dic: #pdb.set_trace() pos_sum = int(two_word_list[0]) + pos_sum if word in neg_dic: #pdb.set_trace() neg_sum = int(two_word_list[0]) + neg_sum file.close() return [pos_sum, neg_sum] def GenerateARFF(paragraph_dir,output_path, pos_dic, neg_dic): meta_line = [] text = [] for root, dirs, files in os.walk(paragraph_dir): for filename in files: filepath = os.path.join(root, filename) meta_line.append(filename) [pos_score, neg_score] = GetSentimentScore(filepath, pos_dic, neg_dic) meta_line.append(pos_score) meta_line.append(neg_score) meta_line.append(0) text.append(meta_line) meta_line = [] file = open(output_path, 'w') line = '@data' file.write(line) file.write('\n') for line in text: line_format = ("%s,%f,%f,%d" %(line[0], line[1], line[2], line[3])) file.write(line_format) file.write('\n') file.close() if __name__=="__main__": positive_words_path = "/home/linlin/time/040515-stress-classification/Linguisiticpart/data/opinion-lexicon-English/positive-words.txt" negative_words_path = "/home/linlin/time/040515-stress-classification/Linguisiticpart/data/opinion-lexicon-English/negative-words.txt" transcript_dir = "/home/linlin/time/040515-stress-classification/Linguisiticpart/data/transcript-dic" parDir = os.path.dirname( os.getcwd()) arff_path = parDir + "/stress_data.arff" logFile = parDir + "/logFile.txt" logging.basicConfig(filename= logFile, level = logging.DEBUG) positive_dic = {} negative_dic = {} positive_dic = GetWordsDic(positive_words_path) negative_dic = GetWordsDic(negative_words_path) GenerateARFF(transcript_dir,arff_path, positive_dic, negative_dic)
gpl-2.0
-2,856,753,327,025,647,000
28.658824
138
0.596589
false
3.257106
false
false
false
MGHComputationalPathology/highdicom
src/highdicom/coding_schemes.py
1
3782
from typing import Optional, Sequence from pydicom.dataset import Dataset class CodingSchemeResourceItem(Dataset): """Class for items of the Coding Scheme Resource Sequence.""" def __init__(self, url: str, url_type: str) -> None: """ Parameters ---------- url: str unique resource locator url_type: str type of resource `url` points to (options: `{"DOC", "OWL", "CSV"}`) """ super().__init__() self.CodingSchemeURL = str(url) if url_type not in {"DOC", "OWL", "CSV"}: raise ValueError('Unknonw URL type.') self.CodingSchemeURLType = str(url_type) class CodingSchemeIdentificationItem(Dataset): """Class for items of the Coding Scheme Identification Sequence.""" def __init__( self, designator: str, name: Optional[str] = None, version: Optional[str] = None, registry: Optional[str] = None, uid: Optional[str] = None, external_id: Optional[str] = None, responsible_organization: Optional[str] = None, resources: Optional[Sequence[CodingSchemeResourceItem]] = None ) -> None: """ Parameters ---------- designator: str value of the Coding Scheme Designator attribute of a `CodedConcept` name: str, optional name of the scheme version: str, optional version of the scheme registry: str, optional name of an external registry where scheme may be obtained from; required if scheme is registered uid: str, optional unique identifier of the scheme; required if the scheme is registered by an ISO 8824 object identifier compatible with the UI value representation (VR) external_id: str, optional external identifier of the scheme; required if the scheme is registered and `uid` is not available responsible_organization: str, optional name of the organization that is responsible for the scheme resources: Sequence[pydicom.sr.coding.CodingSchemeResourceItem], optional one or more resources related to the scheme """ # noqa super().__init__() self.CodingSchemeDesignator = str(designator) if name is not None: self.CodingSchemeName = str(name) if version is not None: self.CodingSchemeVersion = str(version) if responsible_organization is not None: self.CodingSchemeResponsibleOrganization = \ str(responsible_organization) if registry is not None: self.CodingSchemeRegistry = str(registry) if uid is None and external_id is None: raise ValueError( 'UID or external ID is required if coding scheme is ' 'registered.' ) if uid is not None and external_id is not None: raise ValueError( 'Either UID or external ID should be specified for ' 'registered coding scheme.' ) if uid is not None: self.CodingSchemeUID = str(uid) elif external_id is not None: self.CodingSchemeExternalID = str(external_id) if resources is not None: self.CodingSchemeResourcesSequence: Sequence[Dataset] = [] for r in resources: if not isinstance(r, CodingSchemeResourceItem): raise TypeError( 'Resources must have type CodingSchemeResourceItem.' ) self.CodingSchemeResourcesSequence.append(r)
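# --- Hedged usage sketch (not part of the original file) ---
# Building an identification item for a registered scheme: the __init__
# above requires the designator plus exactly one of `uid` or `external_id`
# (it raises ValueError if both or neither are given). "DCM" and the UID
# "1.2.840.10008.2.16.4" are the standard values for the DICOM Controlled
# Terminology; the resource URL below is illustrative only.

item = CodingSchemeIdentificationItem(
    designator='DCM',
    name='DICOM Controlled Terminology',
    uid='1.2.840.10008.2.16.4',
    resources=[
        CodingSchemeResourceItem(
            url='https://dicom.nema.org/medical/dicom/current/output/chtml/part16/chapter_D.html',
            url_type='DOC',
        ),
    ],
)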
mit
-5,905,343,422,687,216,000
37.20202
81
0.582232
false
4.873711
false
false
false
sthalik/git-cola
cola/fsmonitor.py
1
19637
# Copyright (c) 2008 David Aguilar # Copyright (c) 2015 Daniel Harding """Provides an filesystem monitoring for Linux (via inotify) and for Windows (via pywin32 and the ReadDirectoryChanges function)""" from __future__ import division, absolute_import, unicode_literals import errno import os import os.path import select from threading import Lock from . import utils from . import version from .decorators import memoize AVAILABLE = None if utils.is_win32(): try: import pywintypes import win32con import win32event import win32file except ImportError: pass else: AVAILABLE = 'pywin32' elif utils.is_linux(): try: from . import inotify except ImportError: pass else: AVAILABLE = 'inotify' from qtpy import QtCore from qtpy.QtCore import Signal from . import core from . import gitcfg from . import gitcmds from .compat import bchr from .git import git from .i18n import N_ from .interaction import Interaction class _Monitor(QtCore.QObject): files_changed = Signal() def __init__(self, thread_class): QtCore.QObject.__init__(self) self._thread_class = thread_class self._thread = None def start(self): if self._thread_class is not None: assert self._thread is None self._thread = self._thread_class(self) self._thread.start() def stop(self): if self._thread_class is not None: assert self._thread is not None self._thread.stop() self._thread.wait() self._thread = None def refresh(self): if self._thread is not None: self._thread.refresh() class _BaseThread(QtCore.QThread): #: The delay, in milliseconds, between detecting file system modification #: and triggering the 'files_changed' signal, to coalesce multiple #: modifications into a single signal. _NOTIFICATION_DELAY = 888 def __init__(self, monitor): QtCore.QThread.__init__(self) self._monitor = monitor self._running = True self._use_check_ignore = version.check('check-ignore', version.git_version()) self._force_notify = False self._file_paths = set() @property def _pending(self): return self._force_notify or self._file_paths def refresh(self): """Do any housekeeping necessary in response to repository changes.""" pass def notify(self): """Notifies all observers""" do_notify = False if self._force_notify: do_notify = True elif self._file_paths: proc = core.start_command(['git', 'check-ignore', '--verbose', '--non-matching', '-z', '--stdin']) path_list = bchr(0).join(core.encode(path) for path in self._file_paths) out, err = proc.communicate(path_list) if proc.returncode: do_notify = True else: # Each output record is four fields separated by NULL # characters (records are also separated by NULL characters): # <source> <NULL> <linenum> <NULL> <pattern> <NULL> <pathname> # For paths which are not ignored, all fields will be empty # except for <pathname>. So to see if we have any non-ignored # files, we simply check every fourth field to see if any of # them are empty. 
source_fields = out.split(bchr(0))[0:-1:4] do_notify = not all(source_fields) self._force_notify = False self._file_paths = set() if do_notify: self._monitor.files_changed.emit() @staticmethod def _log_enabled_message(): msg = N_('File system change monitoring: enabled.\n') Interaction.safe_log(msg) if AVAILABLE == 'inotify': class _InotifyThread(_BaseThread): _TRIGGER_MASK = ( inotify.IN_ATTRIB | inotify.IN_CLOSE_WRITE | inotify.IN_CREATE | inotify.IN_DELETE | inotify.IN_MODIFY | inotify.IN_MOVED_FROM | inotify.IN_MOVED_TO ) _ADD_MASK = ( _TRIGGER_MASK | inotify.IN_EXCL_UNLINK | inotify.IN_ONLYDIR ) def __init__(self, monitor): _BaseThread.__init__(self, monitor) worktree = git.worktree() if worktree is not None: worktree = core.abspath(worktree) self._worktree = worktree self._git_dir = git.git_path() self._lock = Lock() self._inotify_fd = None self._pipe_r = None self._pipe_w = None self._worktree_wd_to_path_map = {} self._worktree_path_to_wd_map = {} self._git_dir_wd_to_path_map = {} self._git_dir_path_to_wd_map = {} self._git_dir_wd = None @staticmethod def _log_out_of_wds_message(): msg = N_('File system change monitoring: disabled because the' ' limit on the total number of inotify watches was' ' reached. You may be able to increase the limit on' ' the number of watches by running:\n' '\n' ' echo fs.inotify.max_user_watches=100000 |' ' sudo tee -a /etc/sysctl.conf &&' ' sudo sysctl -p\n') Interaction.safe_log(msg) def run(self): try: with self._lock: self._inotify_fd = inotify.init() self._pipe_r, self._pipe_w = os.pipe() poll_obj = select.poll() poll_obj.register(self._inotify_fd, select.POLLIN) poll_obj.register(self._pipe_r, select.POLLIN) self.refresh() self._log_enabled_message() while self._running: if self._pending: timeout = self._NOTIFICATION_DELAY else: timeout = None try: events = poll_obj.poll(timeout) except OSError as e: if e.errno == errno.EINTR: continue else: raise except select.error: continue else: if not self._running: break elif not events: self.notify() else: for fd, event in events: if fd == self._inotify_fd: self._handle_events() finally: with self._lock: if self._inotify_fd is not None: os.close(self._inotify_fd) self._inotify_fd = None if self._pipe_r is not None: os.close(self._pipe_r) self._pipe_r = None os.close(self._pipe_w) self._pipe_w = None def refresh(self): with self._lock: if self._inotify_fd is None: return try: if self._worktree is not None: tracked_dirs = set( os.path.dirname(os.path.join(self._worktree, path)) for path in gitcmds.tracked_files()) self._refresh_watches(tracked_dirs, self._worktree_wd_to_path_map, self._worktree_path_to_wd_map) git_dirs = set() git_dirs.add(self._git_dir) for dirpath, dirnames, filenames in core.walk( os.path.join(self._git_dir, 'refs')): git_dirs.add(dirpath) self._refresh_watches(git_dirs, self._git_dir_wd_to_path_map, self._git_dir_path_to_wd_map) self._git_dir_wd = \ self._git_dir_path_to_wd_map[self._git_dir] except OSError as e: if e.errno == errno.ENOSPC: self._log_out_of_wds_message() self._running = False else: raise def _refresh_watches(self, paths_to_watch, wd_to_path_map, path_to_wd_map): watched_paths = set(path_to_wd_map) for path in watched_paths - paths_to_watch: wd = path_to_wd_map.pop(path) wd_to_path_map.pop(wd) try: inotify.rm_watch(self._inotify_fd, wd) except OSError as e: if e.errno == errno.EINVAL: # This error can occur if the target of the wd was # removed on the filesystem before we call # inotify.rm_watch() so ignore it. 
pass else: raise for path in paths_to_watch - watched_paths: try: wd = inotify.add_watch(self._inotify_fd, core.encode(path), self._ADD_MASK) except OSError as e: if e.errno in (errno.ENOENT, errno.ENOTDIR): # These two errors should only occur as a result of # race conditions: the first if the directory # referenced by path was removed or renamed before the # call to inotify.add_watch(); the second if the # directory referenced by path was replaced with a file # before the call to inotify.add_watch(). Therefore we # simply ignore them. pass else: raise else: wd_to_path_map[wd] = path path_to_wd_map[path] = wd def _check_event(self, wd, mask, name): if mask & inotify.IN_Q_OVERFLOW: self._force_notify = True elif not mask & self._TRIGGER_MASK: pass elif mask & inotify.IN_ISDIR: pass elif wd in self._worktree_wd_to_path_map: if self._use_check_ignore: self._file_paths.add( os.path.join(self._worktree_wd_to_path_map[wd], core.decode(name))) else: self._force_notify = True elif wd == self._git_dir_wd: name = core.decode(name) if name == 'HEAD' or name == 'index': self._force_notify = True elif (wd in self._git_dir_wd_to_path_map and not core.decode(name).endswith('.lock')): self._force_notify = True def _handle_events(self): for wd, mask, cookie, name in \ inotify.read_events(self._inotify_fd): if not self._force_notify: self._check_event(wd, mask, name) def stop(self): self._running = False with self._lock: if self._pipe_w is not None: os.write(self._pipe_w, bchr(0)) self.wait() if AVAILABLE == 'pywin32': class _Win32Watch(object): def __init__(self, path, flags): self.flags = flags self.handle = None self.event = None try: self.handle = win32file.CreateFileW( path, 0x0001, # FILE_LIST_DIRECTORY win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE, None, win32con.OPEN_EXISTING, win32con.FILE_FLAG_BACKUP_SEMANTICS | win32con.FILE_FLAG_OVERLAPPED, None) self.buffer = win32file.AllocateReadBuffer(8192) self.event = win32event.CreateEvent(None, True, False, None) self.overlapped = pywintypes.OVERLAPPED() self.overlapped.hEvent = self.event self._start() except: self.close() raise def _start(self): win32file.ReadDirectoryChangesW(self.handle, self.buffer, True, self.flags, self.overlapped) def read(self): if win32event.WaitForSingleObject(self.event, 0) \ == win32event.WAIT_TIMEOUT: result = [] else: nbytes = win32file.GetOverlappedResult(self.handle, self.overlapped, False) result = win32file.FILE_NOTIFY_INFORMATION(self.buffer, nbytes) self._start() return result def close(self): if self.handle is not None: win32file.CancelIo(self.handle) win32file.CloseHandle(self.handle) if self.event is not None: win32file.CloseHandle(self.event) class _Win32Thread(_BaseThread): _FLAGS = (win32con.FILE_NOTIFY_CHANGE_FILE_NAME | win32con.FILE_NOTIFY_CHANGE_DIR_NAME | win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES | win32con.FILE_NOTIFY_CHANGE_SIZE | win32con.FILE_NOTIFY_CHANGE_LAST_WRITE | win32con.FILE_NOTIFY_CHANGE_SECURITY) def __init__(self, monitor): _BaseThread.__init__(self, monitor) worktree = git.worktree() if worktree is not None: worktree = self._transform_path(core.abspath(worktree)) self._worktree = worktree self._worktree_watch = None self._git_dir = self._transform_path(core.abspath(git.git_path())) self._git_dir_watch = None self._stop_event_lock = Lock() self._stop_event = None @staticmethod def _transform_path(path): return path.replace('\\', '/').lower() def _read_watch(self, watch): if win32event.WaitForSingleObject(watch.event, 0) \ == win32event.WAIT_TIMEOUT: nbytes = 0 else: nbytes = 
win32file.GetOverlappedResult(watch.handle, watch.overlapped, False) return win32file.FILE_NOTIFY_INFORMATION(watch.buffer, nbytes) def run(self): try: with self._stop_event_lock: self._stop_event = win32event.CreateEvent(None, True, False, None) events = [self._stop_event] if self._worktree is not None: self._worktree_watch = _Win32Watch(self._worktree, self._FLAGS) events.append(self._worktree_watch.event) self._git_dir_watch = _Win32Watch(self._git_dir, self._FLAGS) events.append(self._git_dir_watch.event) self._log_enabled_message() while self._running: if self._pending: timeout = self._NOTIFICATION_DELAY else: timeout = win32event.INFINITE rc = win32event.WaitForMultipleObjects(events, False, timeout) if not self._running: break elif rc == win32event.WAIT_TIMEOUT: self.notify() else: self._handle_results() finally: with self._stop_event_lock: if self._stop_event is not None: win32file.CloseHandle(self._stop_event) self._stop_event = None if self._worktree_watch is not None: self._worktree_watch.close() if self._git_dir_watch is not None: self._git_dir_watch.close() def _handle_results(self): if self._worktree_watch is not None: for action, path in self._worktree_watch.read(): if not self._running: break if self._force_notify: continue path = self._worktree + '/' + self._transform_path(path) if (path != self._git_dir and not path.startswith(self._git_dir + '/') and not os.path.isdir(path) ): if self._use_check_ignore: self._file_paths.add(path) else: self._force_notify = True for action, path in self._git_dir_watch.read(): if not self._running: break if self._force_notify: continue path = self._transform_path(path) if path.endswith('.lock'): continue if (path == 'head' or path == 'index' or path.startswith('refs/') ): self._force_notify = True def stop(self): self._running = False with self._stop_event_lock: if self._stop_event is not None: win32event.SetEvent(self._stop_event) self.wait() @memoize def current(): return _create_instance() def _create_instance(): thread_class = None cfg = gitcfg.current() if not cfg.get('cola.inotify', True): msg = N_('File system change monitoring: disabled because' ' "cola.inotify" is false.\n') Interaction.log(msg) elif AVAILABLE == 'inotify': thread_class = _InotifyThread elif AVAILABLE == 'pywin32': thread_class = _Win32Thread else: if utils.is_win32(): msg = N_('File system change monitoring: disabled because pywin32' ' is not installed.\n') Interaction.log(msg) elif utils.is_linux(): msg = N_('File system change monitoring: disabled because libc' ' does not support the inotify system calls.\n') Interaction.log(msg) return _Monitor(thread_class)
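# --- Hedged illustration (not part of the original file) ---
# notify() above pipes candidate paths to
#   git check-ignore --verbose --non-matching -z --stdin
# and inspects every fourth NUL-separated field (the <source> field) to
# decide whether at least one path is *not* ignored. The slicing behaves
# like this on a hand-built sample buffer (byte values illustrative only):

sample = (b'\x00\x00\x00unignored.py\x00'                # non-ignored: empty <source>
          b'.gitignore\x001\x00*.log\x00debug.log\x00')  # ignored by *.log
source_fields = sample.split(b'\x00')[0:-1:4]
# source_fields == [b'', b'.gitignore']; the empty <source> means a
# non-ignored path exists, so `not all(source_fields)` is True and the
# files_changed signal gets emitted.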
gpl-2.0
2,013,281,631,736,924,400
36.836224
79
0.470795
false
4.600984
false
false
false
gqueiroz/scigws
src/server/apps/wcs/base.py
1
1330
from xml.etree import ElementTree
from exception import WCSException
from json import loads
from psycopg2 import connect

import abc


class WCSBase(object):
    __metaclass__ = abc.ABCMeta

    geo_arrays = None

    def __init__(self, db_path="config/db.config.json", meta="config/geo_arrays.json"):
        super(WCSBase, self).__init__()
        self.ns_dict = self._initialize_namespaces()
        self.dom = self._create_dom()
        self._register_namespaces()

        try:
            with open(meta) as data:
                self.geo_arrays = loads(data.read())
            with open(db_path) as data:
                self.config = loads(data.read())
        except StandardError as e:
            raise WCSException(e)

    @classmethod
    def _initialize_namespaces(cls):
        return {
            "gml": "http://www.opengis.net/gml/3.2",
            "gmlcov": "http://www.opengis.net/gmlcov/1.0",
            "swe": "http://www.opengis.net/swe/2.0",
            "ows": "http://www.opengis.net/ows/2.0",
            "wcs": "http://www.opengis.net/wcs/2.0"
        }

    @abc.abstractmethod
    def _create_dom(self):
        """
        :return:
        """

    def _register_namespaces(self):
        for namespace in self.ns_dict:
            ElementTree.register_namespace(namespace, self.ns_dict[namespace])
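# --- Hedged illustration (not part of the original file) ---
# _register_namespaces() above registers each prefix with ElementTree so
# that serialized WCS documents use the familiar gml/ows/wcs prefixes
# instead of auto-generated ones (ns0, ns1, ...). Standalone example with
# one of the same namespaces; the element name is illustrative only.

from xml.etree import ElementTree

ElementTree.register_namespace('ows', 'http://www.opengis.net/ows/2.0')
elem = ElementTree.Element('{http://www.opengis.net/ows/2.0}ServiceIdentification')
print(ElementTree.tostring(elem).decode())
# -> <ows:ServiceIdentification xmlns:ows="http://www.opengis.net/ows/2.0" />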
gpl-3.0
2,134,826,318,577,003,800
29.25
87
0.575188
false
3.663912
false
false
false
michaelaye/planet4
planet4/science/activity.py
1
3436
import os from pathlib import Path import matplotlib.pyplot as plt import numpy as np import pandas as pd from numpy import arccos, cos, sin, pi # from little_helpers.p4_tools import get_final_markings_counts from planet4 import io, markings, region_data meta_data_fn = Path.home() / 'Dropbox/SternchenAndMe/python_stuff/hirise_rdr_index.hdf' def arc_length(lat1, lon1, lat2, lon2): """calculate length of arc from coordinates of end points works for east longitudes and south latitdes (with +) """ phi1 = pi + lat1 phi2 = pi + lat2 AB = sin(phi1) * sin(phi2) + cos(-lon1 + lon2) * cos(phi1) * cos(phi2) arc = arccos(AB) return arc def spherical_excess(a, b, c): "spherical excess of the triangle." A = arccos((cos(a) - cos(b) * cos(c)) / sin(b) / sin(c)) B = arccos((cos(b) - cos(c) * cos(a)) / sin(c) / sin(a)) C = arccos((cos(c) - cos(a) * cos(b)) / sin(a) / sin(b)) E = A + B + C - pi return(E) def triangle_area(a, b, c): '''calculate area of a spherical triangle''' R_Mars = 3376.20000 # in km E = spherical_excess(a, b, c) Area = R_Mars**2 * E return Area def get_metadata(region, season=None): if season is not None: fname = f"{region}_season{season}_metadata.csv" df = pd.read_csv(io.analysis_folder() / fname) df = df.drop('path', axis=1) else: fglob = f"{region}_season*_metadata.csv" globpath = io.analysis_folder().glob(fglob) bucket = [] for fname in globpath: bucket.append(pd.read_csv(fname)) df = pd.concat(bucket, ignore_index=False) df = df.drop('path', axis=1) return df def get_final_markings_counts(root, img_name, cut=0.5): # writing in dictionary here b/c later I convert it to pd.DataFrame # for which a dictionary is a natural input format d = {} d['obsid'] = img_name blotch_fname = root / '{}_blotches.csv'.format(img_name) # was _latlon d['n_blotches'] = len(pd.read_csv(str(blotch_fname))) fan_fname = root / '{}_fans.csv'.format(img_name) # was '{}_fans_latlons.csv'.format(img_name) d['n_fans'] = len(pd.read_csv(str(fan_fname))) return d def scaling_blotch(row, BlAreaCut=False, catalog_name='p4_catalog'): obsid = row.obsid catalog_folder = io.analysis_folder() / catalog_name bl_file2open = catalog_folder / (obsid + '_blotches.csv') bc = markings.BlotchContainer.from_fname(bl_file2open) if BlAreaCut: all_bl_areas = np.array([obj.area for obj in bc.content]) min_bl_area = row.min_bl_area # TODO: Ask Anya about this never being used? nr_bl2subtract = len(all_bl_areas[all_bl_areas > min_bl_area]) else: all_bl_rad1 = np.array([obj.data.radius_1 for obj in bc.content]) # maybe make min of these? all_bl_rad2 = np.array([obj.data.radius_2 for obj in bc.content]) min_bl = row.min_bl_radius nr_bl_red = len(all_bl_rad1[all_bl_rad1 > min_bl]) return nr_bl_red def scaling_fan(row): obsid = row.obsid min_fan = row.min_fan_length catalog_folder = io.analysis_folder() / 'p4_catalog' fan_file2open = catalog_folder / (obsid + '_fans.csv') fc = markings.FanContainer.from_fname(fan_file2open) all_fan_length = np.array([obj.data.distance for obj in fc.content]) nr_fans_red = len(all_fan_length[all_fan_length > min_fan]) return nr_fans_red
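# --- Hedged worked example (not part of the original file) ---
# arc_length() returns great-circle arcs in radians and triangle_area()
# converts three such arcs into an area through the spherical excess
# E = A + B + C - pi, scaled by R_Mars**2. A quick sanity check on an
# octant-sized triangle (three right-angle corners, so E = pi/2):

import numpy as np

a = b = c = np.pi / 2             # three quarter-circle sides
E = spherical_excess(a, b, c)     # == pi/2
area = triangle_area(a, b, c)     # == 3376.2**2 * pi/2, about 1.79e7 km^2
print(E, area)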
isc
-8,606,421,379,204,186,000
33.019802
99
0.627474
false
2.899578
false
false
false
jleivaizq/dotfiles
vim/vim.symlink/bundle/vimoutliner/vimoutliner/scripts/otlreorder.py
2
8303
#!/usr/bin/python # otlreorder.py # Grep and reorder an outline for a regex and return the branch # with all the leaves. # # Copyright 2006 Noel Henson All rights reserved # # $Revision: 1.7 $ # $Date: 2008/09/17 21:34:25 $ # $Author: noel $ # $Source: /home/noel/active/otlreorder/RCS/otlreorder.py,v $ # $Locker: $ ########################################################################### # Basic function # # This program searches an outline file for a branch that contains # a line matching the regex argument. The parent headings (branches) # and the children (sub-branches and leaves) of the matching headings # are returned with the outline focused on the search term. # # Examples # # Using this outline: # # Pets # Indoor # Cats # Sophia # Hillary # Rats # Finley # Oliver # Dogs # Kirby # Outdoor # Dogs # Kirby # Hoover # Goats # Primrose # Joey # # a reorder for Sophia returns: # # Sophia # Indoor # Cats # # a reorder for Dogs returns: # # Dogs # Indoor # Kirby # Hoover # Outdoor # Kirby # Hoover # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ########################################################################### # include whatever mdules we need import sys from string import * from re import * ########################################################################### # global variables debug = 0 ignorecase = 0 pattern = "" patterns = [] completePatterns = [] inputfile = "" lines = [] ########################################################################### # function definitions# usage # # print debug statements # input: string # output: string printed to standard out def dprint(*vals): global debug if debug != 0: print vals # usage # print the simplest form of help # input: none # output: simple command usage is printed on the console def showUsage(): print print "Usage:" print "otlreorder.py [options] pattern [pattern...] [file]" print "Options" print " - use STDIN instead of file" print " -i Ignore case" print " --version Print version (RCS) information." print " --help Show help." print "[file...] is zero or more files to search. Wildcards are supported." print " if no file is specified, input is expected on stdin." 
print "output is on STDOUT" print # version # print the RCS version information # input: none # output: RSC version information is printed on the console def showVersion(): print print "RCS" print " $Revision: 1.7 $" print " $Date: 2008/09/17 21:34:25 $" print " $Author: noel $" print # getArgs # Check for input arguments and set the necessary switches # input: none # output: possible console output for help, switch variables may be set def getArgs(): global debug, pattern, inputfile, ignorecase usestdin = 0 if (len(sys.argv) == 1): showUsage() sys.exit()() else: for i in range(len(sys.argv)): if (i != 0): if (sys.argv[i] == "-d"): debug = 1 # test for debug flag elif (sys.argv[i] == "-"): usestdin = 1 # test for debug flag elif (sys.argv[i] == "-i"): ignorecase = 1 # test for debug flag elif (sys.argv[i] == "-?"): # test for help flag showUsage() # show the help sys.exit() # exit elif (sys.argv[i] == "--help"): showUsage() sys.exit() elif (sys.argv[i] == "--version"): showVersion() sys.exit() elif (sys.argv[i][0] == "-"): print "Error! Unknown option. Aborting" sys.exit() else: # get the input file name patterns.append(sys.argv[i]) if (usestdin == 0): inputfile = patterns.pop() # getLineLevel # get the level of the current line (count the number of tabs) # input: linein - a single line that may or may not have tabs at the beginning # output: returns a number 1 is the lowest def getLineLevel(linein): strstart = lstrip(linein) # find the start of text in line x = find(linein,strstart) # find the text index in the line n = count(linein,"\t",0,x) # count the tabs return(n+1) # return the count + 1 (for level) # processFile # split an outline file # input: file - the filehandle of the file we are splitting # output: output files def processFile(pattern): global lines, debug, ignorecase parents = [] parentprinted = [] parents.append("pattern") parentprinted.append(0) for i in range(10): parents.append("") parentprinted.append(0) matchlevel = 0 i = 1 while (i < len(lines)): line = lines[i] level = getLineLevel(line) line = "\t"+line parents[level] = line parentprinted[level] = 0 # if (ignorecase == 1): linesearch = search(pattern,lstrip(rstrip(line)),I) # else: linesearch = search(pattern,lstrip(rstrip(line))) # if (linesearch != None): if (pattern == lstrip(rstrip(line))): if parents[0] != lstrip(line): parents[0] = lstrip(line) parentprinted[0] = 0 matchlevel = level # print my ancestors for j in range(level): if (parentprinted[j] == 0): print parents[j][:-1] parentprinted[j] = 1 # print my decendents i = i + 1 if i >= len(lines): return line = lines[i] level = getLineLevel(line) while (i < len(lines)) and (getLineLevel(line) > matchlevel): if (i < len(lines)): line = lines[i] level = getLineLevel(line) if (level > matchlevel): print line[:-1] else: i = i - 1 i = i + 1 i = i - 1 i = i + 1 # getCompletePattern # search lines for pattern matches to generate a specific list of patterns to search for # input: pattern and lines # output: patterns updated with specific, complete patterns def getCompletePattern(pattern): global completePatterns, lines, debug, ignorecase for i in range(len(lines)): line = lines[i] if (ignorecase == 1): linepattern = search(pattern,lstrip(rstrip(line)),I) else: linepattern = search(pattern,lstrip(rstrip(line))) if (linepattern != None): completePatterns.append(lstrip(rstrip(line))) if debug != 0: print lstrip(rstrip(line)) # getCompletePatterns # search lines for pattern matches to generate a specific list of patterns to search for # input: pattern and lines # output: 
patterns updated with specific, complete patterns def getCompletePatterns(): global completePatterns, patterns, debug for i in range(len(patterns)): getCompletePattern(patterns[i]) if (debug!=0): print "patterns:" for i in range(len(patterns)): print patterns[i] # perform the equivalent of the sort | uniq completePatterns.sort() unionPatterns = set(completePatterns) unionPatterns.union(unionPatterns) completePatterns = list(unionPatterns) if (debug!=0): print "complete patterns:" for i in range(len(completePatterns)): print completePatterns[i] # main # split an outline # input: args and input file # output: output files def main(): global lines, inputfile, completePatterns, patterns, debug getArgs() if (len(inputfile) == 0): line = sys.stdin.readline() while (line != ""): lines.append(line) line = sys.stdin.readline() else: file = open(inputfile,"r") line = file.readline() while (line != ""): lines.append(line) line = file.readline() file.close() getCompletePatterns() for i in range(len(completePatterns)): processFile(completePatterns[i]) main()
mit
-434,278,877,089,062,400
26.145763
88
0.613393
false
3.330525
false
false
false
raphaeldore/analyzr
analyzr/utils/useful.py
1
2195
import sys from io import TextIOWrapper def pprint_table(table: list, header_labels: list, blank_line_after_header: bool = True, out: TextIOWrapper = sys.stdout): """Prints out a table of data, padded for alignment @param table: The table to print. A list of lists. @param header_labels: A list containing the headers of each columns. @param blank_line_after_header: Whether to add a blank line after the header or not. @param out: Output stream (file-like object) Each row must have the same number of columns. Adapted from the code available in the comments of this blog post: http://ginstrom.com/scribbles/2007/09/04/pretty-printing-a-table-in-python/ """ # We use this often. Why not cache it ;) len_nb_elements = len(header_labels) def get_max_width(column_index: int): """Get the maximum width of the given column index""" label_width = len(str(header_labels[column_index])) max_column_width = max([len(str(row1[column_index])) for row1 in table]) # If the label is longer than the largest columns, then the max width is the label return label_width if label_width > max_column_width else max_column_width col_paddings = [] for i in range(len_nb_elements): col_paddings.append(get_max_width(column_index=i)) def print_row(row: list): for i in range(len_nb_elements): col = str(row[i]).rjust(col_paddings[i] + 1) print(col, end=" |", file=out) # new line print(file=out) # display header print_row(header_labels) # display blank line if requested if blank_line_after_header: print_row([" "] * len_nb_elements) for data_row in table: print_row(data_row) return if __name__ == "__main__": # Temporary tests. Real tests will be added soon. header_labels = ["Name this is a long header label", "a", "Fruits"] data = [["John", "Carottes", "Pommes"], ["Bob", "Piments", "Fraises"], ["Elvis", "Patates", "Bananes"]] pprint_table(table=data, header_labels=header_labels, blank_line_after_header=True, out=sys.stdout)
mit
4,396,319,637,541,835,300
33.84127
107
0.637358
false
3.610197
false
false
false
silly-wacky-3-town-toon/SOURCE-COD
toontown/ai/DistributedGreenToonEffectMgr.py
1
1177
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from direct.interval.IntervalGlobal import *
from otp.speedchat import SpeedChatGlobals
from toontown.toonbase import TTLocalizer


class DistributedGreenToonEffectMgr(DistributedObject.DistributedObject):
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGreenToonEffectMgr')

    def __init__(self, cr):
        DistributedObject.DistributedObject.__init__(self, cr)

        def phraseSaid(phraseId):
            greenPhrase = 30450
            if phraseId == greenPhrase:
                self.addGreenToonEffect()

        self.accept(SpeedChatGlobals.SCStaticTextMsgEvent, phraseSaid)

    def announceGenerate(self):
        DistributedObject.DistributedObject.announceGenerate(self)

    def delete(self):
        self.ignore(SpeedChatGlobals.SCStaticTextMsgEvent)
        DistributedObject.DistributedObject.delete(self)

    def addGreenToonEffect(self):
        av = base.localAvatar
        self.sendUpdate('addGreenToonEffect', [])
        msgTrack = Sequence(Func(av.setSystemMessage, 0, TTLocalizer.GreenToonEffectMsg))
        msgTrack.start()
apache-2.0
-3,224,361,193,588,967,400
36.967742
89
0.743415
false
4.28
false
false
false
beobal/cassandra-dtest
bootstrap_test.py
1
28305
import os import random import re import shutil import tempfile import threading import time import logging import signal from cassandra import ConsistencyLevel from cassandra.concurrent import execute_concurrent_with_args from ccmlib.node import NodeError import pytest from dtest import Tester, create_ks, create_cf from tools.assertions import (assert_almost_equal, assert_bootstrap_state, assert_not_running, assert_one, assert_stderr_clean) from tools.data import query_c1c2 from tools.intervention import InterruptBootstrap, KillOnBootstrap from tools.misc import new_node from tools.misc import generate_ssl_stores, retry_till_success since = pytest.mark.since logger = logging.getLogger(__name__) class TestBootstrap(Tester): @pytest.fixture(autouse=True) def fixture_add_additional_log_patterns(self, fixture_dtest_setup): fixture_dtest_setup.allow_log_errors = True fixture_dtest_setup.ignore_log_patterns = ( # This one occurs when trying to send the migration to a # node that hasn't started yet, and when it does, it gets # replayed and everything is fine. r'Can\'t send migration request: node.*is down', # ignore streaming error during bootstrap r'Exception encountered during startup', r'Streaming error occurred' ) def _base_bootstrap_test(self, bootstrap=None, bootstrap_from_version=None, enable_ssl=None): def default_bootstrap(cluster, token): node2 = new_node(cluster) node2.set_configuration_options(values={'initial_token': token}) node2.start(wait_for_binary_proto=True) return node2 if bootstrap is None: bootstrap = default_bootstrap cluster = self.cluster if enable_ssl: logger.debug("***using internode ssl***") generate_ssl_stores(self.fixture_dtest_setup.test_path) cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path) tokens = cluster.balanced_tokens(2) cluster.set_configuration_options(values={'num_tokens': 1}) logger.debug("[node1, node2] tokens: %r" % (tokens,)) keys = 10000 # Create a single node cluster cluster.populate(1) node1 = cluster.nodelist()[0] if bootstrap_from_version: logger.debug("starting source node on version {}".format(bootstrap_from_version)) node1.set_install_dir(version=bootstrap_from_version) node1.set_configuration_options(values={'initial_token': tokens[0]}) cluster.start(wait_other_notice=True) session = self.patient_cql_connection(node1) create_ks(session, 'ks', 1) create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'}) # record the size before inserting any of our own data empty_size = node1.data_size() logger.debug("node1 empty size : %s" % float(empty_size)) insert_statement = session.prepare("INSERT INTO ks.cf (key, c1, c2) VALUES (?, 'value1', 'value2')") execute_concurrent_with_args(session, insert_statement, [['k%d' % k] for k in range(keys)]) node1.flush() node1.compact() initial_size = node1.data_size() logger.debug("node1 size before bootstrapping node2: %s" % float(initial_size)) # Reads inserted data all during the bootstrap process. 
We shouldn't # get any error query_c1c2(session, random.randint(0, keys - 1), ConsistencyLevel.ONE) session.shutdown() # Bootstrapping a new node in the current version node2 = bootstrap(cluster, tokens[1]) node2.compact() node1.cleanup() logger.debug("node1 size after cleanup: %s" % float(node1.data_size())) node1.compact() logger.debug("node1 size after compacting: %s" % float(node1.data_size())) logger.debug("node2 size after compacting: %s" % float(node2.data_size())) size1 = float(node1.data_size()) size2 = float(node2.data_size()) assert_almost_equal(size1, size2, error=0.3) assert_almost_equal(float(initial_size - empty_size), 2 * (size1 - float(empty_size))) assert_bootstrap_state(self, node2, 'COMPLETED') @pytest.mark.no_vnodes def test_simple_bootstrap_with_ssl(self): self._base_bootstrap_test(enable_ssl=True) @pytest.mark.no_vnodes def test_simple_bootstrap(self): self._base_bootstrap_test() @pytest.mark.no_vnodes def test_bootstrap_on_write_survey(self): def bootstrap_on_write_survey_and_join(cluster, token): node2 = new_node(cluster) node2.set_configuration_options(values={'initial_token': token}) node2.start(jvm_args=["-Dcassandra.write_survey=true"], wait_for_binary_proto=True) assert len(node2.grep_log('Startup complete, but write survey mode is active, not becoming an active ring member.')) assert_bootstrap_state(self, node2, 'IN_PROGRESS') node2.nodetool("join") assert len(node2.grep_log('Leaving write survey mode and joining ring at operator request')) return node2 self._base_bootstrap_test(bootstrap_on_write_survey_and_join) @since('3.10') @pytest.mark.no_vnodes def test_simple_bootstrap_small_keepalive_period(self): """ @jira_ticket CASSANDRA-11841 Test that bootstrap completes if it takes longer than streaming_socket_timeout_in_ms or 2*streaming_keep_alive_period_in_secs to receive a single sstable """ cluster = self.cluster yaml_opts = {'streaming_keep_alive_period_in_secs': 2} if cluster.version() < '4.0': yaml_opts['streaming_socket_timeout_in_ms'] = 1000 cluster.set_configuration_options(values=yaml_opts) # Create a single node cluster cluster.populate(1) node1 = cluster.nodelist()[0] logger.debug("Setting up byteman on {}".format(node1.name)) # set up byteman node1.byteman_port = '8100' node1.import_config_files() cluster.start(wait_other_notice=True) # Create more than one sstable larger than 1MB node1.stress(['write', 'n=1K', '-rate', 'threads=8', '-schema', 'compaction(strategy=SizeTieredCompactionStrategy, enabled=false)']) cluster.flush() logger.debug("Submitting byteman script to {} to".format(node1.name)) # Sleep longer than streaming_socket_timeout_in_ms to make sure the node will not be killed node1.byteman_submit(['./byteman/stream_5s_sleep.btm']) # Bootstraping a new node with very small streaming_socket_timeout_in_ms node2 = new_node(cluster) node2.start(wait_for_binary_proto=True) # Shouldn't fail due to streaming socket timeout timeout assert_bootstrap_state(self, node2, 'COMPLETED') for node in cluster.nodelist(): assert node.grep_log('Scheduling keep-alive task with 2s period.', filename='debug.log') assert node.grep_log('Sending keep-alive', filename='debug.log') assert node.grep_log('Received keep-alive', filename='debug.log') def test_simple_bootstrap_nodata(self): """ @jira_ticket CASSANDRA-11010 Test that bootstrap completes if streaming from nodes with no data """ cluster = self.cluster # Create a two-node cluster cluster.populate(2) cluster.start(wait_other_notice=True) # Bootstrapping a new node node3 = new_node(cluster) 
node3.start(wait_for_binary_proto=True, wait_other_notice=True) assert_bootstrap_state(self, node3, 'COMPLETED') def test_read_from_bootstrapped_node(self): """ Test bootstrapped node sees existing data @jira_ticket CASSANDRA-6648 """ cluster = self.cluster cluster.populate(3) cluster.start() node1 = cluster.nodes['node1'] node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor=2)']) session = self.patient_cql_connection(node1) stress_table = 'keyspace1.standard1' original_rows = list(session.execute("SELECT * FROM %s" % (stress_table,))) node4 = new_node(cluster) node4.start(wait_for_binary_proto=True) session = self.patient_exclusive_cql_connection(node4) new_rows = list(session.execute("SELECT * FROM %s" % (stress_table,))) assert original_rows == new_rows def test_consistent_range_movement_true_with_replica_down_should_fail(self): self._bootstrap_test_with_replica_down(True) def test_consistent_range_movement_false_with_replica_down_should_succeed(self): self._bootstrap_test_with_replica_down(False) def test_consistent_range_movement_true_with_rf1_should_fail(self): self._bootstrap_test_with_replica_down(True, rf=1) def test_consistent_range_movement_false_with_rf1_should_succeed(self): self._bootstrap_test_with_replica_down(False, rf=1) def _bootstrap_test_with_replica_down(self, consistent_range_movement, rf=2): """ Test to check consistent bootstrap will not succeed when there are insufficient replicas @jira_ticket CASSANDRA-11848 """ cluster = self.cluster cluster.populate(2) node1, node2 = cluster.nodelist() node3_token = None # Make token assignment deterministic if not self.dtest_config.use_vnodes: cluster.set_configuration_options(values={'num_tokens': 1}) tokens = cluster.balanced_tokens(3) logger.debug("non-vnode tokens: %r" % (tokens,)) node1.set_configuration_options(values={'initial_token': tokens[0]}) node2.set_configuration_options(values={'initial_token': tokens[2]}) node3_token = tokens[1] # Add node 3 between node1 and node2 cluster.start() node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor={})'.format(rf)]) # change system_auth keyspace to 2 (default is 1) to avoid # "Unable to find sufficient sources for streaming" warning if cluster.cassandra_version() >= '2.2.0': session = self.patient_cql_connection(node1) session.execute(""" ALTER KEYSPACE system_auth WITH replication = {'class':'SimpleStrategy', 'replication_factor':2}; """) # Stop node2, so node3 will not be able to perform consistent range movement node2.stop(wait_other_notice=True) successful_bootstrap_expected = not consistent_range_movement node3 = new_node(cluster, token=node3_token) node3.start(wait_for_binary_proto=successful_bootstrap_expected, wait_other_notice=successful_bootstrap_expected, jvm_args=["-Dcassandra.consistent.rangemovement={}".format(consistent_range_movement)]) if successful_bootstrap_expected: # with rf=1 and cassandra.consistent.rangemovement=false, missing sources are ignored if not consistent_range_movement and rf == 1: node3.watch_log_for("Unable to find sufficient sources for streaming range") assert node3.is_running() assert_bootstrap_state(self, node3, 'COMPLETED') else: if consistent_range_movement: node3.watch_log_for("A node required to move the data consistently is down") else: node3.watch_log_for("Unable to find sufficient sources for streaming range") assert_not_running(node3) @since('2.2') def test_resumable_bootstrap(self): """ Test resuming bootstrap after data streaming failure 
""" cluster = self.cluster cluster.populate(2) node1 = cluster.nodes['node1'] # set up byteman node1.byteman_port = '8100' node1.import_config_files() cluster.start(wait_other_notice=True) # kill stream to node3 in the middle of streaming to let it fail if cluster.version() < '4.0': node1.byteman_submit(['./byteman/pre4.0/stream_failure.btm']) else: node1.byteman_submit(['./byteman/4.0/stream_failure.btm']) node1.stress(['write', 'n=1K', 'no-warmup', 'cl=TWO', '-schema', 'replication(factor=2)', '-rate', 'threads=50']) cluster.flush() # start bootstrapping node3 and wait for streaming node3 = new_node(cluster) node3.start(wait_other_notice=False, wait_for_binary_proto=True) # wait for node3 ready to query node3.watch_log_for("Starting listening for CQL clients") mark = node3.mark_log() # check if node3 is still in bootstrap mode retry_till_success(assert_bootstrap_state, tester=self, node=node3, expected_bootstrap_state='IN_PROGRESS', timeout=120) # bring back node1 and invoke nodetool bootstrap to resume bootstrapping node3.nodetool('bootstrap resume') node3.watch_log_for("Resume complete", from_mark=mark) assert_bootstrap_state(self, node3, 'COMPLETED') # cleanup to guarantee each node will only have sstables of its ranges cluster.cleanup() logger.debug("Check data is present") # Let's check stream bootstrap completely transferred data stdout, stderr, _ = node3.stress(['read', 'n=1k', 'no-warmup', '-schema', 'replication(factor=2)', '-rate', 'threads=8']) if stdout is not None: assert "FAILURE" not in stdout @since('2.2') def test_bootstrap_with_reset_bootstrap_state(self): """Test bootstrap with resetting bootstrap progress""" cluster = self.cluster cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1}) cluster.populate(2).start(wait_other_notice=True) node1 = cluster.nodes['node1'] node1.stress(['write', 'n=100K', '-schema', 'replication(factor=2)']) node1.flush() # kill node1 in the middle of streaming to let it fail t = InterruptBootstrap(node1) t.start() # start bootstrapping node3 and wait for streaming node3 = new_node(cluster) try: node3.start() except NodeError: pass # node doesn't start as expected t.join() node1.start() # restart node3 bootstrap with resetting bootstrap progress node3.stop(signal_event=signal.SIGKILL) mark = node3.mark_log() node3.start(jvm_args=["-Dcassandra.reset_bootstrap_progress=true"]) # check if we reset bootstrap state node3.watch_log_for("Resetting bootstrap progress to start fresh", from_mark=mark) # wait for node3 ready to query node3.wait_for_binary_interface(from_mark=mark) # check if 2nd bootstrap succeeded assert_bootstrap_state(self, node3, 'COMPLETED') def test_manual_bootstrap(self): """ Test adding a new node and bootstrapping it manually. No auto_bootstrap. This test also verify that all data are OK after the addition of the new node. 
@jira_ticket CASSANDRA-9022 """ cluster = self.cluster cluster.populate(2).start(wait_other_notice=True) (node1, node2) = cluster.nodelist() node1.stress(['write', 'n=1K', 'no-warmup', '-schema', 'replication(factor=2)', '-rate', 'threads=1', '-pop', 'dist=UNIFORM(1..1000)']) session = self.patient_exclusive_cql_connection(node2) stress_table = 'keyspace1.standard1' original_rows = list(session.execute("SELECT * FROM %s" % stress_table)) # Add a new node node3 = new_node(cluster, bootstrap=False) node3.start(wait_for_binary_proto=True) node3.repair() node1.cleanup() current_rows = list(session.execute("SELECT * FROM %s" % stress_table)) assert original_rows == current_rows def test_local_quorum_bootstrap(self): """ Test that CL local_quorum works while a node is bootstrapping. @jira_ticket CASSANDRA-8058 """ cluster = self.cluster cluster.populate([1, 1]) cluster.start() node1 = cluster.nodes['node1'] yaml_config = """ # Create the keyspace and table keyspace: keyspace1 keyspace_definition: | CREATE KEYSPACE keyspace1 WITH replication = {'class': 'NetworkTopologyStrategy', 'dc1': 1, 'dc2': 1}; table: users table_definition: CREATE TABLE users ( username text, first_name text, last_name text, email text, PRIMARY KEY(username) ) WITH compaction = {'class':'SizeTieredCompactionStrategy'}; insert: partitions: fixed(1) batchtype: UNLOGGED queries: read: cql: select * from users where username = ? fields: samerow """ with tempfile.NamedTemporaryFile(mode='w+') as stress_config: stress_config.write(yaml_config) stress_config.flush() node1.stress(['user', 'profile=' + stress_config.name, 'n=2M', 'no-warmup', 'ops(insert=1)', '-rate', 'threads=50']) node3 = new_node(cluster, data_center='dc2') node3.start(no_wait=True) time.sleep(3) out, err, _ = node1.stress(['user', 'profile=' + stress_config.name, 'ops(insert=1)', 'n=500K', 'no-warmup', 'cl=LOCAL_QUORUM', '-rate', 'threads=5', '-errors', 'retries=2']) logger.debug(out) assert_stderr_clean(err) regex = re.compile("Operation.+error inserting key.+Exception") failure = regex.search(str(out)) assert failure is None, "Error during stress while bootstrapping" def test_shutdown_wiped_node_cannot_join(self): self._wiped_node_cannot_join_test(gently=True) def test_killed_wiped_node_cannot_join(self): self._wiped_node_cannot_join_test(gently=False) def _wiped_node_cannot_join_test(self, gently): """ @jira_ticket CASSANDRA-9765 Test that if we stop a node and wipe its data then the node cannot join when it is not a seed. Test both a nice shutdown or a forced shutdown, via the gently parameter. """ cluster = self.cluster cluster.populate(3) cluster.start(wait_for_binary_proto=True) stress_table = 'keyspace1.standard1' # write some data node1 = cluster.nodelist()[0] node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8']) session = self.patient_cql_connection(node1) original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,))) # Add a new node, bootstrap=True ensures that it is not a seed node4 = new_node(cluster, bootstrap=True) node4.start(wait_for_binary_proto=True) session = self.patient_cql_connection(node4) assert original_rows == list(session.execute("SELECT * FROM {}".format(stress_table,))) # Stop the new node and wipe its data node4.stop(gently=gently) self._cleanup(node4) # Now start it, it should not be allowed to join. 
mark = node4.mark_log() node4.start(no_wait=True, wait_other_notice=False) node4.watch_log_for("A node with address {} already exists, cancelling join".format(node4.address_for_current_version_slashy()), from_mark=mark) def test_decommissioned_wiped_node_can_join(self): """ @jira_ticket CASSANDRA-9765 Test that if we decommission a node and then wipe its data, it can join the cluster. """ cluster = self.cluster cluster.populate(3) cluster.start(wait_for_binary_proto=True) stress_table = 'keyspace1.standard1' # write some data node1 = cluster.nodelist()[0] node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8']) session = self.patient_cql_connection(node1) original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,))) # Add a new node, bootstrap=True ensures that it is not a seed node4 = new_node(cluster, bootstrap=True) node4.start(wait_for_binary_proto=True, wait_other_notice=True) session = self.patient_cql_connection(node4) assert original_rows == list(session.execute("SELECT * FROM {}".format(stress_table,))) # Decommission the new node and wipe its data node4.decommission() node4.stop() self._cleanup(node4) # Now start it, it should be allowed to join mark = node4.mark_log() node4.start(wait_other_notice=True) node4.watch_log_for("JOINING:", from_mark=mark) def test_decommissioned_wiped_node_can_gossip_to_single_seed(self): """ @jira_ticket CASSANDRA-8072 @jira_ticket CASSANDRA-8422 Test that if we decommission a node, kill it and wipe its data, it can join a cluster with a single seed node. """ cluster = self.cluster cluster.populate(1) cluster.start(wait_for_binary_proto=True) node1 = cluster.nodelist()[0] # Add a new node, bootstrap=True ensures that it is not a seed node2 = new_node(cluster, bootstrap=True) node2.start(wait_for_binary_proto=True, wait_other_notice=True) session = self.patient_cql_connection(node1) if cluster.version() >= '2.2': # reduce system_distributed RF to 2 so we don't require forceful decommission session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};") session.execute("ALTER KEYSPACE system_traces WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'1'};") # Decommision the new node and kill it logger.debug("Decommissioning & stopping node2") node2.decommission() node2.stop(wait_other_notice=False) # Wipe its data for data_dir in node2.data_directories(): logger.debug("Deleting {}".format(data_dir)) shutil.rmtree(data_dir) commitlog_dir = os.path.join(node2.get_path(), 'commitlogs') logger.debug("Deleting {}".format(commitlog_dir)) shutil.rmtree(commitlog_dir) # Now start it, it should be allowed to join mark = node2.mark_log() logger.debug("Restarting wiped node2") node2.start(wait_other_notice=False) node2.watch_log_for("JOINING:", from_mark=mark) def test_failed_bootstrap_wiped_node_can_join(self): """ @jira_ticket CASSANDRA-9765 Test that if a node fails to bootstrap, it can join the cluster even if the data is wiped. 
""" cluster = self.cluster cluster.populate(1) cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1}) cluster.start(wait_for_binary_proto=True) stress_table = 'keyspace1.standard1' # write some data, enough for the bootstrap to fail later on node1 = cluster.nodelist()[0] node1.stress(['write', 'n=100K', 'no-warmup', '-rate', 'threads=8']) node1.flush() session = self.patient_cql_connection(node1) original_rows = list(session.execute("SELECT * FROM {}".format(stress_table,))) # Add a new node, bootstrap=True ensures that it is not a seed node2 = new_node(cluster, bootstrap=True) # kill node2 in the middle of bootstrap t = KillOnBootstrap(node2) t.start() node2.start() t.join() assert not node2.is_running() # wipe any data for node2 self._cleanup(node2) # Now start it again, it should be allowed to join mark = node2.mark_log() node2.start(wait_other_notice=True) node2.watch_log_for("JOINING:", from_mark=mark) @since('2.1.1') def test_simultaneous_bootstrap(self): """ Attempt to bootstrap two nodes at once, to assert the second bootstrapped node fails, and does not interfere. Start a one node cluster and run a stress write workload. Start up a second node, and wait for the first node to detect it has joined the cluster. While the second node is bootstrapping, start a third node. This should fail. @jira_ticket CASSANDRA-7069 @jira_ticket CASSANDRA-9484 """ bootstrap_error = "Other bootstrapping/leaving/moving nodes detected," \ " cannot bootstrap while cassandra.consistent.rangemovement is true" cluster = self.cluster cluster.populate(1) cluster.start(wait_for_binary_proto=True) node1, = cluster.nodelist() node1.stress(['write', 'n=500K', 'no-warmup', '-schema', 'replication(factor=1)', '-rate', 'threads=10']) node2 = new_node(cluster) node2.start(wait_other_notice=True) node3 = new_node(cluster, remote_debug_port='2003') try: node3.start(wait_other_notice=False, verbose=False) except NodeError: pass # node doesn't start as expected time.sleep(.5) node2.watch_log_for("Starting listening for CQL clients") node3.watch_log_for(bootstrap_error) session = self.patient_exclusive_cql_connection(node2) # Repeat the select count(*) query, to help catch # bugs like 9484, where count(*) fails at higher # data loads. 
for _ in range(5): assert_one(session, "SELECT count(*) from keyspace1.standard1", [500000], cl=ConsistencyLevel.ONE) def test_cleanup(self): """ @jira_ticket CASSANDRA-11179 Make sure we remove processed files during cleanup """ cluster = self.cluster cluster.set_configuration_options(values={'concurrent_compactors': 4}) cluster.populate(1) cluster.start(wait_for_binary_proto=True) node1, = cluster.nodelist() for x in range(0, 5): node1.stress(['write', 'n=100k', 'no-warmup', '-schema', 'compaction(strategy=SizeTieredCompactionStrategy,enabled=false)', 'replication(factor=1)', '-rate', 'threads=10']) node1.flush() node2 = new_node(cluster) node2.start(wait_for_binary_proto=True, wait_other_notice=True) event = threading.Event() failed = threading.Event() jobs = 1 thread = threading.Thread(target=self._monitor_datadir, args=(node1, event, len(node1.get_sstables("keyspace1", "standard1")), jobs, failed)) thread.setDaemon(True) thread.start() node1.nodetool("cleanup -j {} keyspace1 standard1".format(jobs)) event.set() thread.join() assert not failed.is_set() def _monitor_datadir(self, node, event, basecount, jobs, failed): while True: sstables = [s for s in node.get_sstables("keyspace1", "standard1") if "tmplink" not in s] logger.debug("---") for sstable in sstables: logger.debug(sstable) if len(sstables) > basecount + jobs: logger.debug("Current count is {}, basecount was {}".format(len(sstables), basecount)) failed.set() return if event.is_set(): return time.sleep(.1) def _cleanup(self, node): commitlog_dir = os.path.join(node.get_path(), 'commitlogs') for data_dir in node.data_directories(): logger.debug("Deleting {}".format(data_dir)) shutil.rmtree(data_dir) shutil.rmtree(commitlog_dir)
apache-2.0
-5,510,982,167,964,210,000
39.148936
184
0.622999
false
3.840049
true
false
false
skosukhin/spack
var/spack/repos/builtin/packages/numactl/package.py
1
1681
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Numactl(AutotoolsPackage):
    """NUMA support for Linux"""

    homepage = "http://oss.sgi.com/projects/libnuma/"
    url = "https://github.com/numactl/numactl/archive/v2.0.11.tar.gz"

    version('2.0.11', 'b56d2367217cde390b4d8087e00773b8')

    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('m4', type='build')
lgpl-2.1
8,610,615,474,908,283,000
42.102564
78
0.663891
false
3.864368
false
false
false
JamesMcMahon/mockjson.py
mockjson.py
1
5486
#!/usr/bin/env python
"""mockjson.py: Library for mocking JSON objects from a template."""

__author__ = "James McMahon"
__copyright__ = "Copyright 2012, James McMahon"
__license__ = "MIT"

try:
    import simplejson as json
except ImportError:
    import json
import random
import re
import string
import sys

from datetime import datetime, timedelta


_male_first_name = """James John Robert Michael William David Richard Charles
Joseph Thomas Christopher Daniel Paul Mark Donald George Kenneth Steven
Edward Brian Ronald Anthony Kevin Jason Matthew Gary Timothy Jose Larry
Jeffrey Frank Scott Eric""".split()

_female_first_name = """Mary Patricia Linda Barbara Elizabeth Jennifer Maria
Susan Margaret Dorothy Lisa Nancy Karen Betty Helen Sandra Donna Carol Ruth
Sharon Michelle Laura Sarah Kimberly Deborah Jessica Shirley Cynthia Angela
Melissa Brenda Amy Anna""".split()

_last_name = """Smith Johnson Williams Brown Jones Miller Davis Garcia
Rodriguez Wilson Martinez Anderson Taylor Thomas Hernandez Moore Martin
Jackson Thompson White Lopez Lee Gonzalez Harris Clark Lewis Robinson Walker
Perez Hall Young Allen""".split()

_lorem = """lorem ipsum dolor sit amet consectetur adipisicing elit sed do
eiusmod tempor incididunt ut labore et dolore magna aliqua Ut enim ad minim
veniam quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea
commodo consequat Duis aute irure dolor in reprehenderit in voluptate velit
esse cillum dolore eu fugiat nulla pariatur Excepteur sint occaecat cupidatat
non proident sunt in culpa qui officia deserunt mollit anim id est
laborum""".split()


def _random_data(key):
    if not key in data:
        return key
    return data[key]()


def _lorem_ipsum():
    length = random.randrange(2, len(_lorem) / 2)
    return ' '.join(random.choice(_lorem) for _ in xrange(length))


def _random_date():
    return datetime.today() - timedelta(days=random.randrange(6571, 27375))


# assigning regexes to variables removes regex cache lookup overhead
_re_range = re.compile(r"\w+\|(\d+)-(\d+)")
_re_strip_key = re.compile(r"\|(\d+-\d+|\+\d+)")
_re_increments = re.compile(r"\w+\|\+(\d+)")
_re_key = re.compile(r"(@[A-Z_0-9\(\),]+)")

data = dict(
    NUMBER=lambda: random.choice(string.digits),
    LETTER_UPPER=lambda: random.choice(string.uppercase),
    LETTER_LOWER=lambda: random.choice(string.lowercase),
    MALE_FIRST_NAME=lambda: random.choice(_male_first_name),
    FEMALE_FIRST_NAME=lambda: random.choice(_female_first_name),
    LAST_NAME=lambda: random.choice(_last_name),
    EMAIL=lambda: (_random_data('LETTER_LOWER') + '.'
                   + _random_data('LAST_NAME').lower()
                   + '@'
                   + _random_data('LAST_NAME').lower()
                   + '.com'),
    LOREM=lambda: random.choice(_lorem),
    LOREM_IPSUM=_lorem_ipsum,
    DATE_YYYY=lambda: str(_random_date().year),
    DATE_MM=lambda: str(_random_date().month).zfill(2),
    DATE_DD=lambda: str(_random_date().day).zfill(2),
    TIME_HH=lambda: str(_random_date().hour).zfill(2),
    TIME_MM=lambda: str(_random_date().minute).zfill(2),
    TIME_SS=lambda: str(_random_date().second).zfill(2)
)


def mock_object(template, increments={}, name=None):
    length = 0
    if name:
        matches = _re_range.search(name)
        if matches:
            groups = matches.groups()
            length_min = int(groups[0])
            length_max = int(groups[1])
            length = random.randint(length_min, length_max)

    t_type = type(template)
    if t_type is dict:
        generated = {}
        for key, value in template.iteritems():
            # handle increments
            inc_matches = _re_increments.search(key)
            if inc_matches and type(template[key]) is int:
                increment = int(inc_matches.groups()[0])
                if key in increments:
                    increments[key] += increment
                else:
                    increments[key] = 0
            stripped_key = _re_strip_key.sub('', key)
            generated[stripped_key] = mock_object(value, increments, key)
        return generated
    elif t_type is list:
        return [mock_object(template[0], increments) for _ in xrange(length)]
    elif t_type is int:
        if name in increments:
            return increments[name]
        else:
            return length if matches else template
    elif t_type is bool:
        # apparently getrandbits(1) is faster...
        return random.choice([True, False]) if matches else template
    # is this always just going to be unicode here?
    elif t_type is str or t_type is unicode:
        if template:
            length = length if length else 1
            generated = ''.join(template for _ in xrange(length))
            matches = _re_key.findall(generated)
            if matches:
                for key in matches:
                    rd = _random_data(key.lstrip('@'))
                    generated = generated.replace(key, rd, 1)
            return generated
        else:
            return (''.join(random.choice(string.letters)
                            for i in xrange(length)))
    else:
        return template


def mock_json(template):
    return json.dumps(mock_object(template), sort_keys=True, indent=4)


if __name__ == '__main__':
    arg = sys.argv[1:][0]
    with open(arg) as f:
        json_data = json.load(f)
    print(mock_json(json_data))
mit
-6,069,231,226,135,371,000
35.573333
77
0.62942
false
3.36152
false
false
false
rgtjf/Semantic-Texual-Similarity-Toolkits
stst/data/data_utils.py
1
3705
# coding: utf8
from __future__ import print_function

import codecs
import json
import os

import pyprind

from stst import utils
from stst.data.sent_pair import SentPair


def load_data(train_file):
    """
    Return list of dataset given train_file and gs_file
    Value: [(sa:str, sb:str, score:float)]
    """
    with codecs.open(train_file, 'r', encoding='utf8') as f:
        data = []
        for idx, line in enumerate(f):
            line = line.strip().split('\t')
            score = 0.
            if len(line) == 3:
                score = float(line[2])
            sa, sb = line[0], line[1]
            data.append((sa, sb, score))
    return data


def load_STS(train_file):
    with utils.create_read_file(train_file) as f:
        data = []
        for line in f:
            line = line.strip().split('\t')
            score = float(line[4])
            sa, sb = line[5], line[6]
            data.append((sa, sb, score))
    return data


def load_parse_data(train_file, parser=None, flag=False):
    """
    Load data after Parse, like POS, NER, etc.
    Value: [ SentPair:class, ... ]
    Parameter:
        flag: False(Default), Load from file (resources....)
              True, Parse and Write to file, and then load from file
    """

    ''' Pre-Define Write File '''
    # parse_train_file = config.PARSE_DIR + '/' + \
    #     utils.FileManager.get_file(train_file)
    parse_train_file = train_file.replace('./data', './generate/parse')

    if flag or not os.path.isfile(parse_train_file):
        print(train_file)
        if parser is None:
            raise RuntimeError("parser should be init by ``nlp = stst.StanfordNLP('http://localhost:9000')``")

        ''' Parse Data '''
        data = load_STS(train_file)
        print('*' * 50)
        print("Parse Data, train_file=%s, n_train=%d\n" % (train_file, len(data)))

        parse_data = []
        process_bar = pyprind.ProgPercent(len(data))
        for (sa, sb, score) in data:
            process_bar.update()
            parse_sa = parser.parse(sa)
            parse_sb = parser.parse(sb)
            parse_data.append((parse_sa, parse_sb, score))

        ''' Write Data to File '''
        with utils.create_write_file(parse_train_file) as f_parse:
            for parse_instance in parse_data:
                line = json.dumps(parse_instance)
                print(line, file=f_parse)

    ''' Load Data from File '''
    print('*' * 50)
    parse_data = []
    with utils.create_read_file(parse_train_file) as f:
        for line in f:
            parse_json = json.loads(line)
            sentpair_instance = SentPair(parse_json)
            parse_data.append(sentpair_instance)

    print("Load Data, train_file=%s, n_train=%d\n" % (train_file, len(parse_data)))
    return parse_data


def load_sentences(file_list, type='lemma'):
    """
    sentence_dict['file'][idx]['sa'] = idx
    sentence_dict['file'][idx]['sb'] = idx+1
    """
    sentence_tags = []
    sentences = []
    for file in file_list:
        # file is path
        file_name = file.split('/')[-1]
        parse_data = load_parse_data(file, None)
        for idx, train_instance in enumerate(parse_data):
            if type == 'lemma':
                sa, sb = train_instance.get_word(type='lemma', stopwords=False, lower=True)
            elif type == 'word':
                sa, sb = train_instance.get_word(type='word')
            sa_tag = "%s_%d_sa" % (file_name, idx)
            sb_tag = "%s_%d_sb" % (file_name, idx)
            sentences.append(sa)
            sentence_tags.append(sa_tag)
            sentences.append(sb)
            sentence_tags.append(sb_tag)
    return sentences, sentence_tags
mit
263,318,713,653,373,730
29.628099
110
0.552497
false
3.528571
false
false
false
akarol/cfme_tests
cfme/tests/cli/test_appliance_console.py
1
17241
import pytest from collections import namedtuple from wait_for import wait_for from cfme.utils import os from cfme.utils.log_validator import LogValidator from cfme.utils.log import logger from cfme.utils.conf import hidden import tempfile import lxml.etree import yaml TimedCommand = namedtuple('TimedCommand', ['command', 'timeout']) LoginOption = namedtuple('LoginOption', ['name', 'option', 'index']) TZ = namedtuple('TimeZone', ['name', 'option']) tzs = [ TZ('Africa/Abidjan', ('1', '1')), TZ('America/Argentina/Buenos_Aires', ('2', '6', '1')), TZ('Antarctica/Casey', ('3', 'q', '1')), TZ('Arctic/Longyearbyen', ('4', 'q', '1')), TZ('Asia/Aden', ('5', '1')), TZ('Atlantic/Azores', ('6', 'q', '1')), TZ('Australia/Adelaide', ('7', 'q', '1')), TZ('Europe/Amsterdam', ('8', '1')), TZ('Indian/Antananarivo', ('9', 'q', '1')), TZ('Pacific/Apia', ('10', '1')), TZ('UTC', ('11',)) ] @pytest.mark.smoke def test_appliance_console(appliance): """'ap | tee /tmp/opt.txt)' saves stdout to file, 'ap' launch appliance_console.""" command_set = ('ap | tee -a /tmp/opt.txt', 'ap') appliance.appliance_console.run_commands(command_set) assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Virtual Appliance'" .format(appliance.product_name)) assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Database:'" .format(appliance.product_name)) assert appliance.ssh_client.run_command("cat /tmp/opt.txt | grep '{} Version:'" .format(appliance.product_name)) def test_appliance_console_set_hostname(appliance): """'ap' launch appliance_console, '' clear info screen, '1' loads network settings, '5' gives access to set hostname, 'hostname' sets new hostname.""" hostname = 'test.example.com' command_set = ('ap', '', '1', '5', hostname,) appliance.appliance_console.run_commands(command_set) def is_hostname_set(appliance): assert appliance.ssh_client.run_command("hostname -f | grep {hostname}" .format(hostname=hostname)) wait_for(is_hostname_set, func_args=[appliance]) return_code, output = appliance.ssh_client.run_command("hostname -f") assert output.strip() == hostname assert return_code == 0 @pytest.mark.parametrize('timezone', tzs, ids=[tz.name for tz in tzs]) def test_appliance_console_set_timezone(timezone, temp_appliance_preconfig_modscope): """'ap' launch appliance_console, '' clear info screen, '2' set timezone, 'opt' select region, 'timezone' selects zone, 'y' confirm slection, '' finish.""" command_set = ('ap', '', '2') + timezone[1] + ('y', '') temp_appliance_preconfig_modscope.appliance_console.run_commands(command_set) temp_appliance_preconfig_modscope.appliance_console.timezone_check(timezone) def test_appliance_console_internal_db(app_creds, unconfigured_appliance): """'ap' launch appliance_console, '' clear info screen, '5' setup db, '1' Creates v2_key, '1' selects internal db, 'y' continue, '1' use partition, 'n' don't create dedicated db, '0' db region number, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish.""" pwd = app_creds['password'] command_set = ('ap', '', '5', '1', '1', 'y', '1', 'n', '0', pwd, TimedCommand(pwd, 360), '') unconfigured_appliance.appliance_console.run_commands(command_set) unconfigured_appliance.wait_for_evm_service() unconfigured_appliance.wait_for_web_ui() def test_appliance_console_internal_db_reset(temp_appliance_preconfig_funcscope): """'ap' launch appliance_console, '' clear info screen, '5' setup db, '4' reset db, 'y' confirm db reset, '1' db region number + wait 360 secs, '' continue""" 
temp_appliance_preconfig_funcscope.ssh_client.run_command('systemctl stop evmserverd') command_set = ('ap', '', '5', '4', 'y', TimedCommand('1', 360), '') temp_appliance_preconfig_funcscope.appliance_console.run_commands(command_set) temp_appliance_preconfig_funcscope.ssh_client.run_command('systemctl start evmserverd') temp_appliance_preconfig_funcscope.wait_for_evm_service() temp_appliance_preconfig_funcscope.wait_for_web_ui() def test_appliance_console_dedicated_db(unconfigured_appliance, app_creds): """'ap' launch appliance_console, '' clear info screen, '5' setup db, '1' Creates v2_key, '1' selects internal db, 'y' continue, '1' use partition, 'y' create dedicated db, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish.""" pwd = app_creds['password'] command_set = ('ap', '', '5', '1', '1', 'y', '1', 'y', pwd, TimedCommand(pwd, 360), '') unconfigured_appliance.appliance_console.run_commands(command_set) wait_for(lambda: unconfigured_appliance.db.is_dedicated_active) def test_appliance_console_ha_crud(unconfigured_appliances, app_creds): """Testing HA configuration with 3 appliances. Appliance one configuring dedicated database, 'ap' launch appliance_console, '' clear info screen, '5' setup db, '1' Creates v2_key, '1' selects internal db, '1' use partition, 'y' create dedicated db, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish. Appliance two creating region in dedicated database, 'ap' launch appliance_console, '' clear info screen, '5' setup db, '2' fetch v2_key, 'app0_ip' appliance ip address, '' default user, 'pwd' appliance password, '' default v2_key location, '2' create region in external db, '0' db region number, 'y' confirm create region in external db 'app0_ip', '' ip and default port for dedicated db, '' use default db name, '' default username, 'pwd' db password, 'pwd' confirm db password + wait 360 seconds and '' finish. Appliance one configuring primary node for replication, 'ap' launch appliance_console, '' clear info screen, '6' configure db replication, '1' configure node as primary, '1' cluster node number set to 1, '' default dbname, '' default user, 'pwd' password, 'pwd' confirm password, 'app0_ip' primary appliance ip, confirm settings and wait 360 seconds to configure, '' finish. Appliance three configuring standby node for replication, 'ap' launch appliance_console, '' clear info screen, '6' configure db replication, '1' configure node as primary, '1' cluster node number set to 1, '' default dbname, '' default user, 'pwd' password, 'pwd' confirm password, 'app0_ip' primary appliance ip, confirm settings and wait 360 seconds to configure, '' finish. Appliance two configuring automatic failover of database nodes, 'ap' launch appliance_console, '' clear info screen '9' configure application database failover monitor, '1' start failover monitor. wait 30 seconds for service to start '' finish. Appliance one, stop APPLIANCE_PG_SERVICE and check that the standby node takes over correctly and evm starts up again pointing at the new primary database. 
""" apps = unconfigured_appliances app0_ip = apps[0].hostname app1_ip = apps[1].hostname pwd = app_creds['password'] # Configure first appliance as dedicated database command_set = ('ap', '', '5', '1', '1', '1', 'y', pwd, TimedCommand(pwd, 360), '') apps[0].appliance_console.run_commands(command_set) wait_for(lambda: apps[0].db.is_dedicated_active) # Configure EVM webui appliance with create region in dedicated database command_set = ('ap', '', '5', '2', app0_ip, '', pwd, '', '2', '0', 'y', app0_ip, '', '', '', pwd, TimedCommand(pwd, 360), '') apps[2].appliance_console.run_commands(command_set) apps[2].wait_for_evm_service() apps[2].wait_for_web_ui() # Configure primary replication node command_set = ('ap', '', '6', '1', '1', '', '', pwd, pwd, app0_ip, 'y', TimedCommand('y', 60), '') apps[0].appliance_console.run_commands(command_set) # Configure secondary replication node command_set = ('ap', '', '6', '2', '1', '2', '', '', pwd, pwd, app0_ip, app1_ip, 'y', TimedCommand('y', 60), '') apps[1].appliance_console.run_commands(command_set) # Configure automatic failover on EVM appliance command_set = ('ap', '', '9', TimedCommand('1', 30), '') apps[2].appliance_console.run_commands(command_set) def is_ha_monitor_started(appliance): return bool(appliance.ssh_client.run_command( "grep {} /var/www/miq/vmdb/config/failover_databases.yml".format(app1_ip)).success) wait_for(is_ha_monitor_started, func_args=[apps[2]], timeout=300, handle_exception=True) # Cause failover to occur rc, out = apps[0].ssh_client.run_command('systemctl stop $APPLIANCE_PG_SERVICE', timeout=15) assert rc == 0, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(out) def is_failover_started(appliance): return bool(appliance.ssh_client.run_command( "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log").success) wait_for(is_failover_started, func_args=[apps[2]], timeout=450, handle_exception=True) apps[2].wait_for_evm_service() apps[2].wait_for_web_ui() def test_appliance_console_external_db(temp_appliance_unconfig_funcscope, app_creds, appliance): """'ap' launch appliance_console, '' clear info screen, '5/8' setup db, '2' fetch v2_key, 'ip' address to fetch from, '' default username, 'pwd' db password, '' default v2_key location, '3' join external region, 'port' ip and port of joining region, '' use default db name, '' default username, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish.""" ip = appliance.hostname pwd = app_creds['password'] command_set = ('ap', '', '5', '2', ip, '', pwd, '', '3', ip, '', '', '', pwd, TimedCommand(pwd, 360), '') temp_appliance_unconfig_funcscope.appliance_console.run_commands(command_set) temp_appliance_unconfig_funcscope.wait_for_evm_service() temp_appliance_unconfig_funcscope.wait_for_web_ui() def test_appliance_console_external_db_create( app_creds, dedicated_db_appliance, unconfigured_appliance_secondary): """'ap' launch appliance_console, '' clear info screen, '5' setup db, '1' create v2_key, '2' create region in external db, '0' db region number, 'y' confirm create region in external db 'ip', '' ip and port for dedicated db, '' use default db name, '' default username, 'pwd' db password, 'pwd' confirm db password + wait 360 secs and '' finish.""" ip = dedicated_db_appliance.hostname pwd = app_creds['password'] command_set = ('ap', '', '5', '1', '2', '0', 'y', ip, '', '', '', pwd, TimedCommand(pwd, 300), '') unconfigured_appliance_secondary.appliance_console.run_commands(command_set) unconfigured_appliance_secondary.wait_for_evm_service() 
unconfigured_appliance_secondary.wait_for_web_ui() def test_appliance_console_extend_storage(unconfigured_appliance): """'ap' launches appliance_console, '' clears info screen, '10' extend storage, '1' select disk, 'y' confirm configuration and '' complete.""" command_set = ('ap', '', '10', '1', 'y', '') unconfigured_appliance.appliance_console.run_commands(command_set) def is_storage_extended(): assert unconfigured_appliance.ssh_client.run_command("df -h | grep /var/www/miq_tmp") wait_for(is_storage_extended) @pytest.mark.uncollect('No IPA servers currently available') def test_appliance_console_ipa(ipa_creds, configured_appliance): """'ap' launches appliance_console, '' clears info screen, '11' setup IPA, 'y' confirm setup + wait 40 secs and '' finish.""" command_set = ('ap', '', '11', ipa_creds['hostname'], ipa_creds['domain'], '', ipa_creds['username'], ipa_creds['password'], TimedCommand('y', 40), '') configured_appliance.appliance_console.run_commands(command_set) def is_sssd_running(configured_appliance): assert configured_appliance.ssh_client.run_command("systemctl status sssd | grep running") wait_for(is_sssd_running, func_args=[configured_appliance]) return_code, output = configured_appliance.ssh_client.run_command( "cat /etc/ipa/default.conf | grep 'enable_ra = True'") assert return_code == 0 @pytest.mark.uncollect('No IPA servers currently available') @pytest.mark.parametrize('auth_type', [ LoginOption('sso', 'sso_enabled', '1'), LoginOption('saml', 'saml_enabled', '2'), LoginOption('local_login', 'local_login_disabled', '3') ], ids=['sso', 'saml', 'local_login']) def test_appliance_console_external_auth(auth_type, app_creds, ipa_crud): """'ap' launches appliance_console, '' clears info screen, '12' change ext auth options, 'auth_type' auth type to change, '4' apply changes.""" evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log', matched_patterns=['.*{} to true.*'.format(auth_type.option)], hostname=ipa_crud.hostname, username=app_creds['sshlogin'], password=app_creds['password']) evm_tail.fix_before_start() command_set = ('ap', '', '12', auth_type.index, '4') ipa_crud.appliance_console.run_commands(command_set) evm_tail.validate_logs() evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log', matched_patterns=['.*{} to false.*'.format(auth_type.option)], hostname=ipa_crud.hostname, username=app_creds['sshlogin'], password=app_creds['password']) evm_tail.fix_before_start() command_set = ('ap', '', '12', auth_type.index, '4') ipa_crud.appliance_console.run_commands(command_set) evm_tail.validate_logs() @pytest.mark.uncollect('No IPA servers currently available') def test_appliance_console_external_auth_all(app_creds, ipa_crud): """'ap' launches appliance_console, '' clears info screen, '12' change ext auth options, 'auth_type' auth type to change, '4' apply changes.""" evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log', matched_patterns=['.*sso_enabled to true.*', '.*saml_enabled to true.*', '.*local_login_disabled to true.*'], hostname=ipa_crud.hostname, username=app_creds['sshlogin'], password=app_creds['password']) evm_tail.fix_before_start() command_set = ('ap', '', '12', '1', '2', '3', '4') ipa_crud.appliance_console.run_commands(command_set) evm_tail.validate_logs() evm_tail = LogValidator('/var/www/miq/vmdb/log/evm.log', matched_patterns=['.*sso_enabled to false.*', '.*saml_enabled to false.*', '.*local_login_disabled to false.*'], hostname=ipa_crud.hostname, username=app_creds['sshlogin'], password=app_creds['password']) evm_tail.fix_before_start() command_set = 
('ap', '', '12', '1', '2', '3', '4') ipa_crud.appliance_console.run_commands(command_set) evm_tail.validate_logs() def test_appliance_console_scap(temp_appliance_preconfig, soft_assert): """'ap' launches appliance_console, '' clears info screen, '14' Hardens appliance using SCAP configuration, '' complete.""" command_set = ('ap', '', '14', '') temp_appliance_preconfig.appliance_console.run_commands(command_set) with tempfile.NamedTemporaryFile('w') as f: f.write(hidden['scap.rb']) f.flush() os.fsync(f.fileno()) temp_appliance_preconfig.ssh_client.put_file( f.name, '/tmp/scap.rb') if temp_appliance_preconfig.version >= "5.8": rules = '/var/www/miq/vmdb/productization/appliance_console/config/scap_rules.yml' else: rules = '/var/www/miq/vmdb/gems/pending/appliance_console/config/scap_rules.yml' temp_appliance_preconfig.ssh_client.run_command('cd /tmp/ && ruby scap.rb ' '--rulesfile={rules}'.format(rules=rules)) temp_appliance_preconfig.ssh_client.get_file( '/tmp/scap-results.xccdf.xml', '/tmp/scap-results.xccdf.xml') temp_appliance_preconfig.ssh_client.get_file( '{rules}'.format(rules=rules), '/tmp/scap_rules.yml') # Get the scap rules with open('/tmp/scap_rules.yml') as f: yml = yaml.load(f.read()) rules = yml['rules'] tree = lxml.etree.parse('/tmp/scap-results.xccdf.xml') root = tree.getroot() for rule in rules: elements = root.findall( './/{{http://checklists.nist.gov/xccdf/1.1}}rule-result[@idref="{}"]'.format(rule)) if elements: result = elements[0].findall('./{http://checklists.nist.gov/xccdf/1.1}result') if result: soft_assert(result[0].text == 'pass') logger.info("{}: {}".format(rule, result[0].text)) else: logger.info("{}: no result".format(rule)) else: logger.info("{}: rule not found".format(rule))
gpl-2.0
-147,145,179,698,815,040
48.973913
100
0.640682
false
3.488669
true
false
false
ThiefMaster/indico-plugins
vc_vidyo/indico_vc_vidyo/plugin.py
1
14601
# This file is part of the Indico plugins. # Copyright (C) 2002 - 2021 CERN # # The Indico plugins are free software; you can redistribute # them and/or modify them under the terms of the MIT License; # see the LICENSE file for more details. from flask import session from sqlalchemy.orm.attributes import flag_modified from wtforms.fields import IntegerField, TextAreaField from wtforms.fields.html5 import EmailField, URLField from wtforms.fields.simple import StringField from wtforms.validators import DataRequired, NumberRange from indico.core import signals from indico.core.auth import multipass from indico.core.config import config from indico.core.plugins import IndicoPlugin, url_for_plugin from indico.modules.events.views import WPSimpleEventDisplay from indico.modules.vc import VCPluginMixin, VCPluginSettingsFormBase from indico.modules.vc.exceptions import VCRoomError, VCRoomNotFoundError from indico.modules.vc.views import WPVCEventPage, WPVCManageEvent from indico.web.forms.fields import IndicoPasswordField from indico.web.forms.widgets import CKEditorWidget from indico.web.http_api.hooks.base import HTTPAPIHook from indico_vc_vidyo import _ from indico_vc_vidyo.api import AdminClient, APIException, RoomNotFoundAPIException from indico_vc_vidyo.blueprint import blueprint from indico_vc_vidyo.cli import cli from indico_vc_vidyo.forms import VCRoomAttachForm, VCRoomForm from indico_vc_vidyo.http_api import DeleteVCRoomAPI from indico_vc_vidyo.models.vidyo_extensions import VidyoExtension from indico_vc_vidyo.util import iter_extensions, iter_user_identities, retrieve_principal, update_room_from_obj class PluginSettingsForm(VCPluginSettingsFormBase): support_email = EmailField(_('Vidyo email support')) username = StringField(_('Username'), [DataRequired()], description=_('Indico username for Vidyo')) password = IndicoPasswordField(_('Password'), [DataRequired()], toggle=True, description=_('Indico password for Vidyo')) admin_api_wsdl = URLField(_('Admin API WSDL URL'), [DataRequired()]) user_api_wsdl = URLField(_('User API WSDL URL'), [DataRequired()]) indico_room_prefix = IntegerField(_('Indico tenant prefix'), [NumberRange(min=0)], description=_('The tenant prefix for Indico rooms created on this server')) room_group_name = StringField(_("Public rooms' group name"), [DataRequired()], description=_('Group name for public videoconference rooms created by Indico')) authenticators = StringField(_('Authenticators'), [DataRequired()], description=_('Identity providers to convert Indico users to Vidyo accounts')) num_days_old = IntegerField(_('VC room age threshold'), [NumberRange(min=1), DataRequired()], description=_('Number of days after an Indico event when a videoconference room is ' 'considered old')) max_rooms_warning = IntegerField(_('Max. num. VC rooms before warning'), [NumberRange(min=1), DataRequired()], description=_('Maximum number of rooms until a warning is sent to the managers')) vidyo_phone_link = URLField(_('VidyoVoice phone number'), description=_('Link to the list of VidyoVoice phone numbers')) client_chooser_url = URLField(_('Client Chooser URL'), description=_("URL for client chooser interface. 
The room key will be passed as a " "'url' GET query argument")) creation_email_footer = TextAreaField(_('Creation email footer'), widget=CKEditorWidget(), description=_('Footer to append to emails sent upon creation of a VC room')) class VidyoPlugin(VCPluginMixin, IndicoPlugin): """Vidyo Videoconferencing with Vidyo """ configurable = True settings_form = PluginSettingsForm vc_room_form = VCRoomForm vc_room_attach_form = VCRoomAttachForm friendly_name = 'Vidyo' def init(self): super().init() self.connect(signals.plugin.cli, self._extend_indico_cli) self.inject_bundle('main.js', WPSimpleEventDisplay) self.inject_bundle('main.js', WPVCEventPage) self.inject_bundle('main.js', WPVCManageEvent) HTTPAPIHook.register(DeleteVCRoomAPI) @property def default_settings(self): return dict(VCPluginMixin.default_settings, **{ 'support_email': config.SUPPORT_EMAIL, 'username': 'indico', 'password': None, 'admin_api_wsdl': 'https://yourvidyoportal/services/v1_1/VidyoPortalAdminService?wsdl', 'user_api_wsdl': 'https://yourvidyoportal/services/v1_1/VidyoPortalUserService?wsdl', 'indico_room_prefix': 10, 'room_group_name': 'Indico', # we skip identity providers in the default list if they don't support get_identity. # these providers (local accounts, oauth) are unlikely be the correct ones to integrate # with the vidyo infrastructure. 'authenticators': ', '.join(p.name for p in multipass.identity_providers.values() if p.supports_get), 'num_days_old': 365, 'max_rooms_warning': 5000, 'vidyo_phone_link': None, 'creation_email_footer': None, 'client_chooser_url': None }) @property def logo_url(self): return url_for_plugin(self.name + '.static', filename='images/logo.png') @property def icon_url(self): return url_for_plugin(self.name + '.static', filename='images/vidyo_logo_notext.png') def _extend_indico_cli(self, sender, **kwargs): return cli def update_data_association(self, event, vc_room, event_vc_room, data): super().update_data_association(event, vc_room, event_vc_room, data) event_vc_room.data.update({key: data.pop(key) for key in [ 'show_pin', 'show_autojoin', 'show_phone_numbers' ]}) flag_modified(event_vc_room, 'data') def update_data_vc_room(self, vc_room, data, is_new=False): super().update_data_vc_room(vc_room, data, is_new=is_new) for key in ['description', 'owner', 'room_pin', 'moderation_pin', 'auto_mute']: if key in data: vc_room.data[key] = data.pop(key) flag_modified(vc_room, 'data') def create_room(self, vc_room, event): """Create a new Vidyo room for an event, given a VC room. In order to create the Vidyo room, the function will try to do so with all the available identities of the user based on the authenticators defined in Vidyo plugin's settings, in that order. 
:param vc_room: VCRoom -- The VC room from which to create the Vidyo room :param event: Event -- The event to the Vidyo room will be attached """ client = AdminClient(self.settings) owner = retrieve_principal(vc_room.data['owner']) login_gen = iter_user_identities(owner) login = next(login_gen, None) if login is None: raise VCRoomError(_("No valid Vidyo account found for this user"), field='owner_user') extension_gen = iter_extensions(self.settings.get('indico_room_prefix'), event.id) extension = next(extension_gen) while True: room_mode = { 'isLocked': False, 'hasPIN': bool(vc_room.data['room_pin']), 'hasModeratorPIN': bool(vc_room.data['moderation_pin']) } if room_mode['hasPIN']: room_mode['roomPIN'] = vc_room.data['room_pin'] if room_mode['hasModeratorPIN']: room_mode['moderatorPIN'] = vc_room.data['moderation_pin'] room_obj = client.create_room_object( name=vc_room.name, RoomType='Public', ownerName=login, extension=extension, groupName=self.settings.get('room_group_name'), description=vc_room.data['description'], RoomMode=room_mode) if room_obj.RoomMode.hasPIN: room_obj.RoomMode.roomPIN = vc_room.data['room_pin'] if room_obj.RoomMode.hasModeratorPIN: room_obj.RoomMode.moderatorPIN = vc_room.data['moderation_pin'] try: client.add_room(room_obj) except APIException as err: err_msg = err.message if err_msg.startswith('Room exist for name'): raise VCRoomError(_("Room name already in use"), field='name') elif err_msg.startswith('Member not found for ownerName'): login = next(login_gen, None) if login is None: raise VCRoomError(_("No valid Vidyo account found for this user"), field='owner_user') elif err_msg.startswith('Room exist for extension'): extension = next(extension_gen) else: raise else: # get room back, in order to fetch Vidyo-set parameters created_room = client.find_room(extension) if not created_room: raise VCRoomNotFoundError(_("Could not find newly created room in Vidyo")) vc_room.data.update({ 'vidyo_id': str(created_room.roomID), 'url': created_room.RoomMode.roomURL, 'owner_identity': created_room.ownerName }) flag_modified(vc_room, 'data') vc_room.vidyo_extension = VidyoExtension(vc_room_id=vc_room.id, extension=int(created_room.extension), owned_by_user=owner) client.set_automute(created_room.roomID, vc_room.data['auto_mute']) break def update_room(self, vc_room, event): client = AdminClient(self.settings) try: room_obj = self.get_room(vc_room) except RoomNotFoundAPIException: raise VCRoomNotFoundError(_("This room has been deleted from Vidyo")) owner = retrieve_principal(vc_room.data['owner']) changed_owner = room_obj.ownerName not in iter_user_identities(owner) if changed_owner: login_gen = iter_user_identities(owner) login = next(login_gen, None) if login is None: raise VCRoomError(_("No valid Vidyo account found for this user"), field='owner_user') room_obj.ownerName = login room_obj.name = vc_room.name room_obj.description = vc_room.data['description'] room_obj.RoomMode.hasPIN = bool(vc_room.data['room_pin']) room_obj.RoomMode.hasModeratorPIN = bool(vc_room.data['moderation_pin']) if room_obj.RoomMode.hasPIN: room_obj.RoomMode.roomPIN = vc_room.data['room_pin'] if room_obj.RoomMode.hasModeratorPIN: room_obj.RoomMode.moderatorPIN = vc_room.data['moderation_pin'] vidyo_id = vc_room.data['vidyo_id'] while True: try: client.update_room(vidyo_id, room_obj) except RoomNotFoundAPIException: raise VCRoomNotFoundError(_("This room has been deleted from Vidyo")) except APIException as err: err_msg = err.message if err_msg.startswith('Room exist for name'): raise 
VCRoomError(_("Room name already in use"), field='name') elif err_msg.startswith('Member not found for ownerName'): if changed_owner: login = next(login_gen, None) if not changed_owner or login is None: raise VCRoomError(_("No valid Vidyo account found for this user"), field='owner_user') room_obj.ownerName = login else: raise else: updated_room_obj = self.get_room(vc_room) update_room_from_obj(self.settings, vc_room, updated_room_obj) flag_modified(vc_room, 'data') client.set_automute(vidyo_id, vc_room.data['auto_mute']) break def refresh_room(self, vc_room, event): client = AdminClient(self.settings) try: room_obj = self.get_room(vc_room) except RoomNotFoundAPIException: raise VCRoomNotFoundError(_("This room has been deleted from Vidyo")) update_room_from_obj(self.settings, vc_room, room_obj) vc_room.data['auto_mute'] = client.get_automute(room_obj.roomID) flag_modified(vc_room, 'data') def delete_room(self, vc_room, event): client = AdminClient(self.settings) vidyo_id = vc_room.data['vidyo_id'] try: client.delete_room(vidyo_id) except RoomNotFoundAPIException: pass def get_room(self, vc_room): client = AdminClient(self.settings) return client.get_room(vc_room.data['vidyo_id']) def get_blueprints(self): return blueprint def get_vc_room_form_defaults(self, event): defaults = super().get_vc_room_form_defaults(event) defaults.update({ 'auto_mute': True, 'show_pin': False, 'show_autojoin': True, 'show_phone_numbers': True, 'owner_user': session.user }) return defaults def get_vc_room_attach_form_defaults(self, event): defaults = super().get_vc_room_attach_form_defaults(event) defaults.update({ 'show_pin': False, 'show_autojoin': True, 'show_phone_numbers': True }) return defaults def can_manage_vc_room(self, user, room): return user == room.vidyo_extension.owned_by_user or super().can_manage_vc_room(user, room) def _merge_users(self, target, source, **kwargs): super()._merge_users(target, source, **kwargs) for ext in VidyoExtension.query.filter_by(owned_by_user=source): ext.owned_by_user = target flag_modified(ext.vc_room, 'data') def get_notification_cc_list(self, action, vc_room, event): return {vc_room.vidyo_extension.owned_by_user.email}
mit
573,350,418,020,589,600
43.245455
118
0.607561
false
3.998083
false
false
false
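The create_room method in the Vidyo plugin record above retries room creation by walking two generators, the owner's candidate Vidyo logins and the generated room extensions, moving to the next value whenever the portal reports a name/extension clash or an unknown owner. Below is a minimal sketch of that retry-over-generators pattern; FakeClient, RoomExists and OwnerNotFound are illustrative assumptions, not part of the Vidyo API or the plugin.

# Minimal sketch of the retry loop used by create_room above.
# FakeClient, RoomExists and OwnerNotFound are illustrative stand-ins.

class RoomExists(Exception):
    pass

class OwnerNotFound(Exception):
    pass

class FakeClient:
    """Pretends extension 100 is taken and login 'alice' is unknown to the portal."""
    def add_room(self, login, extension):
        if extension == 100:
            raise RoomExists(extension)
        if login == 'alice':
            raise OwnerNotFound(login)
        return {'ownerName': login, 'extension': extension}

def create_room(client, logins, extensions):
    login_gen = iter(logins)
    ext_gen = iter(extensions)
    login = next(login_gen, None)
    if login is None:
        raise ValueError('no valid account found for this user')
    extension = next(ext_gen)
    while True:
        try:
            return client.add_room(login, extension)
        except RoomExists:
            extension = next(ext_gen)       # extension clash: try the next generated one
        except OwnerNotFound:
            login = next(login_gen, None)   # unknown owner: try the user's next identity
            if login is None:
                raise ValueError('no valid account found for this user')

if __name__ == '__main__':
    client = FakeClient()
    print(create_room(client, ['alice', 'bob'], [100, 101, 102]))
    # {'ownerName': 'bob', 'extension': 101}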
nyrocron/eve-skills
skillcheck.py
1
3274
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""skillcheck.py: Check characters' trained skills against a skill-set CSV and print an HTML report."""
from sys import argv
import csv

import eveapi_simple as api
from evestatic import StaticDB

sdb = StaticDB()

skillset_name = argv[1]
skillset = {}
# Load the required skills from <skillset>.csv: rows of (group, skill name, required level 1-5).
with open(skillset_name + '.csv', 'rb') as csvfile:
    skillsreader = csv.reader(csvfile, delimiter=',', quotechar='"')
    for row in skillsreader:
        skill_group, skill_name, required_level = row
        required_level = int(required_level)
        if not 1 <= required_level <= 5:
            continue
        skill_id = sdb.skill_id(skill_name)
        if skill_group not in skillset:
            skillset[skill_group] = []
        skillset[skill_group].append((skill_name, skill_id, required_level))

api_args = {'keyID': argv[2], 'vCode': argv[3]}

characters = []
key_info = api.query('/account/APIKeyInfo', api_args)
for character in key_info.result.key.rowset.row:
    char_id = character.attrib['characterID']
    char_name = character.attrib['characterName']
    characters.append((char_id, char_name))

title = "Skillcheck - %s" % skillset_name

print('<!DOCTYPE html>')
print('<html>')
print('<head>')
print('<title>%s</title>' % title)
print('<link rel="stylesheet" type="text/css" href="style.css" />')
print('</head>')
print('<body>')
print('<h1>%s</h1>' % title)

for character in characters:
    char_id, char_name = character
    api_args['characterID'] = char_id
    charsheet = api.query('/char/CharacterSheet', api_args)
    # Map of skill name -> level currently trained by this character.
    trained_skills = {}
    for skill in charsheet.xpath("result/rowset[@name='skills']/row"):
        skill_id = int(skill.attrib['typeID'])
        trained_level = int(skill.attrib['level'])
        skill_name = sdb.skill_name(skill_id)
        trained_skills[skill_name] = trained_level

    print('<h2>%s</h2>' % char_name)

    low_skill_counter = 0
    for group in sorted(skillset.keys()):
        groupheader_printed = False
        for skill in sorted(skillset[group]):
            skill_name, skill_id, required_level = skill
            if skill_name in trained_skills:
                trained_level = trained_skills[skill_name]
            else:
                trained_level = 0

            if trained_level < required_level:
                if not groupheader_printed:
                    print('<h3>%s</h3>' % group)
                    print('<table class="skills">')
                    groupheader_printed = True
                print('<tr class="lowskill">')
                print('<td><a class="igblink" onclick="CCPEVE.showInfo(%s)">%s'
                      '</a></td>' % (skill_id, skill_name))
                print('<td><img style="background:url(gfx/level{1}_red.png)"'
                      ' src="gfx/level{0}.png"'
                      ' alt="Level {0}/{1}" /></td>'.format(trained_level,
                                                            required_level))
                print('</tr>')
                low_skill_counter += 1
        if groupheader_printed:
            print('</table>')

    if low_skill_counter == 0:
        print('<span>Skill requirements met</span>')

print('</body>')
print('</html>')
mpl-2.0
-5,300,169,020,836,653,000
33.114583
79
0.577886
false
3.570338
false
false
false
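skillcheck.py above takes a skill-set name, an EVE API key ID and a verification code on the command line, reads <skillset>.csv next to the script, and writes an HTML report to stdout. The snippet below is a hedged illustration of the expected CSV layout, rows of (group, skill name, required level 1-5); the group/skill names and the 'tanking' file name are made-up examples, not taken from the script.

# Illustration only: writes a sample skill-set CSV in the layout skillcheck.py parses
# (group, skill name, required level 1-5). Names and file name are example assumptions.
rows = [
    ('Shields', 'Shield Operation', 5),
    ('Shields', 'Shield Management', 4),
    ('Armor', 'Hull Upgrades', 4),
]

with open('tanking.csv', 'w') as f:
    for group, skill, level in rows:
        f.write('%s,%s,%d\n' % (group, skill, level))

# The report would then be generated with (key ID / vCode coming from the EVE API key page):
#   python skillcheck.py tanking KEY_ID VCODE > report.html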
Guts/isogeo-api-py-minsdk
isogeo_pysdk/api/routes_coordinate_systems.py
1
13730
# -*- coding: UTF-8 -*-
#! python3

"""
    Isogeo API v1 - API Routes to retrieve CoordinateSystems

    See: http://help.isogeo.com/api/complete/index.html
"""

# #############################################################################
# ########## Libraries #############
# ##################################

# Standard library
import logging

# 3rd party
from requests.models import Response

# submodules
from isogeo_pysdk.checker import IsogeoChecker
from isogeo_pysdk.decorators import ApiDecorators
from isogeo_pysdk.models import CoordinateSystem, Metadata, Workgroup
from isogeo_pysdk.utils import IsogeoUtils

# #############################################################################
# ########## Global #############
# ##################################

logger = logging.getLogger(__name__)
checker = IsogeoChecker()
utils = IsogeoUtils()


# #############################################################################
# ########## Classes ###############
# ##################################
class ApiCoordinateSystem:
    """Routes as methods of Isogeo API used to manipulate coordinate-systems."""

    def __init__(self, api_client=None):
        if api_client is not None:
            self.api_client = api_client

        # store API client (Request [Oauthlib] Session) and pass it to the decorators
        self.api_client = api_client
        ApiDecorators.api_client = api_client

        # ensure platform and other params to request
        self.platform, self.api_url, self.app_url, self.csw_url, self.mng_url, self.oc_url, self.ssl = utils.set_base_url(
            self.api_client.platform
        )
        # initialize
        super(ApiCoordinateSystem, self).__init__()

    @ApiDecorators._check_bearer_validity
    def listing(self, workgroup_id: str = None, caching: bool = True) -> list:
        """Get coordinate-systems in the whole Isogeo database or into a specific workgroup.

        :param str workgroup_id: identifier of the owner workgroup. OPTIONAL: if present, list SRS selected in the workgroup.
:param bool caching: option to cache the response :rtype: list :Example: >>> # list all coordinate-systems in the whole Isogeo database >>> srs = isogeo.srs.listing() >>> print(len(srs)) 4301 >>> # list coordinate-systems which have been selected in a specific workgroup >>> srs = isogeo.srs.listing(workgroup_id=WORKGROUP_UUID) >>> print(len(srs)) 5 """ # check if workgroup or global if workgroup_id is None: # request URL url_coordinate_systems = utils.get_request_base_url( route="coordinate-systems" ) else: # check workgroup UUID if not checker.check_is_uuid(workgroup_id): raise ValueError( "Workgroup ID is not a correct UUID: {}".format(workgroup_id) ) # request URL url_coordinate_systems = utils.get_request_base_url( route="groups/{}/coordinate-systems".format(workgroup_id) ) # request req_coordinate_systems = self.api_client.get( url=url_coordinate_systems, headers=self.api_client.header, proxies=self.api_client.proxies, verify=self.api_client.ssl, timeout=self.api_client.timeout, ) # checking response req_check = checker.check_api_response(req_coordinate_systems) if isinstance(req_check, tuple): return req_check coordinate_systems = req_coordinate_systems.json() # if caching use or store the workgroup coordinate_systems if caching and workgroup_id is None: self.api_client._coordinate_systems = coordinate_systems elif caching: self.api_client._wg_coordinate_systems = coordinate_systems else: pass # end of method return coordinate_systems @ApiDecorators._check_bearer_validity def coordinate_system( self, coordinate_system_code: str, workgroup_id: str = None ) -> CoordinateSystem: """Get details about a specific coordinate_system, from the whole Isogeo database or into a specific workgroup (to get the SRS alias for example). :param str workgroup_id: identifier of the owner workgroup. OPTIONNAL: if present, list SRS slected into the workgroup. :param str coordinate_system_code: EPSG code of the coordinate system :rtype: CoordinateSystem :Example: >>> # list all coordinate-systems in the whole Isogeo database >>> srs = isogeo.srs.listing() >>> # print details about the first SRS found >>> pprint.pprint(isogeo.srs.coordinate_system(srs[0].get("code"))) { '_tag': 'coordinate-system:4143', 'code': 4143, 'name': 'Abidjan 1987' } """ # check if workgroup or global if workgroup_id is None: # request URL url_coordinate_system = utils.get_request_base_url( route="coordinate-systems/{}".format(coordinate_system_code) ) else: # check workgroup UUID if not checker.check_is_uuid(workgroup_id): raise ValueError( "Workgroup ID is not a correct UUID: {}".format(workgroup_id) ) # request URL url_coordinate_system = utils.get_request_base_url( route="groups/{}/coordinate-systems/{}".format( workgroup_id, coordinate_system_code ) ) # request req_coordinate_system = self.api_client.get( url=url_coordinate_system, headers=self.api_client.header, proxies=self.api_client.proxies, verify=self.api_client.ssl, timeout=self.api_client.timeout, ) # checking response req_check = checker.check_api_response(req_coordinate_system) if isinstance(req_check, tuple): return req_check # end of method return CoordinateSystem(**req_coordinate_system.json()) # -- Routes to manage the related objects ------------------------------------------ @ApiDecorators._check_bearer_validity def associate_metadata( self, metadata: Metadata, coordinate_system: CoordinateSystem ) -> Response: """Associate a coordinate-system (SRS) to a metadata. If a coordinate-system is already associated to the metadata, it'll be oversritten. 
:param Metadata metadata: metadata object to update :param CoordinateSystem coordinate_system: coordinate-system model object to associate :rtype: CoordinateSystem :Example: .. code-block:: python # retrieve metadata md = isogeo.metadata.get( metadata_id=METADATA_UUID, include=[] ) # retrieve one of the SRS selected in the workgroup of the metadata wg_srs = self.isogeo.coordinate_system.listing(md._creator.get("_id")) random_srs = CoordinateSystem(**sample(wg_srs, 1)[0]) # associate them isogeo.coordinateSystem.associate_metadata( metadata=md, coordinateSystem=random_srs, ) """ # check metadata UUID if not checker.check_is_uuid(metadata._id): raise ValueError( "Metadata ID is not a correct UUID: {}".format(metadata._id) ) else: pass # URL url_srs_association = utils.get_request_base_url( route="resources/{}/coordinate-system".format(metadata._id) ) # request req_srs_association = self.api_client.put( url=url_srs_association, json=coordinate_system.to_dict(), headers=self.api_client.header, proxies=self.api_client.proxies, verify=self.api_client.ssl, timeout=self.api_client.timeout, ) # checking response req_check = checker.check_api_response(req_srs_association) if isinstance(req_check, tuple): return req_check # end of method return CoordinateSystem(**req_srs_association.json()) @ApiDecorators._check_bearer_validity def dissociate_metadata(self, metadata: Metadata) -> Response: """Removes the coordinate-system from a metadata. :param Metadata metadata: metadata object to update """ # check metadata UUID if not checker.check_is_uuid(metadata._id): raise ValueError( "Metadata ID is not a correct UUID: {}".format(metadata._id) ) else: pass # URL url_coordinateSystem_dissociation = utils.get_request_base_url( route="resources/{}/coordinate-system".format(metadata._id) ) # request req_coordinateSystem_dissociation = self.api_client.delete( url=url_coordinateSystem_dissociation, headers=self.api_client.header, proxies=self.api_client.proxies, verify=self.api_client.ssl, timeout=self.api_client.timeout, ) # checking response req_check = checker.check_api_response(req_coordinateSystem_dissociation) if isinstance(req_check, tuple): return req_check # end of method return req_coordinateSystem_dissociation @ApiDecorators._check_bearer_validity def associate_workgroup( self, coordinate_system: CoordinateSystem, workgroup: Workgroup = None ) -> CoordinateSystem: """Add a coordinate system to the workgroup selection or/adn edit the SRS custom alias. :param CoordinateSystem coordinate_system: EPSG code of the coordinate system to add to the workgroup selection :param Workgroup workgroup: identifier of the owner workgroup. 
:rtype: CoordinateSystem :Example: >>> # retrieve the SRS >>> coordsys = isogeo.srs.coordinate_system("4326") >>> # add a custom alias >>> coordsys.alias = "World SRS" >>> # add it to the workgroup selection >>> isogeo.srs.associate_workgroup( workgroup=isogeo.workgroup.get(WORKGROUP_UUID), coordinate_system=coordsys ) """ # check workgroup UUID if not checker.check_is_uuid(workgroup._id): raise ValueError( "Workgroup ID is not a correct UUID: {}".format(workgroup._id) ) else: pass # request URL url_coordinate_system_association = utils.get_request_base_url( route="groups/{}/coordinate-systems/{}".format( workgroup._id, coordinate_system.code ) ) # request req_coordinate_system = self.api_client.put( url=url_coordinate_system_association, json={"alias": coordinate_system.alias}, headers=self.api_client.header, proxies=self.api_client.proxies, verify=self.api_client.ssl, timeout=self.api_client.timeout, ) # checking response req_check = checker.check_api_response(req_coordinate_system) if isinstance(req_check, tuple): return req_check # end of method return CoordinateSystem(**req_coordinate_system.json()) @ApiDecorators._check_bearer_validity def dissociate_workgroup( self, coordinate_system_code: str, workgroup_id: str = None ) -> CoordinateSystem: """Remove a coordinate system from the workgroup selection. :param str coordinate_system_code: EPSG code of the coordinate system to reomve from the workgroup selection :param str workgroup_id: identifier of the owner workgroup. :rtype: CoordinateSystem :Example: >>> isogeo.srs.dissociate_workgroup( workgroup_id=WORKGROUP_TEST_FIXTURE_UUID, coordinate_system_code="2154" ) """ # check workgroup UUID if not checker.check_is_uuid(workgroup_id): raise ValueError( "Workgroup ID is not a correct UUID: {}".format(workgroup_id) ) else: pass # request URL url_coordinate_system_dissociation = utils.get_request_base_url( route="groups/{}/coordinate-systems/{}".format( workgroup_id, coordinate_system_code ) ) # request req_coordinate_system_dissociation = self.api_client.delete( url=url_coordinate_system_dissociation, headers=self.api_client.header, proxies=self.api_client.proxies, verify=self.api_client.ssl, timeout=self.api_client.timeout, ) # checking response req_check = checker.check_api_response(req_coordinate_system_dissociation) if isinstance(req_check, tuple): return req_check # end of method return req_coordinate_system_dissociation # ############################################################################## # ##### Stand alone program ######## # ################################## if __name__ == "__main__": """ standalone execution """ api_coordinate_system = ApiCoordinateSystem()
gpl-3.0
-391,128,447,559,713,800
34.11509
127
0.572469
false
4.417632
false
false
false
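ApiCoordinateSystem.listing above picks between the global coordinate-systems route and a workgroup-scoped one, then caches the response on the API client under a different attribute depending on the scope. The sketch below reproduces just that scoping/caching rule with a dummy fetch function; DummyClient and fetch_srs are illustrative assumptions, not isogeo_pysdk objects.

# Minimal sketch of the scoping/caching rule in listing() above.
# DummyClient and fetch_srs are stand-ins, not part of isogeo_pysdk.

class DummyClient:
    _coordinate_systems = None      # cache for the global catalogue
    _wg_coordinate_systems = None   # cache for the last workgroup selection

def fetch_srs(workgroup_id=None):
    """Pretend HTTP call returning a few EPSG records."""
    if workgroup_id is None:
        return [{'code': 4326}, {'code': 2154}, {'code': 4143}]
    return [{'code': 2154}]         # a workgroup usually selects a subset

def listing(client, workgroup_id=None, caching=True):
    srs = fetch_srs(workgroup_id)
    if caching and workgroup_id is None:
        client._coordinate_systems = srs       # global listing -> global cache
    elif caching:
        client._wg_coordinate_systems = srs    # workgroup listing -> workgroup cache
    return srs

if __name__ == '__main__':
    client = DummyClient()
    print(len(listing(client)))                        # 3
    print(len(listing(client, workgroup_id='wg-1')))   # 1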
redhat-cip/dci-control-server
dci/api/v1/tests.py
1
5672
# -*- coding: utf-8 -*- # # Copyright (C) 2015-2016 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import flask from flask import json from sqlalchemy import exc as sa_exc from sqlalchemy import sql from dci.api.v1 import api from dci.api.v1 import base from dci.api.v1 import remotecis from dci.api.v1 import utils as v1_utils from dci import decorators from dci.common import exceptions as dci_exc from dci.common.schemas import ( check_json_is_valid, clean_json_with_schema, create_test_schema, update_test_schema, check_and_get_args ) from dci.common import utils from dci.db import models _TABLE = models.TESTS # associate column names with the corresponding SA Column object _T_COLUMNS = v1_utils.get_columns_name_with_objects(_TABLE) @api.route('/tests', methods=['POST']) @decorators.login_required def create_tests(user): values = flask.request.json check_json_is_valid(create_test_schema, values) values.update(v1_utils.common_values_dict()) # todo: remove team_id if 'team_id' in values: del values['team_id'] query = _TABLE.insert().values(**values) try: flask.g.db_conn.execute(query) except sa_exc.IntegrityError: raise dci_exc.DCICreationConflict(_TABLE.name, 'name') return flask.Response( json.dumps({'test': values}), 201, content_type='application/json' ) @api.route('/tests/<uuid:t_id>', methods=['PUT']) @decorators.login_required def update_tests(user, t_id): v1_utils.verify_existence_and_get(t_id, _TABLE) if_match_etag = utils.check_and_get_etag(flask.request.headers) values = clean_json_with_schema(update_test_schema, flask.request.json) values['etag'] = utils.gen_etag() where_clause = sql.and_( _TABLE.c.etag == if_match_etag, _TABLE.c.id == t_id ) query = _TABLE.update().returning(*_TABLE.columns).\ where(where_clause).values(**values) result = flask.g.db_conn.execute(query) if not result.rowcount: raise dci_exc.DCIConflict('Test', t_id) return flask.Response( json.dumps({'test': result.fetchone()}), 200, headers={'ETag': values['etag']}, content_type='application/json' ) def get_tests_to_issues(topic_id): query = (sql.select([models.TESTS, models.ISSUES], use_labels=True) .select_from(models.TESTS.join( models.JOIN_ISSUES_TESTS).join(models.ISSUES)) .where(models.ISSUES.c.topic_id == topic_id)) tests_join_issues = flask.g.db_conn.execute(query).fetchall() tests_to_issues = {} for tji in tests_join_issues: test_name = tji['tests_name'] issue = {'id': str(tji['issues_id']), 'url': tji['issues_url']} if test_name not in tests_to_issues: tests_to_issues[test_name] = [issue] else: tests_to_issues[test_name].append(issue) return tests_to_issues def get_all_tests_by_team(user, team_id): # todo: remove team_id args = check_and_get_args(flask.request.args.to_dict()) query = v1_utils.QueryBuilder(_TABLE, args, _T_COLUMNS) query.add_extra_condition(_TABLE.c.state != 'archived') # get the number of rows for the '_meta' section nb_rows = query.get_number_of_rows() rows = query.execute(fetchall=True) rows = v1_utils.format_result(rows, _TABLE.name) return flask.jsonify({'tests': rows, '_meta': 
{'count': nb_rows}}) @api.route('/tests', methods=['GET']) @decorators.login_required def get_all_tests(user): return get_all_tests_by_team(user, None) @api.route('/tests/<uuid:t_id>', methods=['GET']) @decorators.login_required def get_test_by_id(user, t_id): test = v1_utils.verify_existence_and_get(t_id, _TABLE) res = flask.jsonify({'test': test}) return res @api.route('/tests/<uuid:t_id>/remotecis', methods=['GET']) @decorators.login_required def get_remotecis_by_test(user, test_id): test = v1_utils.verify_existence_and_get(test_id, _TABLE) return remotecis.get_all_remotecis(test['id']) @api.route('/tests/<uuid:t_id>', methods=['DELETE']) @decorators.login_required def delete_test_by_id(user, t_id): v1_utils.verify_existence_and_get(t_id, _TABLE) with flask.g.db_conn.begin(): values = {'state': 'archived'} where_clause = _TABLE.c.id == t_id query = _TABLE.update().where(where_clause).values(**values) result = flask.g.db_conn.execute(query) if not result.rowcount: raise dci_exc.DCIDeleteConflict('Test', t_id) for model in [models.FILES]: query = model.update().where(model.c.test_id == t_id).values( **values ) flask.g.db_conn.execute(query) return flask.Response(None, 204, content_type='application/json') @api.route('/tests/purge', methods=['GET']) @decorators.login_required def get_to_purge_archived_tests(user): return base.get_to_purge_archived_resources(user, _TABLE) @api.route('/tests/purge', methods=['POST']) @decorators.login_required def purge_archived_tests(user): return base.purge_archived_resources(user, _TABLE)
apache-2.0
7,328,636,679,937,318,000
30.337017
75
0.661671
false
3.274827
true
false
false
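update_tests in the dci record above relies on optimistic concurrency: the client sends the row's current etag in the If-Match header, the UPDATE is constrained on both id and etag, a zero rowcount surfaces as a conflict, and a successful update returns a fresh etag. The sketch below applies the same rule to an in-memory record; ConflictError and update_test are illustrative names, not dci-control-server APIs.

# Minimal sketch of the If-Match / etag rule used by update_tests above.
# ConflictError and update_test are illustrative stand-ins.
import uuid

class ConflictError(Exception):
    pass

def update_test(record, if_match_etag, values):
    # SQL version: UPDATE tests SET ... WHERE id = :id AND etag = :if_match_etag
    if record['etag'] != if_match_etag:
        raise ConflictError('etag mismatch: the row changed since it was read')
    record.update(values)
    record['etag'] = uuid.uuid4().hex   # rotated etag, returned in the ETag response header
    return record

if __name__ == '__main__':
    row = {'id': 't1', 'name': 'tempest', 'etag': 'abc'}
    update_test(row, 'abc', {'name': 'tempest-smoke'})    # succeeds, etag rotates
    try:
        update_test(row, 'abc', {'name': 'stale write'})  # stale etag -> conflict
    except ConflictError as exc:
        print(exc)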
ruibarreira/linuxtrail
usr/lib/python2.7/dist-packages/numpy/core/tests/test_multiarray.py
1
145252
from __future__ import division, absolute_import, print_function import tempfile import sys import os import shutil import warnings import io from decimal import Decimal import numpy as np from nose import SkipTest from numpy.core import * from numpy.compat import asbytes, getexception, strchar, sixu from test_print import in_foreign_locale from numpy.core.multiarray_tests import ( test_neighborhood_iterator, test_neighborhood_iterator_oob, test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end, test_inplace_increment, get_buffer_info ) from numpy.testing import ( TestCase, run_module_suite, assert_, assert_raises, assert_equal, assert_almost_equal, assert_array_equal, assert_array_almost_equal, assert_allclose, assert_array_less, runstring, dec ) # Need to test an object that does not fully implement math interface from datetime import timedelta if sys.version_info[:2] > (3, 2): # In Python 3.3 the representation of empty shape, strides and suboffsets # is an empty tuple instead of None. # http://docs.python.org/dev/whatsnew/3.3.html#api-changes EMPTY = () else: EMPTY = None class TestFlags(TestCase): def setUp(self): self.a = arange(10) def test_writeable(self): mydict = locals() self.a.flags.writeable = False self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict) self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict) self.a.flags.writeable = True self.a[0] = 5 self.a[0] = 0 def test_otherflags(self): assert_equal(self.a.flags.carray, True) assert_equal(self.a.flags.farray, False) assert_equal(self.a.flags.behaved, True) assert_equal(self.a.flags.fnc, False) assert_equal(self.a.flags.forc, True) assert_equal(self.a.flags.owndata, True) assert_equal(self.a.flags.writeable, True) assert_equal(self.a.flags.aligned, True) assert_equal(self.a.flags.updateifcopy, False) class TestHash(TestCase): # see #3793 def test_int(self): for st, ut, s in [(np.int8, np.uint8, 8), (np.int16, np.uint16, 16), (np.int32, np.uint32, 32), (np.int64, np.uint64, 64)]: for i in range(1, s): assert_equal(hash(st(-2**i)), hash(-2**i), err_msg="%r: -2**%d" % (st, i)) assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)), err_msg="%r: 2**%d" % (st, i - 1)) assert_equal(hash(st(2**i - 1)), hash(2**i - 1), err_msg="%r: 2**%d - 1" % (st, i)) i = max(i - 1, 1) assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)), err_msg="%r: 2**%d" % (ut, i - 1)) assert_equal(hash(ut(2**i - 1)), hash(2**i - 1), err_msg="%r: 2**%d - 1" % (ut, i)) class TestAttributes(TestCase): def setUp(self): self.one = arange(10) self.two = arange(20).reshape(4, 5) self.three = arange(60, dtype=float64).reshape(2, 5, 6) def test_attributes(self): assert_equal(self.one.shape, (10,)) assert_equal(self.two.shape, (4, 5)) assert_equal(self.three.shape, (2, 5, 6)) self.three.shape = (10, 3, 2) assert_equal(self.three.shape, (10, 3, 2)) self.three.shape = (2, 5, 6) assert_equal(self.one.strides, (self.one.itemsize,)) num = self.two.itemsize assert_equal(self.two.strides, (5*num, num)) num = self.three.itemsize assert_equal(self.three.strides, (30*num, 6*num, num)) assert_equal(self.one.ndim, 1) assert_equal(self.two.ndim, 2) assert_equal(self.three.ndim, 3) num = self.two.itemsize assert_equal(self.two.size, 20) assert_equal(self.two.nbytes, 20*num) assert_equal(self.two.itemsize, self.two.dtype.itemsize) assert_equal(self.two.base, arange(20)) def test_dtypeattr(self): assert_equal(self.one.dtype, dtype(int_)) assert_equal(self.three.dtype, dtype(float_)) assert_equal(self.one.dtype.char, 'l') 
assert_equal(self.three.dtype.char, 'd') self.assertTrue(self.three.dtype.str[0] in '<>') assert_equal(self.one.dtype.str[1], 'i') assert_equal(self.three.dtype.str[1], 'f') def test_int_subclassing(self): # Regression test for https://github.com/numpy/numpy/pull/3526 numpy_int = np.int_(0) if sys.version_info[0] >= 3: # On Py3k int_ should not inherit from int, because it's not fixed-width anymore assert_equal(isinstance(numpy_int, int), False) else: # Otherwise, it should inherit from int... assert_equal(isinstance(numpy_int, int), True) # ... and fast-path checks on C-API level should also work from numpy.core.multiarray_tests import test_int_subclass assert_equal(test_int_subclass(numpy_int), True) def test_stridesattr(self): x = self.one def make_array(size, offset, strides): return ndarray(size, buffer=x, dtype=int, offset=offset*x.itemsize, strides=strides*x.itemsize) assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) self.assertRaises(ValueError, make_array, 4, 4, -2) self.assertRaises(ValueError, make_array, 4, 2, -1) self.assertRaises(ValueError, make_array, 8, 3, 1) assert_equal(make_array(8, 3, 0), np.array([3]*8)) # Check behavior reported in gh-2503: self.assertRaises(ValueError, make_array, (2, 3), 5, array([-2, -3])) make_array(0, 0, 10) def test_set_stridesattr(self): x = self.one def make_array(size, offset, strides): try: r = ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize) except: raise RuntimeError(getexception()) r.strides = strides=strides*x.itemsize return r assert_equal(make_array(4, 4, -1), array([4, 3, 2, 1])) assert_equal(make_array(7, 3, 1), array([3, 4, 5, 6, 7, 8, 9])) self.assertRaises(ValueError, make_array, 4, 4, -2) self.assertRaises(ValueError, make_array, 4, 2, -1) self.assertRaises(RuntimeError, make_array, 8, 3, 1) # Check that the true extent of the array is used. # Test relies on as_strided base not exposing a buffer. x = np.lib.stride_tricks.as_strided(arange(1), (10, 10), (0, 0)) def set_strides(arr, strides): arr.strides = strides self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize)) # Test for offset calculations: x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1], shape=(10,), strides=(-1,)) self.assertRaises(ValueError, set_strides, x[::-1], -1) a = x[::-1] a.strides = 1 a[::2].strides = 2 def test_fill(self): for t in "?bhilqpBHILQPfdgFDGO": x = empty((3, 2, 1), t) y = empty((3, 2, 1), t) x.fill(1) y[...] = 1 assert_equal(x, y) def test_fill_struct_array(self): # Filling from a scalar x = array([(0, 0.0), (1, 1.0)], dtype='i4,f8') x.fill(x[0]) assert_equal(x['f1'][1], x['f1'][0]) # Filling from a tuple that can be converted # to a scalar x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')]) x.fill((3.5, -2)) assert_array_equal(x['a'], [3.5, 3.5]) assert_array_equal(x['b'], [-2, -2]) class TestAssignment(TestCase): def test_assignment_broadcasting(self): a = np.arange(6).reshape(2, 3) # Broadcasting the input to the output a[...] = np.arange(3) assert_equal(a, [[0, 1, 2], [0, 1, 2]]) a[...] = np.arange(2).reshape(2, 1) assert_equal(a, [[0, 0, 0], [1, 1, 1]]) # For compatibility with <= 1.5, a limited version of broadcasting # the output to the input. # # This behavior is inconsistent with NumPy broadcasting # in general, because it only uses one of the two broadcasting # rules (adding a new "1" dimension to the left of the shape), # applied to the output instead of an input. In NumPy 2.0, this kind # of broadcasting assignment will likely be disallowed. a[...] 
= np.arange(6)[::-1].reshape(1, 2, 3) assert_equal(a, [[5, 4, 3], [2, 1, 0]]) # The other type of broadcasting would require a reduction operation. def assign(a, b): a[...] = b assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3)) class TestDtypedescr(TestCase): def test_construction(self): d1 = dtype('i4') assert_equal(d1, dtype(int32)) d2 = dtype('f8') assert_equal(d2, dtype(float64)) class TestZeroRank(TestCase): def setUp(self): self.d = array(0), array('x', object) def test_ellipsis_subscript(self): a, b = self.d self.assertEqual(a[...], 0) self.assertEqual(b[...], 'x') self.assertTrue(a[...] is a) self.assertTrue(b[...] is b) def test_empty_subscript(self): a, b = self.d self.assertEqual(a[()], 0) self.assertEqual(b[()], 'x') self.assertTrue(type(a[()]) is a.dtype.type) self.assertTrue(type(b[()]) is str) def test_invalid_subscript(self): a, b = self.d self.assertRaises(IndexError, lambda x: x[0], a) self.assertRaises(IndexError, lambda x: x[0], b) self.assertRaises(IndexError, lambda x: x[array([], int)], a) self.assertRaises(IndexError, lambda x: x[array([], int)], b) def test_ellipsis_subscript_assignment(self): a, b = self.d a[...] = 42 self.assertEqual(a, 42) b[...] = '' self.assertEqual(b.item(), '') def test_empty_subscript_assignment(self): a, b = self.d a[()] = 42 self.assertEqual(a, 42) b[()] = '' self.assertEqual(b.item(), '') def test_invalid_subscript_assignment(self): a, b = self.d def assign(x, i, v): x[i] = v self.assertRaises(IndexError, assign, a, 0, 42) self.assertRaises(IndexError, assign, b, 0, '') self.assertRaises(ValueError, assign, a, (), '') def test_newaxis(self): a, b = self.d self.assertEqual(a[newaxis].shape, (1,)) self.assertEqual(a[..., newaxis].shape, (1,)) self.assertEqual(a[newaxis, ...].shape, (1,)) self.assertEqual(a[..., newaxis].shape, (1,)) self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1)) self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1)) self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1)) self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) def test_invalid_newaxis(self): a, b = self.d def subscript(x, i): x[i] self.assertRaises(IndexError, subscript, a, (newaxis, 0)) self.assertRaises(IndexError, subscript, a, (newaxis,)*50) def test_constructor(self): x = ndarray(()) x[()] = 5 self.assertEqual(x[()], 5) y = ndarray((), buffer=x) y[()] = 6 self.assertEqual(x[()], 6) def test_output(self): x = array(2) self.assertRaises(ValueError, add, x, [1], x) class TestScalarIndexing(TestCase): def setUp(self): self.d = array([0, 1])[0] def test_ellipsis_subscript(self): a = self.d self.assertEqual(a[...], 0) self.assertEqual(a[...].shape, ()) def test_empty_subscript(self): a = self.d self.assertEqual(a[()], 0) self.assertEqual(a[()].shape, ()) def test_invalid_subscript(self): a = self.d self.assertRaises(IndexError, lambda x: x[0], a) self.assertRaises(IndexError, lambda x: x[array([], int)], a) def test_invalid_subscript_assignment(self): a = self.d def assign(x, i, v): x[i] = v self.assertRaises(TypeError, assign, a, 0, 42) def test_newaxis(self): a = self.d self.assertEqual(a[newaxis].shape, (1,)) self.assertEqual(a[..., newaxis].shape, (1,)) self.assertEqual(a[newaxis, ...].shape, (1,)) self.assertEqual(a[..., newaxis].shape, (1,)) self.assertEqual(a[newaxis, ..., newaxis].shape, (1, 1)) self.assertEqual(a[..., newaxis, newaxis].shape, (1, 1)) self.assertEqual(a[newaxis, newaxis, ...].shape, (1, 1)) self.assertEqual(a[(newaxis,)*10].shape, (1,)*10) def test_invalid_newaxis(self): a = self.d def subscript(x, i): x[i] 
self.assertRaises(IndexError, subscript, a, (newaxis, 0)) self.assertRaises(IndexError, subscript, a, (newaxis,)*50) def test_overlapping_assignment(self): # With positive strides a = np.arange(4) a[:-1] = a[1:] assert_equal(a, [1, 2, 3, 3]) a = np.arange(4) a[1:] = a[:-1] assert_equal(a, [0, 0, 1, 2]) # With positive and negative strides a = np.arange(4) a[:] = a[::-1] assert_equal(a, [3, 2, 1, 0]) a = np.arange(6).reshape(2, 3) a[::-1,:] = a[:, ::-1] assert_equal(a, [[5, 4, 3], [2, 1, 0]]) a = np.arange(6).reshape(2, 3) a[::-1, ::-1] = a[:, ::-1] assert_equal(a, [[3, 4, 5], [0, 1, 2]]) # With just one element overlapping a = np.arange(5) a[:3] = a[2:] assert_equal(a, [2, 3, 4, 3, 4]) a = np.arange(5) a[2:] = a[:3] assert_equal(a, [0, 1, 0, 1, 2]) a = np.arange(5) a[2::-1] = a[2:] assert_equal(a, [4, 3, 2, 3, 4]) a = np.arange(5) a[2:] = a[2::-1] assert_equal(a, [0, 1, 2, 1, 0]) a = np.arange(5) a[2::-1] = a[:1:-1] assert_equal(a, [2, 3, 4, 3, 4]) a = np.arange(5) a[:1:-1] = a[2::-1] assert_equal(a, [0, 1, 0, 1, 2]) class TestCreation(TestCase): def test_from_attribute(self): class x(object): def __array__(self, dtype=None): pass self.assertRaises(ValueError, array, x()) def test_from_string(self) : types = np.typecodes['AllInteger'] + np.typecodes['Float'] nstr = ['123', '123'] result = array([123, 123], dtype=int) for type in types : msg = 'String conversion for %s' % type assert_equal(array(nstr, dtype=type), result, err_msg=msg) def test_void(self): arr = np.array([], dtype='V') assert_equal(arr.dtype.kind, 'V') def test_zeros(self): types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: d = np.zeros((13,), dtype=dt) assert_equal(np.count_nonzero(d), 0) # true for ieee floats assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype='(2,4)i4') assert_equal(np.count_nonzero(d), 0) assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype='4i4') assert_equal(np.count_nonzero(d), 0) assert_equal(d.sum(), 0) assert_(not d.any()) d = np.zeros(2, dtype='(2,4)i4, (2,4)i4') assert_equal(np.count_nonzero(d), 0) @dec.slow def test_zeros_big(self): # test big array as they might be allocated different by the sytem types = np.typecodes['AllInteger'] + np.typecodes['AllFloat'] for dt in types: d = np.zeros((30 * 1024**2,), dtype=dt) assert_(not d.any()) def test_zeros_obj(self): # test initialization from PyLong(0) d = np.zeros((13,), dtype=object) assert_array_equal(d, [0] * 13) assert_equal(np.count_nonzero(d), 0) def test_non_sequence_sequence(self): """Should not segfault. Class Fail breaks the sequence protocol for new style classes, i.e., those derived from object. Class Map is a mapping type indicated by raising a ValueError. At some point we may raise a warning instead of an error in the Fail case. 
""" class Fail(object): def __len__(self): return 1 def __getitem__(self, index): raise ValueError() class Map(object): def __len__(self): return 1 def __getitem__(self, index): raise KeyError() a = np.array([Map()]) assert_(a.shape == (1,)) assert_(a.dtype == np.dtype(object)) assert_raises(ValueError, np.array, [Fail()]) class TestStructured(TestCase): def test_subarray_field_access(self): a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))]) a['a'] = np.arange(60).reshape(3, 5, 2, 2) # Since the subarray is always in C-order, these aren't equal assert_(np.any(a['a'].T != a.T['a'])) # In Fortran order, the subarray gets appended # like in all other cases, not prepended as a special case b = a.copy(order='F') assert_equal(a['a'].shape, b['a'].shape) assert_equal(a.T['a'].shape, a.T.copy()['a'].shape) def test_subarray_comparison(self): # Check that comparisons between record arrays with # multi-dimensional field types work properly a = np.rec.fromrecords( [([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])], dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))]) b = a.copy() assert_equal(a==b, [True, True]) assert_equal(a!=b, [False, False]) b[1].b = 'c' assert_equal(a==b, [True, False]) assert_equal(a!=b, [False, True]) for i in range(3): b[0].a = a[0].a b[0].a[i] = 5 assert_equal(a==b, [False, False]) assert_equal(a!=b, [True, True]) for i in range(2): for j in range(2): b = a.copy() b[0].c[i, j] = 10 assert_equal(a==b, [False, True]) assert_equal(a!=b, [True, False]) # Check that broadcasting with a subarray works a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')]) b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')]) assert_equal(a==b, [[True, True, False], [False, False, True]]) assert_equal(b==a, [[True, True, False], [False, False, True]]) a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))]) b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))]) assert_equal(a==b, [[True, True, False], [False, False, True]]) assert_equal(b==a, [[True, True, False], [False, False, True]]) a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))]) b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a==b, [[True, False, False], [False, False, True]]) assert_equal(b==a, [[True, False, False], [False, False, True]]) # Check that broadcasting Fortran-style arrays with a subarray work a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F') b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))]) assert_equal(a==b, [[True, False, False], [False, False, True]]) assert_equal(b==a, [[True, False, False], [False, False, True]]) # Check that incompatible sub-array shapes don't result to broadcasting x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) assert_equal(x == y, False) x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')]) y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')]) assert_equal(x == y, False) class TestBool(TestCase): def test_test_interning(self): a0 = bool_(0) b0 = bool_(False) self.assertTrue(a0 is b0) a1 = bool_(1) b1 = bool_(True) self.assertTrue(a1 is b1) self.assertTrue(array([True])[0] is a1) self.assertTrue(array(True)[()] is a1) class TestMethods(TestCase): def test_test_round(self): assert_equal(array([1.2, 1.5]).round(), [1, 2]) assert_equal(array(1.5).round(), 2) assert_equal(array([12.2, 15.5]).round(-1), [10, 20]) assert_equal(array([12.15, 15.51]).round(1), [12.2, 
15.5]) def test_transpose(self): a = array([[1, 2], [3, 4]]) assert_equal(a.transpose(), [[1, 3], [2, 4]]) self.assertRaises(ValueError, lambda: a.transpose(0)) self.assertRaises(ValueError, lambda: a.transpose(0, 0)) self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2)) def test_sort(self): # test ordering for floats and complex containing nans. It is only # necessary to check the lessthan comparison, so sorts that # only follow the insertion sort path are sufficient. We only # test doubles and complex doubles as the logic is the same. # check doubles msg = "Test real sort order with nans" a = np.array([np.nan, 1, 0]) b = sort(a) assert_equal(b, a[::-1], msg) # check complex msg = "Test complex sort order with nans" a = np.zeros(9, dtype=np.complex128) a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0] a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0] b = sort(a) assert_equal(b, a[::-1], msg) # all c scalar sorts use the same code with different types # so it suffices to run a quick check with one type. The number # of sorted items must be greater than ~50 to check the actual # algorithm because quick and merge sort fall over to insertion # sort for small arrays. a = np.arange(101) b = a[::-1].copy() for kind in ['q', 'm', 'h'] : msg = "scalar sort, kind=%s" % kind c = a.copy(); c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy(); c.sort(kind=kind) assert_equal(c, a, msg) # test complex sorts. These use the same code as the scalars # but the compare fuction differs. ai = a*1j + 1 bi = b*1j + 1 for kind in ['q', 'm', 'h'] : msg = "complex sort, real part == 1, kind=%s" % kind c = ai.copy(); c.sort(kind=kind) assert_equal(c, ai, msg) c = bi.copy(); c.sort(kind=kind) assert_equal(c, ai, msg) ai = a + 1j bi = b + 1j for kind in ['q', 'm', 'h'] : msg = "complex sort, imag part == 1, kind=%s" % kind c = ai.copy(); c.sort(kind=kind) assert_equal(c, ai, msg) c = bi.copy(); c.sort(kind=kind) assert_equal(c, ai, msg) # test string sorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)]) b = a[::-1].copy() for kind in ['q', 'm', 'h'] : msg = "string sort, kind=%s" % kind c = a.copy(); c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy(); c.sort(kind=kind) assert_equal(c, a, msg) # test unicode sorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) b = a[::-1].copy() for kind in ['q', 'm', 'h'] : msg = "unicode sort, kind=%s" % kind c = a.copy(); c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy(); c.sort(kind=kind) assert_equal(c, a, msg) # test object array sorts. a = np.empty((101,), dtype=np.object) a[:] = list(range(101)) b = a[::-1] for kind in ['q', 'h', 'm'] : msg = "object sort, kind=%s" % kind c = a.copy(); c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy(); c.sort(kind=kind) assert_equal(c, a, msg) # test record array sorts. dt = np.dtype([('f', float), ('i', int)]) a = array([(i, i) for i in range(101)], dtype = dt) b = a[::-1] for kind in ['q', 'h', 'm'] : msg = "object sort, kind=%s" % kind c = a.copy(); c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy(); c.sort(kind=kind) assert_equal(c, a, msg) # test datetime64 sorts. a = np.arange(0, 101, dtype='datetime64[D]') b = a[::-1] for kind in ['q', 'h', 'm'] : msg = "datetime64 sort, kind=%s" % kind c = a.copy(); c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy(); c.sort(kind=kind) assert_equal(c, a, msg) # test timedelta64 sorts. 
a = np.arange(0, 101, dtype='timedelta64[D]') b = a[::-1] for kind in ['q', 'h', 'm'] : msg = "timedelta64 sort, kind=%s" % kind c = a.copy(); c.sort(kind=kind) assert_equal(c, a, msg) c = b.copy(); c.sort(kind=kind) assert_equal(c, a, msg) # check axis handling. This should be the same for all type # specific sorts, so we only check it for one type and one kind a = np.array([[3, 2], [1, 0]]) b = np.array([[1, 0], [3, 2]]) c = np.array([[2, 3], [0, 1]]) d = a.copy() d.sort(axis=0) assert_equal(d, b, "test sort with axis=0") d = a.copy() d.sort(axis=1) assert_equal(d, c, "test sort with axis=1") d = a.copy() d.sort() assert_equal(d, c, "test sort with default axis") def test_copy(self): def assert_fortran(arr): assert_(arr.flags.fortran) assert_(arr.flags.f_contiguous) assert_(not arr.flags.c_contiguous) def assert_c(arr): assert_(not arr.flags.fortran) assert_(not arr.flags.f_contiguous) assert_(arr.flags.c_contiguous) a = np.empty((2, 2), order='F') # Test copying a Fortran array assert_c(a.copy()) assert_c(a.copy('C')) assert_fortran(a.copy('F')) assert_fortran(a.copy('A')) # Now test starting with a C array. a = np.empty((2, 2), order='C') assert_c(a.copy()) assert_c(a.copy('C')) assert_fortran(a.copy('F')) assert_c(a.copy('A')) def test_sort_order(self): # Test sorting an array with fields x1=np.array([21, 32, 14]) x2=np.array(['my', 'first', 'name']) x3=np.array([3.1, 4.5, 6.2]) r=np.rec.fromarrays([x1, x2, x3], names='id,word,number') r.sort(order=['id']) assert_equal(r.id, array([14, 21, 32])) assert_equal(r.word, array(['name', 'my', 'first'])) assert_equal(r.number, array([6.2, 3.1, 4.5])) r.sort(order=['word']) assert_equal(r.id, array([32, 21, 14])) assert_equal(r.word, array(['first', 'my', 'name'])) assert_equal(r.number, array([4.5, 3.1, 6.2])) r.sort(order=['number']) assert_equal(r.id, array([21, 32, 14])) assert_equal(r.word, array(['my', 'first', 'name'])) assert_equal(r.number, array([3.1, 4.5, 6.2])) if sys.byteorder == 'little': strtype = '>i2' else: strtype = '<i2' mydtype = [('name', strchar + '5'), ('col2', strtype)] r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)], dtype= mydtype) r.sort(order='col2') assert_equal(r['col2'], [1, 3, 255, 258]) assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)], dtype=mydtype)) def test_argsort(self): # all c scalar argsorts use the same code with different types # so it suffices to run a quick check with one type. The number # of sorted items must be greater than ~50 to check the actual # algorithm because quick and merge sort fall over to insertion # sort for small arrays. a = np.arange(101) b = a[::-1].copy() for kind in ['q', 'm', 'h'] : msg = "scalar argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), a, msg) assert_equal(b.copy().argsort(kind=kind), b, msg) # test complex argsorts. These use the same code as the scalars # but the compare fuction differs. ai = a*1j + 1 bi = b*1j + 1 for kind in ['q', 'm', 'h'] : msg = "complex argsort, kind=%s" % kind assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) ai = a + 1j bi = b + 1j for kind in ['q', 'm', 'h'] : msg = "complex argsort, kind=%s" % kind assert_equal(ai.copy().argsort(kind=kind), a, msg) assert_equal(bi.copy().argsort(kind=kind), b, msg) # test string argsorts. 
s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)]) b = a[::-1].copy() r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h'] : msg = "string argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test unicode argsorts. s = 'aaaaaaaa' a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h'] : msg = "unicode argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test object array argsorts. a = np.empty((101,), dtype=np.object) a[:] = list(range(101)) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h'] : msg = "object argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test structured array argsorts. dt = np.dtype([('f', float), ('i', int)]) a = array([(i, i) for i in range(101)], dtype = dt) b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'm', 'h'] : msg = "structured array argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test datetime64 argsorts. a = np.arange(0, 101, dtype='datetime64[D]') b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm'] : msg = "datetime64 argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # test timedelta64 argsorts. a = np.arange(0, 101, dtype='timedelta64[D]') b = a[::-1] r = np.arange(101) rr = r[::-1] for kind in ['q', 'h', 'm'] : msg = "timedelta64 argsort, kind=%s" % kind assert_equal(a.copy().argsort(kind=kind), r, msg) assert_equal(b.copy().argsort(kind=kind), rr, msg) # check axis handling. This should be the same for all type # specific argsorts, so we only check it for one type and one kind a = np.array([[3, 2], [1, 0]]) b = np.array([[1, 1], [0, 0]]) c = np.array([[1, 0], [1, 0]]) assert_equal(a.copy().argsort(axis=0), b) assert_equal(a.copy().argsort(axis=1), c) assert_equal(a.copy().argsort(), c) # using None is known fail at this point #assert_equal(a.copy().argsort(axis=None, c) # check that stable argsorts are stable r = np.arange(100) # scalars a = np.zeros(100) assert_equal(a.argsort(kind='m'), r) # complex a = np.zeros(100, dtype=np.complex) assert_equal(a.argsort(kind='m'), r) # string a = np.array(['aaaaaaaaa' for i in range(100)]) assert_equal(a.argsort(kind='m'), r) # unicode a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode) assert_equal(a.argsort(kind='m'), r) def test_searchsorted(self): # test for floats and complex containing nans. The logic is the # same for all float types so only test double types for now. # The search sorted routines use the compare functions for the # array type, so this checks if that is consistent with the sort # order. 
# check double a = np.array([0, 1, np.nan]) msg = "Test real searchsorted with nans, side='l'" b = a.searchsorted(a, side='l') assert_equal(b, np.arange(3), msg) msg = "Test real searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') assert_equal(b, np.arange(1, 4), msg) # check double complex a = np.zeros(9, dtype=np.complex128) a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan] a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan] msg = "Test complex searchsorted with nans, side='l'" b = a.searchsorted(a, side='l') assert_equal(b, np.arange(9), msg) msg = "Test complex searchsorted with nans, side='r'" b = a.searchsorted(a, side='r') assert_equal(b, np.arange(1, 10), msg) msg = "Test searchsorted with little endian, side='l'" a = np.array([0, 128], dtype='<i4') b = a.searchsorted(np.array(128, dtype='<i4')) assert_equal(b, 1, msg) msg = "Test searchsorted with big endian, side='l'" a = np.array([0, 128], dtype='>i4') b = a.searchsorted(np.array(128, dtype='>i4')) assert_equal(b, 1, msg) # Check 0 elements a = np.ones(0) b = a.searchsorted([0, 1, 2], 'l') assert_equal(b, [0, 0, 0]) b = a.searchsorted([0, 1, 2], 'r') assert_equal(b, [0, 0, 0]) a = np.ones(1) # Check 1 element b = a.searchsorted([0, 1, 2], 'l') assert_equal(b, [0, 0, 1]) b = a.searchsorted([0, 1, 2], 'r') assert_equal(b, [0, 1, 1]) # Check all elements equal a = np.ones(2) b = a.searchsorted([0, 1, 2], 'l') assert_equal(b, [0, 0, 2]) b = a.searchsorted([0, 1, 2], 'r') assert_equal(b, [0, 2, 2]) # Test searching unaligned array a = np.arange(10) aligned = np.empty(a.itemsize * a.size + 1, 'uint8') unaligned = aligned[1:].view(a.dtype) unaligned[:] = a # Test searching unaligned array b = unaligned.searchsorted(a, 'l') assert_equal(b, a) b = unaligned.searchsorted(a, 'r') assert_equal(b, a + 1) # Test searching for unaligned keys b = a.searchsorted(unaligned, 'l') assert_equal(b, a) b = a.searchsorted(unaligned, 'r') assert_equal(b, a + 1) def test_searchsorted_unicode(self): # Test searchsorted on unicode strings. # 1.6.1 contained a string length miscalculation in # arraytypes.c.src:UNICODE_compare() which manifested as # incorrect/inconsistent results from searchsorted. 
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1', 'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'], dtype=np.unicode) ind = np.arange(len(a)) assert_equal([a.searchsorted(v, 'left') for v in a], ind) assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1) assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind) assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1) def test_searchsorted_with_sorter(self): a = np.array([5, 2, 1, 3, 4]) s = np.argsort(a) assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3))) assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6]) # bounds check assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3]) assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3]) a = np.random.rand(300) s = a.argsort() b = np.sort(a) k = np.linspace(0, 1, 20) assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s)) a = np.array([0, 1, 2, 3, 5]*20) s = a.argsort() k = [0, 1, 2, 3, 5] expected = [0, 20, 40, 60, 80] assert_equal(a.searchsorted(k, side='l', sorter=s), expected) expected = [20, 40, 60, 80, 100] assert_equal(a.searchsorted(k, side='r', sorter=s), expected) # Test searching unaligned array keys = np.arange(10) a = keys.copy() np.random.shuffle(s) s = a.argsort() aligned = np.empty(a.itemsize * a.size + 1, 'uint8') unaligned = aligned[1:].view(a.dtype) # Test searching unaligned array unaligned[:] = a b = unaligned.searchsorted(keys, 'l', s) assert_equal(b, keys) b = unaligned.searchsorted(keys, 'r', s) assert_equal(b, keys + 1) # Test searching for unaligned keys unaligned[:] = keys b = a.searchsorted(unaligned, 'l', s) assert_equal(b, keys) b = a.searchsorted(unaligned, 'r', s) assert_equal(b, keys + 1) def test_partition(self): d = np.arange(10) assert_raises(TypeError, np.partition, d, 2, kind=1) assert_raises(ValueError, np.partition, d, 2, kind="nonsense") assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense") assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense") assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense") for k in ("introselect",): d = np.array([]) assert_array_equal(np.partition(d, 0, kind=k), d) assert_array_equal(np.argpartition(d, 0, kind=k), d) d = np.ones((1)) assert_array_equal(np.partition(d, 0, kind=k)[0], d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) # kth not modified kth = np.array([30, 15, 5]) okth = kth.copy() np.partition(np.arange(40), kth) assert_array_equal(kth, okth) for r in ([2, 1], [1, 2], [1, 1]): d = np.array(r) tgt = np.sort(d) assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) 
assert_array_equal(d[np.argpartition(d, 1, kind=k)], np.partition(d, 1, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1], [1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]): d = np.array(r) tgt = np.sort(d) assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0]) assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1]) assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2]) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) assert_array_equal(d[np.argpartition(d, 1, kind=k)], np.partition(d, 1, kind=k)) assert_array_equal(d[np.argpartition(d, 2, kind=k)], np.partition(d, 2, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) d = np.ones((50)) assert_array_equal(np.partition(d, 0, kind=k), d) assert_array_equal(d[np.argpartition(d, 0, kind=k)], np.partition(d, 0, kind=k)) # sorted d = np.arange((49)) self.assertEqual(np.partition(d, 5, kind=k)[5], 5) self.assertEqual(np.partition(d, 15, kind=k)[15], 15) assert_array_equal(d[np.argpartition(d, 5, kind=k)], np.partition(d, 5, kind=k)) assert_array_equal(d[np.argpartition(d, 15, kind=k)], np.partition(d, 15, kind=k)) # rsorted d = np.arange((47))[::-1] self.assertEqual(np.partition(d, 6, kind=k)[6], 6) self.assertEqual(np.partition(d, 16, kind=k)[16], 16) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], np.partition(d, 16, kind=k)) assert_array_equal(np.partition(d, -6, kind=k), np.partition(d, 41, kind=k)) assert_array_equal(np.partition(d, -16, kind=k), np.partition(d, 31, kind=k)) assert_array_equal(d[np.argpartition(d, -6, kind=k)], np.partition(d, 41, kind=k)) # equal elements d = np.arange((47)) % 7 tgt = np.sort(np.arange((47)) % 7) np.random.shuffle(d) for i in range(d.size): self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i]) assert_array_equal(d[np.argpartition(d, 6, kind=k)], np.partition(d, 6, kind=k)) assert_array_equal(d[np.argpartition(d, 16, kind=k)], np.partition(d, 16, kind=k)) for i in range(d.size): d[i:].partition(0, kind=k) assert_array_equal(d, tgt) d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 9]) kth = [0, 3, 19, 20] assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7)) assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7)) d = np.array([2, 1]) d.partition(0, kind=k) assert_raises(ValueError, d.partition, 2) assert_raises(ValueError, d.partition, 3, axis=1) assert_raises(ValueError, np.partition, d, 2) assert_raises(ValueError, np.partition, d, 2, axis=1) assert_raises(ValueError, d.argpartition, 2) assert_raises(ValueError, d.argpartition, 3, axis=1) assert_raises(ValueError, np.argpartition, d, 2) assert_raises(ValueError, np.argpartition, d, 2, axis=1) d = np.arange(10).reshape((2, 5)) d.partition(1, axis=0, kind=k) d.partition(4, axis=1, kind=k) np.partition(d, 1, axis=0, kind=k) np.partition(d, 4, axis=1, kind=k) np.partition(d, 1, axis=None, kind=k) np.partition(d, 9, axis=None, kind=k) d.argpartition(1, axis=0, kind=k) d.argpartition(4, axis=1, kind=k) np.argpartition(d, 1, axis=0, kind=k) np.argpartition(d, 4, axis=1, kind=k) np.argpartition(d, 1, axis=None, kind=k) np.argpartition(d, 9, axis=None, kind=k) assert_raises(ValueError, d.partition, 2, axis=0) assert_raises(ValueError, d.partition, 11, axis=1) assert_raises(TypeError, d.partition, 2, axis=None) assert_raises(ValueError, np.partition, d, 9, axis=1) 
assert_raises(ValueError, np.partition, d, 11, axis=None) assert_raises(ValueError, d.argpartition, 2, axis=0) assert_raises(ValueError, d.argpartition, 11, axis=1) assert_raises(ValueError, np.argpartition, d, 9, axis=1) assert_raises(ValueError, np.argpartition, d, 11, axis=None) td = [(dt, s) for dt in [np.int32, np.float32, np.complex64] for s in (9, 16)] for dt, s in td: aae = assert_array_equal at = self.assertTrue d = np.arange(s, dtype=dt) np.random.shuffle(d) d1 = np.tile(np.arange(s, dtype=dt), (4, 1)) map(np.random.shuffle, d1) d0 = np.transpose(d1) for i in range(d.size): p = np.partition(d, i, kind=k) self.assertEqual(p[i], i) # all before are smaller assert_array_less(p[:i], p[i]) # all after are larger assert_array_less(p[i], p[i + 1:]) aae(p, d[np.argpartition(d, i, kind=k)]) p = np.partition(d1, i, axis=1, kind=k) aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:, :i].T <= p[:, i]).all(), msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T)) at((p[:, i + 1:].T > p[:, i]).all(), msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T)) aae(p, d1[np.arange(d1.shape[0])[:, None], np.argpartition(d1, i, axis=1, kind=k)]) p = np.partition(d0, i, axis=0, kind=k) aae(p[i,:], np.array([i] * d1.shape[0], dtype=dt)) # array_less does not seem to work right at((p[:i,:] <= p[i,:]).all(), msg="%d: %r <= %r" % (i, p[i,:], p[:i,:])) at((p[i + 1:,:] > p[i,:]).all(), msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:])) aae(p, d0[np.argpartition(d0, i, axis=0, kind=k), np.arange(d0.shape[1])[None,:]]) # check inplace dc = d.copy() dc.partition(i, kind=k) assert_equal(dc, np.partition(d, i, kind=k)) dc = d0.copy() dc.partition(i, axis=0, kind=k) assert_equal(dc, np.partition(d0, i, axis=0, kind=k)) dc = d1.copy() dc.partition(i, axis=1, kind=k) assert_equal(dc, np.partition(d1, i, axis=1, kind=k)) def assert_partitioned(self, d, kth): prev = 0 for k in np.sort(kth): assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k) assert_((d[k:] >= d[k]).all(), msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k])) prev = k + 1 def test_partition_iterative(self): d = np.arange(17) kth = (0, 1, 2, 429, 231) assert_raises(ValueError, d.partition, kth) assert_raises(ValueError, d.argpartition, kth) d = np.arange(10).reshape((2, 5)) assert_raises(ValueError, d.partition, kth, axis=0) assert_raises(ValueError, d.partition, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=1) assert_raises(ValueError, np.partition, d, kth, axis=None) d = np.array([3, 4, 2, 1]) p = np.partition(d, (0, 3)) self.assert_partitioned(p, (0, 3)) self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3)) assert_array_equal(p, np.partition(d, (-3, -1))) assert_array_equal(p, d[np.argpartition(d, (-3, -1))]) d = np.arange(17) np.random.shuffle(d) d.partition(range(d.size)) assert_array_equal(np.arange(17), d) np.random.shuffle(d) assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))]) # test unsorted kth d = np.arange(17) np.random.shuffle(d) keys = np.array([1, 3, 8, -2]) np.random.shuffle(d) p = np.partition(d, keys) self.assert_partitioned(p, keys) p = d[np.argpartition(d, keys)] self.assert_partitioned(p, keys) np.random.shuffle(keys) assert_array_equal(np.partition(d, keys), p) assert_array_equal(d[np.argpartition(d, keys)], p) # equal kth d = np.arange(20)[::-1] self.assert_partitioned(np.partition(d, [5]*4), [5]) self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]), [5]*4 + [6, 13]) self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5]) 
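# Editor's aside -- an illustrative sketch, not part of the original test file:
# passing a sequence of kth values, as test_partition_iterative does above,
# places every requested position in one call, and argpartition returns the
# index arrangement that produces the same result (as the assertions above check).
import numpy as np

vals = np.array([3, 4, 2, 1])
placed = np.partition(vals, (0, 3))    # smallest lands at index 0, largest at 3
assert placed[0] == vals.min() and placed[3] == vals.max()
assert (vals[np.argpartition(vals, (0, 3))] == placed).all()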
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])], [5]*4 + [6, 13]) d = np.arange(12) np.random.shuffle(d) d1 = np.tile(np.arange(12), (4, 1)) map(np.random.shuffle, d1) d0 = np.transpose(d1) kth = (1, 6, 7, -1) p = np.partition(d1, kth, axis=1) pa = d1[np.arange(d1.shape[0])[:, None], d1.argpartition(kth, axis=1)] assert_array_equal(p, pa) for i in range(d1.shape[0]): self.assert_partitioned(p[i,:], kth) p = np.partition(d0, kth, axis=0) pa = d0[np.argpartition(d0, kth, axis=0), np.arange(d0.shape[1])[None,:]] assert_array_equal(p, pa) for i in range(d0.shape[1]): self.assert_partitioned(p[:, i], kth) def test_partition_cdtype(self): d = array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41), ('Lancelot', 1.9, 38)], dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')]) tgt = np.sort(d, order=['age', 'height']) assert_array_equal(np.partition(d, range(d.size), order=['age', 'height']), tgt) assert_array_equal(d[np.argpartition(d, range(d.size), order=['age', 'height'])], tgt) for k in range(d.size): assert_equal(np.partition(d, k, order=['age', 'height'])[k], tgt[k]) assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k], tgt[k]) d = array(['Galahad', 'Arthur', 'zebra', 'Lancelot']) tgt = np.sort(d) assert_array_equal(np.partition(d, range(d.size)), tgt) for k in range(d.size): assert_equal(np.partition(d, k)[k], tgt[k]) assert_equal(d[np.argpartition(d, k)][k], tgt[k]) def test_partition_fuzz(self): # a few rounds of random data testing for j in range(10, 30): for i in range(1, j - 2): d = np.arange(j) np.random.shuffle(d) d = d % np.random.randint(2, 30) idx = np.random.randint(d.size) kth = [0, idx, i, i + 1] tgt = np.sort(d)[kth] assert_array_equal(np.partition(d, kth)[kth], tgt, err_msg="data: %r\n kth: %r" % (d, kth)) def test_flatten(self): x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32) x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32) y0 = np.array([1, 2, 3, 4, 5, 6], np.int32) y0f = np.array([1, 4, 2, 5, 3, 6], np.int32) y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32) y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32) assert_equal(x0.flatten(), y0) assert_equal(x0.flatten('F'), y0f) assert_equal(x0.flatten('F'), x0.T.flatten()) assert_equal(x1.flatten(), y1) assert_equal(x1.flatten('F'), y1f) assert_equal(x1.flatten('F'), x1.T.flatten()) def test_dot(self): a = np.array([[1, 0], [0, 1]]) b = np.array([[0, 1], [1, 0]]) c = np.array([[9, 1], [1, -9]]) assert_equal(np.dot(a, b), a.dot(b)) assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c)) # test passing in an output array c = np.zeros_like(a) a.dot(b, c) assert_equal(c, np.dot(a, b)) # test keyword args c = np.zeros_like(a) a.dot(b=b, out=c) assert_equal(c, np.dot(a, b)) def test_diagonal(self): a = np.arange(12).reshape((3, 4)) assert_equal(a.diagonal(), [0, 5, 10]) assert_equal(a.diagonal(0), [0, 5, 10]) assert_equal(a.diagonal(1), [1, 6, 11]) assert_equal(a.diagonal(-1), [4, 9]) b = np.arange(8).reshape((2, 2, 2)) assert_equal(b.diagonal(), [[0, 6], [1, 7]]) assert_equal(b.diagonal(0), [[0, 6], [1, 7]]) assert_equal(b.diagonal(1), [[2], [3]]) assert_equal(b.diagonal(-1), [[4], [5]]) assert_raises(ValueError, b.diagonal, axis1=0, axis2=0) assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]]) assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]]) assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]]) # Order of axis argument doesn't matter: assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]]) def test_diagonal_deprecation(self): def collect_warning_types(f, *args, **kwargs): with 
warnings.catch_warnings(record=True) as log: warnings.simplefilter("always") f(*args, **kwargs) return [w.category for w in log] a = np.arange(9).reshape(3, 3) # All the different functions raise a warning, but not an error, and # 'a' is not modified: assert_equal(collect_warning_types(a.diagonal().__setitem__, 0, 10), [FutureWarning]) assert_equal(a, np.arange(9).reshape(3, 3)) assert_equal(collect_warning_types(np.diagonal(a).__setitem__, 0, 10), [FutureWarning]) assert_equal(a, np.arange(9).reshape(3, 3)) assert_equal(collect_warning_types(np.diag(a).__setitem__, 0, 10), [FutureWarning]) assert_equal(a, np.arange(9).reshape(3, 3)) # Views also warn d = np.diagonal(a) d_view = d.view() assert_equal(collect_warning_types(d_view.__setitem__, 0, 10), [FutureWarning]) # But the write goes through: assert_equal(d[0], 10) # Only one warning per call to diagonal, though (even if there are # multiple views involved): assert_equal(collect_warning_types(d.__setitem__, 0, 10), []) # Other ways of accessing the data also warn: # .data goes via the C buffer API, gives a read-write # buffer/memoryview. We don't warn until tp_getwritebuf is actually # called, which is not until the buffer is written to. have_memoryview = (hasattr(__builtins__, "memoryview") or "memoryview" in __builtins__) def get_data_and_write(getter): buf_or_memoryview = getter(a.diagonal()) if (have_memoryview and isinstance(buf_or_memoryview, memoryview)): buf_or_memoryview[0] = np.array(1) else: buf_or_memoryview[0] = "x" assert_equal(collect_warning_types(get_data_and_write, lambda d: d.data), [FutureWarning]) if hasattr(np, "getbuffer"): assert_equal(collect_warning_types(get_data_and_write, np.getbuffer), [FutureWarning]) # PEP 3118: if have_memoryview: assert_equal(collect_warning_types(get_data_and_write, memoryview), [FutureWarning]) # Void dtypes can give us a read-write buffer, but only in Python 2: import sys if sys.version_info[0] < 3: aV = np.empty((3, 3), dtype="V10") assert_equal(collect_warning_types(aV.diagonal().item, 0), [FutureWarning]) # XX it seems that direct indexing of a void object returns a void # scalar, which ignores not just WARN_ON_WRITE but even WRITEABLE. # i.e. in this: # a = np.empty(10, dtype="V10") # a.flags.writeable = False # buf = a[0].item() # 'buf' ends up as a writeable buffer. I guess no-one actually # uses void types like this though... # __array_interface also lets a data pointer get away from us log = collect_warning_types(getattr, a.diagonal(), "__array_interface__") assert_equal(log, [FutureWarning]) # ctypeslib goes via __array_interface__: try: # may not exist in python 2.4: import ctypes except ImportError: pass else: log = collect_warning_types(np.ctypeslib.as_ctypes, a.diagonal()) assert_equal(log, [FutureWarning]) # __array_struct__ log = collect_warning_types(getattr, a.diagonal(), "__array_struct__") assert_equal(log, [FutureWarning]) # Make sure that our recommendation to silence the warning by copying # the array actually works: diag_copy = a.diagonal().copy() assert_equal(collect_warning_types(diag_copy.__setitem__, 0, 10), []) # There might be people who get a spurious warning because they are # extracting a buffer, but then use that buffer in a read-only # fashion. And they might get cranky at having to create a superfluous # copy just to work around this spurious warning. A reasonable # solution would be for them to mark their usage as read-only, and # thus safe for both past and future PyArray_Diagonal # semantics. 
So let's make sure that setting the diagonal array to # non-writeable will suppress these warnings: ro_diag = a.diagonal() ro_diag.flags.writeable = False assert_equal(collect_warning_types(getattr, ro_diag, "data"), []) # __array_interface__ has no way to communicate read-onlyness -- # effectively all __array_interface__ arrays are assumed to be # writeable :-( # ro_diag = a.diagonal() # ro_diag.flags.writeable = False # assert_equal(collect_warning_types(getattr, ro_diag, # "__array_interface__"), []) if hasattr(__builtins__, "memoryview"): ro_diag = a.diagonal() ro_diag.flags.writeable = False assert_equal(collect_warning_types(memoryview, ro_diag), []) ro_diag = a.diagonal() ro_diag.flags.writeable = False assert_equal(collect_warning_types(getattr, ro_diag, "__array_struct__"), []) def test_diagonal_memleak(self): # Regression test for a bug that crept in at one point a = np.zeros((100, 100)) assert_(sys.getrefcount(a) < 50) for i in range(100): a.diagonal() assert_(sys.getrefcount(a) < 50) def test_ravel(self): a = np.array([[0, 1], [2, 3]]) assert_equal(a.ravel(), [0, 1, 2, 3]) assert_(not a.ravel().flags.owndata) assert_equal(a.ravel('F'), [0, 2, 1, 3]) assert_equal(a.ravel(order='C'), [0, 1, 2, 3]) assert_equal(a.ravel(order='F'), [0, 2, 1, 3]) assert_equal(a.ravel(order='A'), [0, 1, 2, 3]) assert_(not a.ravel(order='A').flags.owndata) assert_equal(a.ravel(order='K'), [0, 1, 2, 3]) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) a = np.array([[0, 1], [2, 3]], order='F') assert_equal(a.ravel(), [0, 1, 2, 3]) assert_equal(a.ravel(order='A'), [0, 2, 1, 3]) assert_equal(a.ravel(order='K'), [0, 2, 1, 3]) assert_(not a.ravel(order='A').flags.owndata) assert_(not a.ravel(order='K').flags.owndata) assert_equal(a.ravel(), a.reshape(-1)) assert_equal(a.ravel(order='A'), a.reshape(-1, order='A')) a = np.array([[0, 1], [2, 3]])[::-1,:] assert_equal(a.ravel(), [2, 3, 0, 1]) assert_equal(a.ravel(order='C'), [2, 3, 0, 1]) assert_equal(a.ravel(order='F'), [2, 0, 3, 1]) assert_equal(a.ravel(order='A'), [2, 3, 0, 1]) # 'K' doesn't reverse the axes of negative strides assert_equal(a.ravel(order='K'), [2, 3, 0, 1]) assert_(a.ravel(order='K').flags.owndata) class TestSubscripting(TestCase): def test_test_zero_rank(self): x = array([1, 2, 3]) self.assertTrue(isinstance(x[0], np.int_)) if sys.version_info[0] < 3: self.assertTrue(isinstance(x[0], int)) self.assertTrue(type(x[0, ...]) is ndarray) class TestPickling(TestCase): def test_roundtrip(self): import pickle carray = array([[2, 9], [7, 0], [3, 8]]) DATA = [ carray, transpose(carray), array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int), ('c', float)]) ] for a in DATA: assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a) def _loads(self, obj): if sys.version_info[0] >= 3: return loads(obj, encoding='latin1') else: return loads(obj) # version 0 pickles, using protocol=2 to pickle # version 0 doesn't have a version field def test_version0_int8(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' 
a = array([1, 2, 3, 4], dtype=int8) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version0_float32(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' a = array([1.0, 2.0, 3.0, 4.0], dtype=float32) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version0_object(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' a = np.array([{'a':1}, {'b':2}]) p = self._loads(asbytes(s)) assert_equal(a, p) # version 1 pickles, using protocol=2 to pickle def test_version1_int8(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.' a = array([1, 2, 3, 4], dtype=int8) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version1_float32(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.' a = array([1.0, 2.0, 3.0, 4.0], dtype=float32) p = self._loads(asbytes(s)) assert_equal(a, p) def test_version1_object(self): s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.' a = array([{'a':1}, {'b':2}]) p = self._loads(asbytes(s)) assert_equal(a, p) def test_subarray_int_shape(self): s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb." 
        a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
        p = self._loads(asbytes(s))
        assert_equal(a, p)


class TestFancyIndexing(TestCase):
    def test_list(self):
        x = ones((1, 1))
        x[:, [0]] = 2.0
        assert_array_equal(x, array([[2.0]]))

        x = ones((1, 1, 1))
        x[:,:, [0]] = 2.0
        assert_array_equal(x, array([[[2.0]]]))

    def test_tuple(self):
        x = ones((1, 1))
        x[:, (0,)] = 2.0
        assert_array_equal(x, array([[2.0]]))
        x = ones((1, 1, 1))
        x[:,:, (0,)] = 2.0
        assert_array_equal(x, array([[[2.0]]]))

    def test_mask(self):
        x = array([1, 2, 3, 4])
        m = array([0, 1], bool)
        assert_array_equal(x[m], array([2]))

    def test_mask2(self):
        x = array([[1, 2, 3, 4], [5, 6, 7, 8]])
        m = array([0, 1], bool)
        m2 = array([[0, 1], [1, 0]], bool)
        m3 = array([[0, 1]], bool)
        assert_array_equal(x[m], array([[5, 6, 7, 8]]))
        assert_array_equal(x[m2], array([2, 5]))
        assert_array_equal(x[m3], array([2]))

    def test_assign_mask(self):
        x = array([1, 2, 3, 4])
        m = array([0, 1], bool)
        x[m] = 5
        assert_array_equal(x, array([1, 5, 3, 4]))

    def test_assign_mask2(self):
        xorig = array([[1, 2, 3, 4], [5, 6, 7, 8]])
        m = array([0, 1], bool)
        m2 = array([[0, 1], [1, 0]], bool)
        m3 = array([[0, 1]], bool)
        x = xorig.copy()
        x[m] = 10
        assert_array_equal(x, array([[1, 2, 3, 4], [10, 10, 10, 10]]))
        x = xorig.copy()
        x[m2] = 10
        assert_array_equal(x, array([[1, 10, 3, 4], [10, 6, 7, 8]]))
        x = xorig.copy()
        x[m3] = 10
        assert_array_equal(x, array([[1, 10, 3, 4], [5, 6, 7, 8]]))


class TestStringCompare(TestCase):
    def test_string(self):
        g1 = array(["This", "is", "example"])
        g2 = array(["This", "was", "example"])
        assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])

    def test_mixed(self):
        g1 = array(["spam", "spa", "spammer", "and eggs"])
        g2 = "spam"
        assert_array_equal(g1 == g2, [x == g2 for x in g1])
        assert_array_equal(g1 != g2, [x != g2 for x in g1])
        assert_array_equal(g1 < g2, [x < g2 for x in g1])
        assert_array_equal(g1 > g2, [x > g2 for x in g1])
        assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
        assert_array_equal(g1 >= g2, [x >= g2 for x in g1])

    def test_unicode(self):
        g1 = array([sixu("This"), sixu("is"), sixu("example")])
        g2 = array([sixu("This"), sixu("was"), sixu("example")])
        assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
        assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])


class TestArgmax(TestCase):

    nan_arr = [
        ([0, 1, 2, 3, np.nan], 4),
        ([0, 1, 2, np.nan, 3], 3),
        ([np.nan, 0, 1, 2, 3], 0),
        ([np.nan, 0, np.nan, 2, 3], 0),
        ([0, 1, 2, 3, complex(0, np.nan)], 4),
        ([0, 1, 2, 3, complex(np.nan, 0)], 4),
        ([0, 1, 2, complex(np.nan, 0), 3], 3),
        ([0, 1, 2, complex(0, np.nan), 3], 3),
        ([complex(0, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
        ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
        ([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
        ([complex(1, 0),
complex(0, 2), complex(0, 1)], 0), ([complex(1, 0), complex(0, 2), complex(1, 1)], 2), ([np.datetime64('1923-04-14T12:43:12'), np.datetime64('1994-06-21T14:43:15'), np.datetime64('2001-10-15T04:10:32'), np.datetime64('1995-11-25T16:02:16'), np.datetime64('2005-01-04T03:14:12'), np.datetime64('2041-12-03T14:05:03')], 5), ([np.datetime64('1935-09-14T04:40:11'), np.datetime64('1949-10-12T12:32:11'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('2015-11-20T12:20:59'), np.datetime64('1932-09-23T10:10:13'), np.datetime64('2014-10-10T03:50:30')], 3), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), np.datetime64('2001-10-15T04:10:32'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), np.datetime64('2013-05-08T18:15:23')], 0), ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), timedelta(days=-1, seconds=23)], 0), ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), timedelta(days=5, seconds=14)], 1), ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), timedelta(days=10, seconds=43)], 2), # Can't reduce a "flexible type" #(['a', 'z', 'aa', 'zz'], 3), #(['zz', 'a', 'aa', 'a'], 0), #(['aa', 'z', 'zz', 'a'], 2), ] def test_all(self): a = np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): amax = a.max(i) aargmax = a.argmax(i) axes = list(range(a.ndim)) axes.remove(i) assert_(all(amax == aargmax.choose(*a.transpose(i,*axes)))) def test_combinations(self): for arr, pos in self.nan_arr: assert_equal(np.argmax(arr), pos, err_msg="%r"%arr) assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r"%arr) class TestArgmin(TestCase): nan_arr = [ ([0, 1, 2, 3, np.nan], 4), ([0, 1, 2, np.nan, 3], 3), ([np.nan, 0, 1, 2, 3], 0), ([np.nan, 0, np.nan, 2, 3], 0), ([0, 1, 2, 3, complex(0, np.nan)], 4), ([0, 1, 2, 3, complex(np.nan, 0)], 4), ([0, 1, 2, complex(np.nan, 0), 3], 3), ([0, 1, 2, complex(0, np.nan), 3], 3), ([complex(0, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, np.nan), 0, 1, 2, 3], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0), ([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0), ([complex(0, 0), complex(0, 2), complex(0, 1)], 0), ([complex(1, 0), complex(0, 2), complex(0, 1)], 2), ([complex(1, 0), complex(0, 2), complex(1, 1)], 1), ([np.datetime64('1923-04-14T12:43:12'), np.datetime64('1994-06-21T14:43:15'), np.datetime64('2001-10-15T04:10:32'), np.datetime64('1995-11-25T16:02:16'), np.datetime64('2005-01-04T03:14:12'), np.datetime64('2041-12-03T14:05:03')], 0), ([np.datetime64('1935-09-14T04:40:11'), np.datetime64('1949-10-12T12:32:11'), np.datetime64('2010-01-03T05:14:12'), np.datetime64('2014-11-20T12:20:59'), np.datetime64('2015-09-23T10:10:13'), np.datetime64('1932-10-10T03:50:30')], 5), ([np.datetime64('2059-03-14T12:43:12'), np.datetime64('1996-09-21T14:43:15'), np.datetime64('2001-10-15T04:10:32'), np.datetime64('2022-12-25T16:02:16'), np.datetime64('1963-10-04T03:14:12'), np.datetime64('2013-05-08T18:15:23')], 4), ([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35), timedelta(days=-1, seconds=23)], 2), ([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5), timedelta(days=5, seconds=14)], 0), ([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5), timedelta(days=10, seconds=43)], 1), # Can't reduce a "flexible type" #(['a', 'z', 'aa', 'zz'], 0), #(['zz', 'a', 'aa', 'a'], 1), #(['aa', 'z', 'zz', 'a'], 3), ] def test_all(self): a = 
np.random.normal(0, 1, (4, 5, 6, 7, 8)) for i in range(a.ndim): amin = a.min(i) aargmin = a.argmin(i) axes = list(range(a.ndim)) axes.remove(i) assert_(all(amin == aargmin.choose(*a.transpose(i,*axes)))) def test_combinations(self): for arr, pos in self.nan_arr: assert_equal(np.argmin(arr), pos, err_msg="%r"%arr) assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r"%arr) def test_minimum_signed_integers(self): a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8) assert_equal(np.argmin(a), 1) a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16) assert_equal(np.argmin(a), 1) a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32) assert_equal(np.argmin(a), 1) a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64) assert_equal(np.argmin(a), 1) class TestMinMax(TestCase): def test_scalar(self): assert_raises(ValueError, np.amax, 1, 1) assert_raises(ValueError, np.amin, 1, 1) assert_equal(np.amax(1, axis=0), 1) assert_equal(np.amin(1, axis=0), 1) assert_equal(np.amax(1, axis=None), 1) assert_equal(np.amin(1, axis=None), 1) def test_axis(self): assert_raises(ValueError, np.amax, [1, 2, 3], 1000) assert_equal(np.amax([[1, 2, 3]], axis=1), 3) class TestNewaxis(TestCase): def test_basic(self): sk = array([0, -0.1, 0.1]) res = 250*sk[:, newaxis] assert_almost_equal(res.ravel(), 250*sk) class TestClip(TestCase): def _check_range(self, x, cmin, cmax): assert_(np.all(x >= cmin)) assert_(np.all(x <= cmax)) def _clip_type(self,type_group,array_max, clip_min,clip_max,inplace=False, expected_min=None,expected_max=None): if expected_min is None: expected_min = clip_min if expected_max is None: expected_max = clip_max for T in np.sctypes[type_group]: if sys.byteorder == 'little': byte_orders = ['=', '>'] else: byte_orders = ['<', '='] for byteorder in byte_orders: dtype = np.dtype(T).newbyteorder(byteorder) x = (np.random.random(1000) * array_max).astype(dtype) if inplace: x.clip(clip_min, clip_max, x) else: x = x.clip(clip_min, clip_max) byteorder = '=' if x.dtype.byteorder == '|': byteorder = '|' assert_equal(x.dtype.byteorder, byteorder) self._check_range(x, expected_min, expected_max) return x def test_basic(self): for inplace in [False, True]: self._clip_type('float', 1024, -12.8, 100.2, inplace=inplace) self._clip_type('float', 1024, 0, 0, inplace=inplace) self._clip_type('int', 1024, -120, 100.5, inplace=inplace) self._clip_type('int', 1024, 0, 0, inplace=inplace) x = self._clip_type('uint', 1024, -120, 100, expected_min=0, inplace=inplace) x = self._clip_type('uint', 1024, 0, 0, inplace=inplace) def test_record_array(self): rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')]) y = rec['x'].clip(-0.3, 0.5) self._check_range(y, -0.3, 0.5) def test_max_or_min(self): val = np.array([0, 1, 2, 3, 4, 5, 6, 7]) x = val.clip(3) assert_(np.all(x >= 3)) x = val.clip(min=3) assert_(np.all(x >= 3)) x = val.clip(max=4) assert_(np.all(x <= 4)) class TestPutmask(object): def tst_basic(self, x, T, mask, val): np.putmask(x, mask, val) assert_(np.all(x[mask] == T(val))) assert_(x.dtype == T) def test_ip_types(self): unchecked_types = [str, unicode, np.void, object] x = np.random.random(1000)*100 mask = x < 40 for val in [-100, 0, 15]: for types in np.sctypes.values(): for T in types: if T not in unchecked_types: yield self.tst_basic, x.copy().astype(T), T, mask, val def test_mask_size(self): assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5) def tst_byteorder(self, dtype): x = np.array([1, 2, 3], dtype) np.putmask(x, [True, False, True], -1) 
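# Editor's aside -- an illustrative sketch, not part of the original test file:
# np.putmask, exercised by the TestPutmask cases above, writes `values` into
# the array wherever `mask` is True; when `values` is shorter than the array
# it is repeated to the array's length and read at the masked positions.
import numpy as np

arr = np.arange(6)
np.putmask(arr, arr > 2, [-1, -2])     # [-1, -2] repeats to length 6
assert list(arr) == [0, 1, 2, -2, -1, -2]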
assert_array_equal(x, [-1, 2, -1]) def test_ip_byteorder(self): for dtype in ('>i4', '<i4'): yield self.tst_byteorder, dtype def test_record_array(self): # Note mixed byteorder. rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) np.putmask(rec['x'], [True, False], 10) assert_array_equal(rec['x'], [10, 5]) assert_array_equal(rec['y'], [2, 4]) assert_array_equal(rec['z'], [3, 3]) np.putmask(rec['y'], [True, False], 11) assert_array_equal(rec['x'], [10, 5]) assert_array_equal(rec['y'], [11, 4]) assert_array_equal(rec['z'], [3, 3]) def test_masked_array(self): ## x = np.array([1,2,3]) ## z = np.ma.array(x,mask=[True,False,False]) ## np.putmask(z,[True,True,True],3) pass class TestTake(object): def tst_basic(self, x): ind = list(range(x.shape[0])) assert_array_equal(x.take(ind, axis=0), x) def test_ip_types(self): unchecked_types = [str, unicode, np.void, object] x = np.random.random(24)*100 x.shape = 2, 3, 4 for types in np.sctypes.values(): for T in types: if T not in unchecked_types: yield self.tst_basic, x.copy().astype(T) def test_raise(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 assert_raises(IndexError, x.take, [0, 1, 2], axis=0) assert_raises(IndexError, x.take, [-3], axis=0) assert_array_equal(x.take([-1], axis=0)[0], x[1]) def test_clip(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0]) assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1]) def test_wrap(self): x = np.random.random(24)*100 x.shape = 2, 3, 4 assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1]) assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0]) assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1]) def tst_byteorder(self, dtype): x = np.array([1, 2, 3], dtype) assert_array_equal(x.take([0, 2, 1]), [1, 3, 2]) def test_ip_byteorder(self): for dtype in ('>i4', '<i4'): yield self.tst_byteorder, dtype def test_record_array(self): # Note mixed byteorder. rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')]) rec1 = rec.take([1]) assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0) class TestLexsort(TestCase): def test_basic(self): a = [1, 2, 1, 3, 1, 5] b = [0, 4, 5, 6, 2, 3] idx = np.lexsort((b, a)) expected_idx = np.array([0, 4, 2, 1, 3, 5]) assert_array_equal(idx, expected_idx) x = np.vstack((b, a)) idx = np.lexsort(x) assert_array_equal(idx, expected_idx) assert_array_equal(x[1][idx], np.sort(x[1])) class TestIO(object): """Test tofile, fromfile, tostring, and fromstring""" def setUp(self): shape = (2, 4, 3) rand = np.random.random self.x = rand(shape) + rand(shape).astype(np.complex)*1j self.x[0,:, 1] = [nan, inf, -inf, nan] self.dtype = self.x.dtype self.tempdir = tempfile.mkdtemp() self.filename = tempfile.mktemp(dir=self.tempdir) def tearDown(self): shutil.rmtree(self.tempdir) def test_bool_fromstring(self): v = np.array([True, False, True, False], dtype=np.bool_) y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_) assert_array_equal(v, y) def test_empty_files_binary(self): f = open(self.filename, 'w') f.close() y = fromfile(self.filename) assert_(y.size == 0, "Array not empty") def test_empty_files_text(self): f = open(self.filename, 'w') f.close() y = fromfile(self.filename, sep=" ") assert_(y.size == 0, "Array not empty") def test_roundtrip_file(self): f = open(self.filename, 'wb') self.x.tofile(f) f.close() # NB. 
doesn't work with flush+seek, due to use of C stdio f = open(self.filename, 'rb') y = np.fromfile(f, dtype=self.dtype) f.close() assert_array_equal(y, self.x.flat) def test_roundtrip_filename(self): self.x.tofile(self.filename) y = np.fromfile(self.filename, dtype=self.dtype) assert_array_equal(y, self.x.flat) def test_roundtrip_binary_str(self): s = self.x.tostring() y = np.fromstring(s, dtype=self.dtype) assert_array_equal(y, self.x.flat) s = self.x.tostring('F') y = np.fromstring(s, dtype=self.dtype) assert_array_equal(y, self.x.flatten('F')) def test_roundtrip_str(self): x = self.x.real.ravel() s = "@".join(map(str, x)) y = np.fromstring(s, sep="@") # NB. str imbues less precision nan_mask = ~np.isfinite(x) assert_array_equal(x[nan_mask], y[nan_mask]) assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5) def test_roundtrip_repr(self): x = self.x.real.ravel() s = "@".join(map(repr, x)) y = np.fromstring(s, sep="@") assert_array_equal(x, y) def test_file_position_after_fromfile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.close() for mode in ['rb', 'r+b']: err_msg = "%d %s" % (size, mode) f = open(self.filename, mode) f.read(2) np.fromfile(f, dtype=np.float64, count=1) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def test_file_position_after_tofile(self): # gh-4118 sizes = [io.DEFAULT_BUFFER_SIZE//8, io.DEFAULT_BUFFER_SIZE, io.DEFAULT_BUFFER_SIZE*8] for size in sizes: err_msg = "%d" % (size,) f = open(self.filename, 'wb') f.seek(size-1) f.write(b'\0') f.seek(10) f.write(b'12') np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10 + 2 + 8, err_msg=err_msg) f = open(self.filename, 'r+b') f.read(2) f.seek(0, 1) # seek between read&write required by ANSI C np.array([0], dtype=np.float64).tofile(f) pos = f.tell() f.close() assert_equal(pos, 10, err_msg=err_msg) def _check_from(self, s, value, **kw): y = np.fromstring(asbytes(s), **kw) assert_array_equal(y, value) f = open(self.filename, 'wb') f.write(asbytes(s)) f.close() y = np.fromfile(self.filename, **kw) assert_array_equal(y, value) def test_nan(self): self._check_from("nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)", [nan, nan, nan, nan, nan, nan, nan], sep=' ') def test_inf(self): self._check_from("inf +inf -inf infinity -Infinity iNfInItY -inF", [inf, inf, -inf, inf, -inf, inf, -inf], sep=' ') def test_numbers(self): self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133", [1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ') def test_binary(self): self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@', array([1, 2, 3, 4]), dtype='<f4') @dec.slow # takes > 1 minute on mechanical hard drive def test_big_binary(self): """Test workarounds for 32-bit limited fwrite, fseek, and ftell calls in windows. These normally would hang doing something like this. 
See http://projects.scipy.org/numpy/ticket/1660""" if sys.platform != 'win32': return try: # before workarounds, only up to 2**32-1 worked fourgbplus = 2**32 + 2**16 testbytes = np.arange(8, dtype=np.int8) n = len(testbytes) flike = tempfile.NamedTemporaryFile() f = flike.file np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f) flike.seek(0) a = np.fromfile(f, dtype=np.int8) flike.close() assert_(len(a) == fourgbplus) # check only start and end for speed: assert_((a[:n] == testbytes).all()) assert_((a[-n:] == testbytes).all()) except (MemoryError, ValueError): pass def test_string(self): self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',') def test_counted_string(self): self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',') self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',') self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',') def test_string_with_ws(self): self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ') def test_counted_string_with_ws(self): self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int, sep=' ') def test_ascii(self): self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',') self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',') def test_malformed(self): self._check_from('1.234 1,234', [1.234, 1.], sep=' ') def test_long_sep(self): self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_') def test_dtype(self): v = np.array([1, 2, 3, 4], dtype=np.int_) self._check_from('1,2,3,4', v, sep=',', dtype=np.int_) def test_dtype_bool(self): # can't use _check_from because fromstring can't handle True/False v = np.array([True, False, True, False], dtype=np.bool_) s = '1,0,-2.3,0' f = open(self.filename, 'wb') f.write(asbytes(s)) f.close() y = np.fromfile(self.filename, sep=',', dtype=np.bool_) assert_(y.dtype == '?') assert_array_equal(y, v) def test_tofile_sep(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',') f.close() f = open(self.filename, 'r') s = f.read() f.close() assert_equal(s, '1.51,2.0,3.51,4.0') def test_tofile_format(self): x = np.array([1.51, 2, 3.51, 4], dtype=float) f = open(self.filename, 'w') x.tofile(f, sep=',', format='%.2f') f.close() f = open(self.filename, 'r') s = f.read() f.close() assert_equal(s, '1.51,2.00,3.51,4.00') def test_locale(self): in_foreign_locale(self.test_numbers)() in_foreign_locale(self.test_nan)() in_foreign_locale(self.test_inf)() in_foreign_locale(self.test_counted_string)() in_foreign_locale(self.test_ascii)() in_foreign_locale(self.test_malformed)() in_foreign_locale(self.test_tofile_sep)() in_foreign_locale(self.test_tofile_format)() class TestFromBuffer(object): def tst_basic(self, buffer, expected, kwargs): assert_array_equal(np.frombuffer(buffer,**kwargs), expected) def test_ip_basic(self): for byteorder in ['<', '>']: for dtype in [float, int, np.complex]: dt = np.dtype(dtype).newbyteorder(byteorder) x = (np.random.random((4, 7))*5).astype(dt) buf = x.tostring() yield self.tst_basic, buf, x.flat, {'dtype':dt} def test_empty(self): yield self.tst_basic, asbytes(''), np.array([]), {} class TestFlat(TestCase): def setUp(self): a0 = arange(20.0) a = a0.reshape(4, 5) a0.shape = (4, 5) a.flags.writeable = False self.a = a self.b = a[::2, ::2] self.a0 = a0 self.b0 = a0[::2, ::2] def test_contiguous(self): testpassed = False try: self.a.flat[12] = 100.0 except ValueError: testpassed = True assert testpassed assert self.a.flat[12] == 12.0 def test_discontiguous(self): testpassed = False try: self.b.flat[4] = 100.0 except 
ValueError: testpassed = True assert testpassed assert self.b.flat[4] == 12.0 def test___array__(self): c = self.a.flat.__array__() d = self.b.flat.__array__() e = self.a0.flat.__array__() f = self.b0.flat.__array__() assert c.flags.writeable is False assert d.flags.writeable is False assert e.flags.writeable is True assert f.flags.writeable is True assert c.flags.updateifcopy is False assert d.flags.updateifcopy is False assert e.flags.updateifcopy is False assert f.flags.updateifcopy is True assert f.base is self.b0 class TestResize(TestCase): def test_basic(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) x.resize((5, 5)) assert_array_equal(x.flat[:9], np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat) assert_array_equal(x[9:].flat, 0) def test_check_reference(self): x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) y = x self.assertRaises(ValueError, x.resize, (5, 1)) def test_int_shape(self): x = np.eye(3) x.resize(3) assert_array_equal(x, np.eye(3)[0,:]) def test_none_shape(self): x = np.eye(3) x.resize(None) assert_array_equal(x, np.eye(3)) x.resize() assert_array_equal(x, np.eye(3)) def test_invalid_arguements(self): self.assertRaises(TypeError, np.eye(3).resize, 'hi') self.assertRaises(ValueError, np.eye(3).resize, -1) self.assertRaises(TypeError, np.eye(3).resize, order=1) self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi') def test_freeform_shape(self): x = np.eye(3) x.resize(3, 2, 1) assert_(x.shape == (3, 2, 1)) def test_zeros_appended(self): x = np.eye(3) x.resize(2, 3, 3) assert_array_equal(x[0], np.eye(3)) assert_array_equal(x[1], np.zeros((3, 3))) class TestRecord(TestCase): def test_field_rename(self): dt = np.dtype([('f', float), ('i', int)]) dt.names = ['p', 'q'] assert_equal(dt.names, ['p', 'q']) if sys.version_info[0] >= 3: def test_bytes_fields(self): # Bytes are not allowed in field names and not recognized in titles # on Py3 assert_raises(TypeError, np.dtype, [(asbytes('a'), int)]) assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)]) dt = np.dtype([((asbytes('a'), 'b'), int)]) assert_raises(ValueError, dt.__getitem__, asbytes('a')) x = np.array([(1,), (2,), (3,)], dtype=dt) assert_raises(ValueError, x.__getitem__, asbytes('a')) y = x[0] assert_raises(IndexError, y.__getitem__, asbytes('a')) else: def test_unicode_field_titles(self): # Unicode field titles are added to field dict on Py2 title = unicode('b') dt = np.dtype([((title, 'a'), int)]) dt[title] dt['a'] x = np.array([(1,), (2,), (3,)], dtype=dt) x[title] x['a'] y = x[0] y[title] y['a'] def test_unicode_field_names(self): # Unicode field names are not allowed on Py2 title = unicode('b') assert_raises(TypeError, np.dtype, [(title, int)]) assert_raises(TypeError, np.dtype, [(('a', title), int)]) def test_field_names(self): # Test unicode and 8-bit / byte strings can be used a = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) is_py3 = sys.version_info[0] >= 3 if is_py3: funcs = (str,) # byte string indexing fails gracefully assert_raises(ValueError, a.__setitem__, asbytes('f1'), 1) assert_raises(ValueError, a.__getitem__, asbytes('f1')) assert_raises(ValueError, a['f1'].__setitem__, asbytes('sf1'), 1) assert_raises(ValueError, a['f1'].__getitem__, asbytes('sf1')) else: funcs = (str, unicode) for func in funcs: b = a.copy() fn1 = func('f1') b[fn1] = 1 assert_equal(b[fn1], 1) fnn = func('not at all') assert_raises(ValueError, b.__setitem__, fnn, 1) assert_raises(ValueError, b.__getitem__, fnn) b[0][fn1] = 2 assert_equal(b[fn1], 2) # Subfield 
assert_raises(IndexError, b[0].__setitem__, fnn, 1) assert_raises(IndexError, b[0].__getitem__, fnn) # Subfield fn3 = func('f3') sfn1 = func('sf1') b[fn3][sfn1] = 1 assert_equal(b[fn3][sfn1], 1) assert_raises(ValueError, b[fn3].__setitem__, fnn, 1) assert_raises(ValueError, b[fn3].__getitem__, fnn) # multiple Subfields fn2 = func('f2') b[fn2] = 3 assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3)) assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2)) assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,))) # view of subfield view/copy assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3)) assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2)) view_dtype=[('f1', 'i4'), ('f3', [('', 'i4')])] assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,))) # non-ascii unicode field indexing is well behaved if not is_py3: raise SkipTest('non ascii unicode field indexing skipped; ' 'raises segfault on python 2.x') else: assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1) assert_raises(ValueError, a.__getitem__, sixu('\u03e0')) def test_field_names_deprecation(self): def collect_warning_types(f, *args, **kwargs): with warnings.catch_warnings(record=True) as log: warnings.simplefilter("always") f(*args, **kwargs) return [w.category for w in log] a = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) a['f1'][0] = 1 a['f2'][0] = 2 a['f3'][0] = (3,) b = np.zeros((1,), dtype=[('f1', 'i4'), ('f2', 'i4'), ('f3', [('sf1', 'i4')])]) b['f1'][0] = 1 b['f2'][0] = 2 b['f3'][0] = (3,) # All the different functions raise a warning, but not an error, and # 'a' is not modified: assert_equal(collect_warning_types(a[['f1', 'f2']].__setitem__, 0, (10, 20)), [FutureWarning]) assert_equal(a, b) # Views also warn subset = a[['f1', 'f2']] subset_view = subset.view() assert_equal(collect_warning_types(subset_view['f1'].__setitem__, 0, 10), [FutureWarning]) # But the write goes through: assert_equal(subset['f1'][0], 10) # Only one warning per multiple field indexing, though (even if there are # multiple views involved): assert_equal(collect_warning_types(subset['f1'].__setitem__, 0, 10), []) def test_record_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') a.flags.writeable = False b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')]) b.flags.writeable = False c = np.array([(1, 2), (3, 4)], dtype='i1,i2') c.flags.writeable = False self.assertTrue(hash(a[0]) == hash(a[1])) self.assertTrue(hash(a[0]) == hash(b[0])) self.assertTrue(hash(a[0]) != hash(b[1])) self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0]) def test_record_no_hash(self): a = np.array([(1, 2), (1, 2)], dtype='i1,i2') self.assertRaises(TypeError, hash, a[0]) class TestView(TestCase): def test_basic(self): x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype=[('r', np.int8), ('g', np.int8), ('b', np.int8), ('a', np.int8)]) # We must be specific about the endianness here: y = x.view(dtype='<i4') # ... and again without the keyword. 
z = x.view('<i4') assert_array_equal(y, z) assert_array_equal(y, [67305985, 134678021]) def _mean(a, **args): return a.mean(**args) def _var(a, **args): return a.var(**args) def _std(a, **args): return a.std(**args) class TestStats(TestCase): funcs = [_mean, _var, _std] def setUp(self): np.random.seed(range(3)) self.rmat = np.random.random((4, 5)) self.cmat = self.rmat + 1j * self.rmat self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat]) self.omat = self.omat.reshape(4, 5) def test_keepdims(self): mat = np.eye(3) for f in self.funcs: for axis in [0, 1]: res = f(mat, axis=axis, keepdims=True) assert_(res.ndim == mat.ndim) assert_(res.shape[axis] == 1) for axis in [None]: res = f(mat, axis=axis, keepdims=True) assert_(res.shape == (1, 1)) def test_out(self): mat = np.eye(3) for f in self.funcs: out = np.zeros(3) tgt = f(mat, axis=1) res = f(mat, axis=1, out=out) assert_almost_equal(res, out) assert_almost_equal(res, tgt) out = np.empty(2) assert_raises(ValueError, f, mat, axis=1, out=out) out = np.empty((2, 2)) assert_raises(ValueError, f, mat, axis=1, out=out) def test_dtype_from_input(self): icodes = np.typecodes['AllInteger'] fcodes = np.typecodes['AllFloat'] # object type for f in self.funcs: mat = np.array([[Decimal(1)]*3]*3) tgt = mat.dtype.type res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = type(f(mat, axis=None)) assert_(res is Decimal) # integer types for f in self.funcs: for c in icodes: mat = np.eye(3, dtype=c) tgt = np.float64 res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None).dtype.type assert_(res is tgt) # mean for float types for f in [_mean]: for c in fcodes: mat = np.eye(3, dtype=c) tgt = mat.dtype.type res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None).dtype.type assert_(res is tgt) # var, std for float types for f in [_var, _std]: for c in fcodes: mat = np.eye(3, dtype=c) # deal with complex types tgt = mat.real.dtype.type res = f(mat, axis=1).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None).dtype.type assert_(res is tgt) def test_dtype_from_dtype(self): icodes = np.typecodes['AllInteger'] fcodes = np.typecodes['AllFloat'] mat = np.eye(3) # stats for integer types # fixme: # this needs definition as there are lots places along the line # where type casting may take place. 
#for f in self.funcs: #for c in icodes: #tgt = np.dtype(c).type #res = f(mat, axis=1, dtype=c).dtype.type #assert_(res is tgt) ## scalar case #res = f(mat, axis=None, dtype=c).dtype.type #assert_(res is tgt) # stats for float types for f in self.funcs: for c in fcodes: tgt = np.dtype(c).type res = f(mat, axis=1, dtype=c).dtype.type assert_(res is tgt) # scalar case res = f(mat, axis=None, dtype=c).dtype.type assert_(res is tgt) def test_ddof(self): for f in [_var]: for ddof in range(3): dim = self.rmat.shape[1] tgt = f(self.rmat, axis=1) * dim res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof) for f in [_std]: for ddof in range(3): dim = self.rmat.shape[1] tgt = f(self.rmat, axis=1) * np.sqrt(dim) res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof) assert_almost_equal(res, tgt) assert_almost_equal(res, tgt) def test_ddof_too_big(self): dim = self.rmat.shape[1] for f in [_var, _std]: for ddof in range(dim, dim + 2): with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') res = f(self.rmat, axis=1, ddof=ddof) assert_(not (res < 0).any()) assert_(len(w) > 0) assert_(issubclass(w[0].category, RuntimeWarning)) def test_empty(self): A = np.zeros((0, 3)) for f in self.funcs: for axis in [0, None]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_(np.isnan(f(A, axis=axis)).all()) assert_(len(w) > 0) assert_(issubclass(w[0].category, RuntimeWarning)) for axis in [1]: with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') assert_equal(f(A, axis=axis), np.zeros([])) def test_mean_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * mat.shape[axis] assert_almost_equal(res, tgt) for axis in [None]: tgt = mat.sum(axis=axis) res = _mean(mat, axis=axis) * np.prod(mat.shape) assert_almost_equal(res, tgt) def test_var_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1, None]: msqr = _mean(mat * mat.conj(), axis=axis) mean = _mean(mat, axis=axis) tgt = msqr - mean * mean.conjugate() res = _var(mat, axis=axis) assert_almost_equal(res, tgt) def test_std_values(self): for mat in [self.rmat, self.cmat, self.omat]: for axis in [0, 1, None]: tgt = np.sqrt(_var(mat, axis=axis)) res = _std(mat, axis=axis) assert_almost_equal(res, tgt) def test_subclass(self): class TestArray(np.ndarray): def __new__(cls, data, info): result = np.array(data) result = result.view(cls) result.info = info return result def __array_finalize__(self, obj): self.info = getattr(obj, "info", '') dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba') res = dat.mean(1) assert_(res.info == dat.info) res = dat.std(1) assert_(res.info == dat.info) res = dat.var(1) assert_(res.info == dat.info) class TestDot(TestCase): def test_dot_2args(self): from numpy.core.multiarray import dot a = np.array([[1, 2], [3, 4]], dtype=float) b = np.array([[1, 0], [1, 1]], dtype=float) c = np.array([[3, 2], [7, 4]], dtype=float) d = dot(a, b) assert_allclose(c, d) def test_dot_3args(self): from numpy.core.multiarray import dot np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 32)) for i in range(12): dot(f, v, r) assert_equal(sys.getrefcount(r), 2) r2 = dot(f, v, out=None) assert_array_equal(r2, r) assert_(r is dot(f, v, out=r)) v = v[:, 0].copy() # v.shape == (16,) r = r[:, 0].copy() # r.shape == (1024,) r2 = dot(f, v) assert_(r is dot(f, v, r)) assert_array_equal(r2, r) def 
test_dot_3args_errors(self): from numpy.core.multiarray import dot np.random.seed(22) f = np.random.random_sample((1024, 16)) v = np.random.random_sample((16, 32)) r = np.empty((1024, 31)) assert_raises(ValueError, dot, f, v, r) r = np.empty((1024,)) assert_raises(ValueError, dot, f, v, r) r = np.empty((32,)) assert_raises(ValueError, dot, f, v, r) r = np.empty((32, 1024)) assert_raises(ValueError, dot, f, v, r) assert_raises(ValueError, dot, f, v, r.T) r = np.empty((1024, 64)) assert_raises(ValueError, dot, f, v, r[:, ::2]) assert_raises(ValueError, dot, f, v, r[:, :32]) r = np.empty((1024, 32), dtype=np.float32) assert_raises(ValueError, dot, f, v, r) r = np.empty((1024, 32), dtype=int) assert_raises(ValueError, dot, f, v, r) def test_dot_scalar_and_matrix_of_objects(self): # Ticket #2469 arr = np.matrix([1, 2], dtype=object) desired = np.matrix([[3, 6]], dtype=object) assert_equal(np.dot(arr, 3), desired) assert_equal(np.dot(3, arr), desired) class TestInner(TestCase): def test_inner_scalar_and_matrix_of_objects(self): # Ticket #4482 arr = np.matrix([1, 2], dtype=object) desired = np.matrix([[3, 6]], dtype=object) assert_equal(np.inner(arr, 3), desired) assert_equal(np.inner(3, arr), desired) class TestSummarization(TestCase): def test_1d(self): A = np.arange(1001) strA = '[ 0 1 2 ..., 998 999 1000]' assert_(str(A) == strA) reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])' assert_(repr(A) == reprA) def test_2d(self): A = np.arange(1002).reshape(2, 501) strA = '[[ 0 1 2 ..., 498 499 500]\n' \ ' [ 501 502 503 ..., 999 1000 1001]]' assert_(str(A) == strA) reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \ ' [ 501, 502, 503, ..., 999, 1000, 1001]])' assert_(repr(A) == reprA) class TestChoose(TestCase): def setUp(self): self.x = 2*ones((3,), dtype=int) self.y = 3*ones((3,), dtype=int) self.x2 = 2*ones((2, 3), dtype=int) self.y2 = 3*ones((2, 3), dtype=int) self.ind = [0, 0, 1] def test_basic(self): A = np.choose(self.ind, (self.x, self.y)) assert_equal(A, [2, 2, 3]) def test_broadcast1(self): A = np.choose(self.ind, (self.x2, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) def test_broadcast2(self): A = np.choose(self.ind, (self.x, self.y2)) assert_equal(A, [[2, 2, 3], [2, 2, 3]]) # TODO: test for multidimensional NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4} class TestNeighborhoodIter(TestCase): # Simple, 2d tests def _test_simple2d(self, dt): # Test zero and one padding for simple data type x = np.array([[0, 1], [2, 3]], dtype=dt) r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt), np.array([[0, 0, 0], [0, 1, 0]], dtype=dt), np.array([[0, 0, 1], [0, 2, 3]], dtype=dt), np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt), np.array([[1, 1, 1], [0, 1, 1]], dtype=dt), np.array([[1, 0, 1], [1, 2, 3]], dtype=dt), np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one']) assert_array_equal(l, r) r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt), np.array([[4, 4, 4], [0, 1, 4]], dtype=dt), np.array([[4, 0, 1], [4, 2, 3]], dtype=dt), np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant']) assert_array_equal(l, r) def test_simple2d(self): self._test_simple2d(np.float) def test_simple2d_object(self): self._test_simple2d(Decimal) def _test_mirror2d(self, dt): x = np.array([[0, 1], [2, 3]], dtype=dt) r = 
[np.array([[0, 0, 1], [0, 0, 1]], dtype=dt), np.array([[0, 1, 1], [0, 1, 1]], dtype=dt), np.array([[0, 0, 1], [2, 2, 3]], dtype=dt), np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)] l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror']) assert_array_equal(l, r) def test_mirror2d(self): self._test_mirror2d(np.float) def test_mirror2d_object(self): self._test_mirror2d(Decimal) # Simple, 1d tests def _test_simple(self, dt): # Test padding with constant values x = np.linspace(1, 5, 5).astype(dt) r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]] l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]] l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one']) assert_array_equal(l, r) r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]] l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant']) assert_array_equal(l, r) def test_simple_float(self): self._test_simple(np.float) def test_simple_object(self): self._test_simple(Decimal) # Test mirror modes def _test_mirror(self, dt): x = np.linspace(1, 5, 5).astype(dt) r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt) l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror']) self.assertTrue([i.dtype == dt for i in l]) assert_array_equal(l, r) def test_mirror(self): self._test_mirror(np.float) def test_mirror_object(self): self._test_mirror(Decimal) # Circular mode def _test_circular(self, dt): x = np.linspace(1, 5, 5).astype(dt) r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5], [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt) l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular']) assert_array_equal(l, r) def test_circular(self): self._test_circular(np.float) def test_circular_object(self): self._test_circular(Decimal) # Test stacking neighborhood iterators class TestStackedNeighborhoodIter(TestCase): # Simple, 1d test: stacking 2 constant-padded neigh iterators def test_simple_const(self): dt = np.float64 # Test zero and one padding for simple data type x = np.array([1, 2, 3], dtype=dt) r = [np.array([0], dtype=dt), np.array([0], dtype=dt), np.array([1], dtype=dt), np.array([2], dtype=dt), np.array([3], dtype=dt), np.array([0], dtype=dt), np.array([0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'], [0, 0], NEIGH_MODE['zero']) assert_array_equal(l, r) r = [np.array([1, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-1, 1], NEIGH_MODE['one']) assert_array_equal(l, r) # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and # mirror padding def test_simple_mirror(self): dt = np.float64 # Stacking zero on top of mirror x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 1], dtype=dt), np.array([1, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 3], dtype=dt), np.array([3, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'], [-1, 1], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt)] l = 
test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero: 2nd x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 0], dtype=dt), np.array([0, 0, 3], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero: 3rd x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 0, 0, 1, 2], dtype=dt), np.array([0, 0, 1, 2, 3], dtype=dt), np.array([0, 1, 2, 3, 0], dtype=dt), np.array([1, 2, 3, 0, 0], dtype=dt), np.array([2, 3, 0, 0, 3], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and # circular padding def test_simple_circular(self): dt = np.float64 # Stacking zero on top of mirror x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 3, 1], dtype=dt), np.array([3, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 1], dtype=dt), np.array([3, 1, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'], [-1, 1], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero x = np.array([1, 2, 3], dtype=dt) r = [np.array([3, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt), np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 0], NEIGH_MODE['circular']) assert_array_equal(l, r) # Stacking mirror on top of zero: 2nd x = np.array([1, 2, 3], dtype=dt) r = [np.array([0, 1, 2], dtype=dt), np.array([1, 2, 3], dtype=dt), np.array([2, 3, 0], dtype=dt), np.array([3, 0, 0], dtype=dt), np.array([0, 0, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [0, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) # Stacking mirror on top of zero: 3rd x = np.array([1, 2, 3], dtype=dt) r = [np.array([3, 0, 0, 1, 2], dtype=dt), np.array([0, 0, 1, 2, 3], dtype=dt), np.array([0, 1, 2, 3, 0], dtype=dt), np.array([1, 2, 3, 0, 0], dtype=dt), np.array([2, 3, 0, 0, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'], [-2, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator # being strictly within the array def test_simple_strict_within(self): dt = np.float64 # Stacking zero on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 0], dtype=dt)] l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['zero']) assert_array_equal(l, r) # Stacking mirror on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 3], dtype=dt)] l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['mirror']) assert_array_equal(l, r) # Stacking mirror on top of zero, first neighborhood strictly inside the # array x = np.array([1, 2, 3], dtype=dt) r = [np.array([1, 2, 3, 1], dtype=dt)] l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular']) assert_array_equal(l, r) class TestWarnings(object): def test_complex_warning(self): x = np.array([1, 2]) y = np.array([1-2j, 1+2j]) with 
warnings.catch_warnings(): warnings.simplefilter("error", np.ComplexWarning) assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y) assert_equal(x, [1, 2]) class TestMinScalarType(object): def test_usigned_shortshort(self): dt = np.min_scalar_type(2**8-1) wanted = np.dtype('uint8') assert_equal(wanted, dt) def test_usigned_short(self): dt = np.min_scalar_type(2**16-1) wanted = np.dtype('uint16') assert_equal(wanted, dt) def test_usigned_int(self): dt = np.min_scalar_type(2**32-1) wanted = np.dtype('uint32') assert_equal(wanted, dt) def test_usigned_longlong(self): dt = np.min_scalar_type(2**63-1) wanted = np.dtype('uint64') assert_equal(wanted, dt) def test_object(self): dt = np.min_scalar_type(2**64) wanted = np.dtype('O') assert_equal(wanted, dt) if sys.version_info[:2] == (2, 6): from numpy.core.multiarray import memorysimpleview as memoryview from numpy.core._internal import _dtype_from_pep3118 class TestPEP3118Dtype(object): def _check(self, spec, wanted): dt = np.dtype(wanted) if isinstance(wanted, list) and isinstance(wanted[-1], tuple): if wanted[-1][0] == '': names = list(dt.names) names[-1] = '' dt.names = tuple(names) assert_equal(_dtype_from_pep3118(spec), dt, err_msg="spec %r != dtype %r" % (spec, wanted)) def test_native_padding(self): align = np.dtype('i').alignment for j in range(8): if j == 0: s = 'bi' else: s = 'b%dxi' % j self._check('@'+s, {'f0': ('i1', 0), 'f1': ('i', align*(1 + j//align))}) self._check('='+s, {'f0': ('i1', 0), 'f1': ('i', 1+j)}) def test_native_padding_2(self): # Native padding should work also for structs and sub-arrays self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)}) self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)}) def test_trailing_padding(self): # Trailing padding should be included, *and*, the item size # should match the alignment if in aligned mode align = np.dtype('i').alignment def VV(n): return 'V%d' % (align*(1 + (n-1)//align)) self._check('ix', [('f0', 'i'), ('', VV(1))]) self._check('ixx', [('f0', 'i'), ('', VV(2))]) self._check('ixxx', [('f0', 'i'), ('', VV(3))]) self._check('ixxxx', [('f0', 'i'), ('', VV(4))]) self._check('i7x', [('f0', 'i'), ('', VV(7))]) self._check('^ix', [('f0', 'i'), ('', 'V1')]) self._check('^ixx', [('f0', 'i'), ('', 'V2')]) self._check('^ixxx', [('f0', 'i'), ('', 'V3')]) self._check('^ixxxx', [('f0', 'i'), ('', 'V4')]) self._check('^i7x', [('f0', 'i'), ('', 'V7')]) def test_native_padding_3(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt) dt = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt) def test_padding_with_array_inside_struct(self): dt = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt) def test_byteorder_inside_struct(self): # The byte order after @T{=i} should be '=', not '@'. # Check this by noting the absence of native alignment. 
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0), 'f1': ('i', 5)}) def test_intra_padding(self): # Natively aligned sub-arrays may require some internal padding align = np.dtype('i').alignment def VV(n): return 'V%d' % (align*(1 + (n-1)//align)) self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,))) class TestNewBufferProtocol(object): def _check_roundtrip(self, obj): obj = np.asarray(obj) x = memoryview(obj) y = np.asarray(x) y2 = np.array(x) assert_(not y.flags.owndata) assert_(y2.flags.owndata) assert_equal(y.dtype, obj.dtype) assert_array_equal(obj, y) assert_equal(y2.dtype, obj.dtype) assert_array_equal(obj, y2) def test_roundtrip(self): x = np.array([1, 2, 3, 4, 5], dtype='i4') self._check_roundtrip(x) x = np.array([[1, 2], [3, 4]], dtype=np.float64) self._check_roundtrip(x) x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] self._check_roundtrip(x) dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)], dtype=dt) self._check_roundtrip(x) x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))]) self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='>i2') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<i2') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='>i4') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<i4') self._check_roundtrip(x) # check long long can be represented as non-native x = np.array([1, 2, 3], dtype='>q') self._check_roundtrip(x) # Native-only data types can be passed through the buffer interface # only in native byte order if sys.byteorder == 'little': x = np.array([1, 2, 3], dtype='>g') assert_raises(ValueError, self._check_roundtrip, x) x = np.array([1, 2, 3], dtype='<g') self._check_roundtrip(x) else: x = np.array([1, 2, 3], dtype='>g') self._check_roundtrip(x) x = np.array([1, 2, 3], dtype='<g') assert_raises(ValueError, self._check_roundtrip, x) def test_roundtrip_half(self): half_list = [ 1.0, -2.0, 6.5504 * 10**4, # (max half precision) 2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal) 2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal) 0.0, -0.0, float('+inf'), float('-inf'), 0.333251953125, # ~= 1/3 ] x = np.array(half_list, dtype='>e') self._check_roundtrip(x) x = np.array(half_list, dtype='<e') self._check_roundtrip(x) def test_roundtrip_single_types(self): for typ in np.typeDict.values(): dtype = np.dtype(typ) if dtype.char in 'Mm': # datetimes cannot be used in buffers continue if dtype.char == 'V': # skip void continue x = np.zeros(4, dtype=dtype) self._check_roundtrip(x) if dtype.char not in 'qQgG': dt = dtype.newbyteorder('<') x = np.zeros(4, dtype=dt) self._check_roundtrip(x) dt = dtype.newbyteorder('>') x = np.zeros(4, dtype=dt) self._check_roundtrip(x) def test_export_simple_1d(self): x = np.array([1, 2, 3, 4, 5], dtype='i') y = memoryview(x) assert_equal(y.format, 'i') assert_equal(y.shape, (5,)) assert_equal(y.ndim, 1) assert_equal(y.strides, (4,)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_simple_nd(self): x = np.array([[1, 2], [3, 4]], dtype=np.float64) y = memoryview(x) assert_equal(y.format, 'd') assert_equal(y.shape, (2, 2)) assert_equal(y.ndim, 2) 
assert_equal(y.strides, (16, 8)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 8) def test_export_discontiguous(self): x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:] y = memoryview(x) assert_equal(y.format, 'f') assert_equal(y.shape, (3, 3)) assert_equal(y.ndim, 2) assert_equal(y.strides, (36, 4)) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 4) def test_export_record(self): dt = [('a', 'b'), ('b', 'h'), ('c', 'i'), ('d', 'l'), ('dx', 'q'), ('e', 'B'), ('f', 'H'), ('g', 'I'), ('h', 'L'), ('hx', 'Q'), ('i', np.single), ('j', np.double), ('k', np.longdouble), ('ix', np.csingle), ('jx', np.cdouble), ('kx', np.clongdouble), ('l', 'S4'), ('m', 'U4'), ('n', 'V3'), ('o', '?'), ('p', np.half), ] x = np.array( [(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, asbytes('aaaa'), 'bbbb', asbytes(' '), True, 1.0)], dtype=dt) y = memoryview(x) assert_equal(y.shape, (1,)) assert_equal(y.ndim, 1) assert_equal(y.suboffsets, EMPTY) sz = sum([dtype(b).itemsize for a, b in dt]) if dtype('l').itemsize == 4: assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') else: assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}') # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides if not (np.ones(1).strides[0] == np.iinfo(np.intp).max): assert_equal(y.strides, (sz,)) assert_equal(y.itemsize, sz) def test_export_subarray(self): x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))]) y = memoryview(x) assert_equal(y.format, 'T{(2,2)i:a:}') assert_equal(y.shape, EMPTY) assert_equal(y.ndim, 0) assert_equal(y.strides, EMPTY) assert_equal(y.suboffsets, EMPTY) assert_equal(y.itemsize, 16) def test_export_endian(self): x = np.array([1, 2, 3], dtype='>i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, '>i') else: assert_equal(y.format, 'i') x = np.array([1, 2, 3], dtype='<i') y = memoryview(x) if sys.byteorder == 'little': assert_equal(y.format, 'i') else: assert_equal(y.format, '<i') def test_export_flags(self): # Check SIMPLE flag, see also gh-3613 (exception should be BufferError) assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',)) def test_padding(self): for j in range(8): x = np.array([(1,), (2,)], dtype={'f0': (int, j)}) self._check_roundtrip(x) def test_reference_leak(self): count_1 = sys.getrefcount(np.core._internal) a = np.zeros(4) b = memoryview(a) c = np.asarray(b) count_2 = sys.getrefcount(np.core._internal) assert_equal(count_1, count_2) def test_padded_struct_array(self): dt1 = np.dtype( [('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')], align=True) x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1) self._check_roundtrip(x1) dt2 = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')], align=True) x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2) self._check_roundtrip(x2) dt3 = np.dtype( [('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'), ('e', 'b'), ('sub', np.dtype('b,i', align=True))]) x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3) self._check_roundtrip(x3) class TestArrayAttributeDeletion(object): def test_multiarray_writable_attributes_deletion(self): """ticket #2046, should not seqfault, raise AttributeError""" a = np.ones(2) attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat'] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_not_writable_attributes_deletion(self): a = np.ones(2) 
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base", "ctypes", "T", "__array_interface__", "__array_struct__", "__array_priority__", "__array_finalize__"] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_writable_attribute_deletion(self): a = np.ones(2).flags attr = ['updateifcopy', 'aligned', 'writeable'] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_multiarray_flags_not_writable_attribute_deletion(self): a = np.ones(2).flags attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran", "owndata", "fnc", "forc", "behaved", "carray", "farray", "num"] for s in attr: assert_raises(AttributeError, delattr, a, s) def test_array_interface(): # Test scalar coercion within the array interface class Foo(object): def __init__(self, value): self.value = value self.iface = {'typestr' : '=f8'} def __float__(self): return float(self.value) @property def __array_interface__(self): return self.iface f = Foo(0.5) assert_equal(np.array(f), 0.5) assert_equal(np.array([f]), [0.5]) assert_equal(np.array([f, f]), [0.5, 0.5]) assert_equal(np.array(f).dtype, np.dtype('=f8')) # Test various shape definitions f.iface['shape'] = () assert_equal(np.array(f), 0.5) f.iface['shape'] = None assert_raises(TypeError, np.array, f) f.iface['shape'] = (1, 1) assert_equal(np.array(f), [[0.5]]) f.iface['shape'] = (2,) assert_raises(ValueError, np.array, f) # test scalar with no shape class ArrayLike(object): array = np.array(1) __array_interface__ = array.__array_interface__ assert_equal(np.array(ArrayLike()), 1) def test_flat_element_deletion(): it = np.ones(3).flat try: del it[1] del it[1:2] except TypeError: pass except: raise AssertionError def test_scalar_element_deletion(): a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')]) assert_raises(ValueError, a[0].__delitem__, 'x') class TestMemEventHook(TestCase): def test_mem_seteventhook(self): # The actual tests are within the C code in # multiarray/multiarray_tests.c.src test_pydatamem_seteventhook_start() # force an allocation and free of a numpy array a = np.zeros(10) del a test_pydatamem_seteventhook_end() class TestMapIter(TestCase): def test_mapiter(self): # The actual tests are within the C code in # multiarray/multiarray_tests.c.src a = arange(12).reshape((3, 4)).astype(float) index = ([1, 1, 2, 0], [0, 0, 2, 3]) vals = [50, 50, 30, 16] test_inplace_increment(a, index, vals) assert_equal(a, [[ 0., 1., 2., 19.,], [ 104., 5., 6., 7.,], [ 8., 9., 40., 11.,]]) b = arange(6).astype(float) index = (array([1, 2, 0]),) vals = [50, 4, 100.1] test_inplace_increment(b, index, vals) assert_equal(b, [ 100.1, 51., 6., 3., 4., 5. 
]) class PriorityNdarray(): __array_priority__ = 1000 def __init__(self, array): self.array = array def __lt__(self, array): if isinstance(array, PriorityNdarray): array = array.array return PriorityNdarray(self.array < array) def __gt__(self, array): if isinstance(array, PriorityNdarray): array = array.array return PriorityNdarray(self.array > array) def __le__(self, array): if isinstance(array, PriorityNdarray): array = array.array return PriorityNdarray(self.array <= array) def __ge__(self, array): if isinstance(array, PriorityNdarray): array = array.array return PriorityNdarray(self.array >= array) def __eq__(self, array): if isinstance(array, PriorityNdarray): array = array.array return PriorityNdarray(self.array == array) def __ne__(self, array): if isinstance(array, PriorityNdarray): array = array.array return PriorityNdarray(self.array != array) class TestArrayPriority(TestCase): def test_lt(self): l = np.asarray([0., -1., 1.], dtype=dtype) r = np.asarray([0., 1., -1.], dtype=dtype) lp = PriorityNdarray(l) rp = PriorityNdarray(r) res1 = l < r res2 = l < rp res3 = lp < r res4 = lp < rp assert_array_equal(res1, res2.array) assert_array_equal(res1, res3.array) assert_array_equal(res1, res4.array) assert_(isinstance(res1, np.ndarray)) assert_(isinstance(res2, PriorityNdarray)) assert_(isinstance(res3, PriorityNdarray)) assert_(isinstance(res4, PriorityNdarray)) def test_gt(self): l = np.asarray([0., -1., 1.], dtype=dtype) r = np.asarray([0., 1., -1.], dtype=dtype) lp = PriorityNdarray(l) rp = PriorityNdarray(r) res1 = l > r res2 = l > rp res3 = lp > r res4 = lp > rp assert_array_equal(res1, res2.array) assert_array_equal(res1, res3.array) assert_array_equal(res1, res4.array) assert_(isinstance(res1, np.ndarray)) assert_(isinstance(res2, PriorityNdarray)) assert_(isinstance(res3, PriorityNdarray)) assert_(isinstance(res4, PriorityNdarray)) def test_le(self): l = np.asarray([0., -1., 1.], dtype=dtype) r = np.asarray([0., 1., -1.], dtype=dtype) lp = PriorityNdarray(l) rp = PriorityNdarray(r) res1 = l <= r res2 = l <= rp res3 = lp <= r res4 = lp <= rp assert_array_equal(res1, res2.array) assert_array_equal(res1, res3.array) assert_array_equal(res1, res4.array) assert_(isinstance(res1, np.ndarray)) assert_(isinstance(res2, PriorityNdarray)) assert_(isinstance(res3, PriorityNdarray)) assert_(isinstance(res4, PriorityNdarray)) def test_ge(self): l = np.asarray([0., -1., 1.], dtype=dtype) r = np.asarray([0., 1., -1.], dtype=dtype) lp = PriorityNdarray(l) rp = PriorityNdarray(r) res1 = l >= r res2 = l >= rp res3 = lp >= r res4 = lp >= rp assert_array_equal(res1, res2.array) assert_array_equal(res1, res3.array) assert_array_equal(res1, res4.array) assert_(isinstance(res1, np.ndarray)) assert_(isinstance(res2, PriorityNdarray)) assert_(isinstance(res3, PriorityNdarray)) assert_(isinstance(res4, PriorityNdarray)) def test_eq(self): l = np.asarray([0., -1., 1.], dtype=dtype) r = np.asarray([0., 1., -1.], dtype=dtype) lp = PriorityNdarray(l) rp = PriorityNdarray(r) res1 = l == r res2 = l == rp res3 = lp == r res4 = lp == rp assert_array_equal(res1, res2.array) assert_array_equal(res1, res3.array) assert_array_equal(res1, res4.array) assert_(isinstance(res1, np.ndarray)) assert_(isinstance(res2, PriorityNdarray)) assert_(isinstance(res3, PriorityNdarray)) assert_(isinstance(res4, PriorityNdarray)) def test_ne(self): l = np.asarray([0., -1., 1.], dtype=dtype) r = np.asarray([0., 1., -1.], dtype=dtype) lp = PriorityNdarray(l) rp = PriorityNdarray(r) res1 = l != r res2 = l != rp res3 = lp != r res4 = lp 
!= rp assert_array_equal(res1, res2.array) assert_array_equal(res1, res3.array) assert_array_equal(res1, res4.array) assert_(isinstance(res1, np.ndarray)) assert_(isinstance(res2, PriorityNdarray)) assert_(isinstance(res3, PriorityNdarray)) assert_(isinstance(res4, PriorityNdarray)) if __name__ == "__main__": run_module_suite()
gpl-3.0
7,132,038,094,073,860,000
37.143908
587
0.502706
false
3.157034
true
false
false
eleonrk/SickRage
sickbeard/traktChecker.py
1
33797
# coding=utf-8 # Author: Frank Fenton # URL: https://sickchill.github.io # # This file is part of SickChill. # # SickChill is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickChill is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickChill. If not, see <http://www.gnu.org/licenses/>. from __future__ import print_function, unicode_literals import datetime import os import traceback from libtrakt import TraktAPI from libtrakt.exceptions import traktException import sickbeard from sickbeard import db, helpers, logger, search_queue from sickbeard.common import Quality, SKIPPED, UNKNOWN, WANTED from sickchill.helper.common import episode_num, sanitize_filename from sickchill.helper.encoding import ek from sickchill.helper.exceptions import ex from sickchill.show.Show import Show def setEpisodeToWanted(show, s, e): """ Sets an episode to wanted, only if it is currently skipped """ epObj = show.getEpisode(s, e) if epObj: with epObj.lock: if epObj.status != SKIPPED or epObj.airdate == datetime.date.fromordinal(1): return logger.log("Setting episode {show} {ep} to wanted".format (show=show.name, ep=episode_num(s, e))) # figure out what segment the episode is in and remember it so we can backlog it epObj.status = WANTED epObj.saveToDB() cur_backlog_queue_item = search_queue.BacklogQueueItem(show, [epObj]) sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) logger.log("Starting backlog search for {show} {ep} because some episodes were set to wanted".format (show=show.name, ep=episode_num(s, e))) class TraktChecker(object): def __init__(self): self.trakt_api = TraktAPI(sickbeard.SSL_VERIFY, sickbeard.TRAKT_TIMEOUT) self.todoBacklog = [] self.todoWanted = [] self.ShowWatchlist = {} self.EpisodeWatchlist = {} self.Collectionlist = {} self.amActive = False def run(self, force=False): self.amActive = True # add shows from trakt.tv watchlist if sickbeard.TRAKT_SYNC_WATCHLIST: self.todoWanted = [] # its about to all get re-added if len(sickbeard.ROOT_DIRS.split('|')) < 2: logger.log("No default root directory", logger.WARNING) return try: self.syncWatchlist() except Exception: logger.log(traceback.format_exc(), logger.DEBUG) try: # sync trakt.tv library with sickchill library self.syncLibrary() except Exception: logger.log(traceback.format_exc(), logger.DEBUG) self.amActive = False def findShow(self, indexer, indexerid): traktShow = None try: library = self.trakt_api.traktRequest("sync/collection/shows") or [] if not library: logger.log("No shows found in your library, aborting library update", logger.DEBUG) return traktShow = [x for x in library if int(indexerid) in [int(x['show']['ids']['tvdb'] or 0), int(x['show']['ids']['tvrage'] or 0)]] except traktException as e: logger.log("Could not connect to Trakt service. Aborting library check. 
Error: {0}".format(repr(e)), logger.WARNING) return traktShow def removeShowFromTraktLibrary(self, show_obj): if self.findShow(show_obj.indexer, show_obj.indexerid): trakt_id = sickbeard.indexerApi(show_obj.indexer).config['trakt_id'] # URL parameters data = { 'shows': [ { 'title': show_obj.name, 'year': show_obj.startyear, 'ids': {} } ] } if trakt_id == 'tvdb_id': data['shows'][0]['ids']['tvdb'] = show_obj.indexerid else: data['shows'][0]['ids']['tvrage'] = show_obj.indexerid logger.log("Removing {0} from trakt.tv library".format(show_obj.name), logger.DEBUG) try: self.trakt_api.traktRequest("sync/collection/remove", data, method='POST') except traktException as e: logger.log("Could not connect to Trakt service. Aborting removing show {0} from Trakt library. Error: {1}".format(show_obj.name, repr(e)), logger.WARNING) def addShowToTraktLibrary(self, show_obj): """ Sends a request to trakt indicating that the given show and all its episodes is part of our library. show_obj: The TVShow object to add to trakt """ data = {} if not self.findShow(show_obj.indexer, show_obj.indexerid): trakt_id = sickbeard.indexerApi(show_obj.indexer).config['trakt_id'] # URL parameters data = { 'shows': [ { 'title': show_obj.name, 'year': show_obj.startyear, 'ids': {} } ] } if trakt_id == 'tvdb_id': data['shows'][0]['ids']['tvdb'] = show_obj.indexerid else: data['shows'][0]['ids']['tvrage'] = show_obj.indexerid if data: logger.log("Adding {0} to trakt.tv library".format(show_obj.name), logger.DEBUG) try: self.trakt_api.traktRequest("sync/collection", data, method='POST') except traktException as e: logger.log("Could not connect to Trakt service. Aborting adding show {0} to Trakt library. Error: {1}".format(show_obj.name, repr(e)), logger.WARNING) return def syncLibrary(self): if sickbeard.TRAKT_SYNC and sickbeard.USE_TRAKT: logger.log("Sync SickChill with Trakt Collection", logger.DEBUG) if self._getShowCollection(): self.addEpisodeToTraktCollection() if sickbeard.TRAKT_SYNC_REMOVE: self.removeEpisodeFromTraktCollection() def removeEpisodeFromTraktCollection(self): if sickbeard.TRAKT_SYNC_REMOVE and sickbeard.TRAKT_SYNC and sickbeard.USE_TRAKT: logger.log("COLLECTION::REMOVE::START - Look for Episodes to Remove From Trakt Collection", logger.DEBUG) main_db_con = db.DBConnection() sql_selection = 'select tv_shows.indexer, tv_shows.startyear, showid, show_name, season, episode, tv_episodes.status, tv_episodes.location from tv_episodes,tv_shows where tv_shows.indexer_id = tv_episodes.showid' episodes = main_db_con.select(sql_selection) if episodes is not None: trakt_data = [] for cur_episode in episodes: trakt_id = sickbeard.indexerApi(cur_episode[b"indexer"]).config[b'trakt_id'] if self._checkInList(trakt_id, str(cur_episode[b"showid"]), str(cur_episode[b"season"]), str(cur_episode[b"episode"]), List='Collection'): if cur_episode[b"location"] == '': logger.log("Removing Episode {show} {ep} from collection".format( show=cur_episode[b"show_name"], ep=episode_num(cur_episode[b"season"], cur_episode[b"episode"])), logger.DEBUG ) trakt_data.append((cur_episode[b"showid"], cur_episode[b"indexer"], cur_episode[b"show_name"], cur_episode[b"startyear"], cur_episode[b"season"], cur_episode[b"episode"])) if trakt_data: try: data = self.trakt_bulk_data_generate(trakt_data) self.trakt_api.traktRequest("sync/collection/remove", data, method='POST') self._getShowCollection() except traktException as e: logger.log("Could not connect to Trakt service. 
Error: {0}".format(ex(e)), logger.WARNING) logger.log("COLLECTION::REMOVE::FINISH - Look for Episodes to Remove From Trakt Collection", logger.DEBUG) def addEpisodeToTraktCollection(self): if sickbeard.TRAKT_SYNC and sickbeard.USE_TRAKT: logger.log("COLLECTION::ADD::START - Look for Episodes to Add to Trakt Collection", logger.DEBUG) main_db_con = db.DBConnection() sql_selection = 'select tv_shows.indexer, tv_shows.startyear, showid, show_name, season, episode from tv_episodes,tv_shows where tv_shows.indexer_id = tv_episodes.showid and tv_episodes.status in (' + ','.join([str(x) for x in Quality.DOWNLOADED + Quality.ARCHIVED]) + ')' episodes = main_db_con.select(sql_selection) if episodes is not None: trakt_data = [] for cur_episode in episodes: trakt_id = sickbeard.indexerApi(cur_episode[b"indexer"]).config['trakt_id'] if not self._checkInList(trakt_id, str(cur_episode[b"showid"]), str(cur_episode[b"season"]), str(cur_episode[b"episode"]), List='Collection'): logger.log("Adding Episode {show} {ep} to collection".format (show=cur_episode[b"show_name"], ep=episode_num(cur_episode[b"season"], cur_episode[b"episode"])), logger.DEBUG) trakt_data.append((cur_episode[b"showid"], cur_episode[b"indexer"], cur_episode[b"show_name"], cur_episode[b"startyear"], cur_episode[b"season"], cur_episode[b"episode"])) if trakt_data: try: data = self.trakt_bulk_data_generate(trakt_data) self.trakt_api.traktRequest("sync/collection", data, method='POST') self._getShowCollection() except traktException as e: logger.log("Could not connect to Trakt service. Error: {0}".format(ex(e)), logger.WARNING) logger.log("COLLECTION::ADD::FINISH - Look for Episodes to Add to Trakt Collection", logger.DEBUG) def syncWatchlist(self): if sickbeard.TRAKT_SYNC_WATCHLIST and sickbeard.USE_TRAKT: logger.log("Sync SickChill with Trakt Watchlist", logger.DEBUG) self.removeShowFromSickChill() if self._getShowWatchlist(): # self.addShowToTraktWatchList() self.updateShows() ''' if self._getEpisodeWatchlist(): self.removeEpisodeFromTraktWatchList() self.addEpisodeToTraktWatchList() self.updateEpisodes() ''' def removeEpisodeFromTraktWatchList(self): if sickbeard.TRAKT_SYNC_WATCHLIST and sickbeard.USE_TRAKT: logger.log("WATCHLIST::REMOVE::START - Look for Episodes to Remove from Trakt Watchlist", logger.DEBUG) main_db_con = db.DBConnection() sql_selection = 'select tv_shows.indexer, tv_shows.startyear, showid, show_name, season, episode, tv_episodes.status from tv_episodes,tv_shows where tv_shows.indexer_id = tv_episodes.showid' episodes = main_db_con.select(sql_selection) if episodes is not None: trakt_data = [] for cur_episode in episodes: trakt_id = sickbeard.indexerApi(cur_episode[b"indexer"]).config['trakt_id'] if self._checkInList(trakt_id, str(cur_episode[b"showid"]), str(cur_episode[b"season"]), str(cur_episode[b"episode"])): if cur_episode[b"status"] not in Quality.SNATCHED + Quality.SNATCHED_PROPER + [UNKNOWN] + [WANTED]: logger.log("Removing Episode {show} {ep} from watchlist".format (show=cur_episode[b"show_name"], ep=episode_num(cur_episode[b"season"], cur_episode[b"episode"])), logger.DEBUG) trakt_data.append((cur_episode[b"showid"], cur_episode[b"indexer"], cur_episode[b"show_name"], cur_episode[b"startyear"], cur_episode[b"season"], cur_episode[b"episode"])) if trakt_data: try: data = self.trakt_bulk_data_generate(trakt_data) self.trakt_api.traktRequest("sync/watchlist/remove", data, method='POST') self._getEpisodeWatchlist() except traktException as e: logger.log("Could not connect to Trakt service. 
Error: {0}".format(ex(e)), logger.WARNING) logger.log("WATCHLIST::REMOVE::FINISH - Look for Episodes to Remove from Trakt Watchlist", logger.DEBUG) def addEpisodeToTraktWatchList(self): if sickbeard.TRAKT_SYNC_WATCHLIST and sickbeard.USE_TRAKT: logger.log("WATCHLIST::ADD::START - Look for Episodes to Add to Trakt Watchlist", logger.DEBUG) main_db_con = db.DBConnection() sql_selection = 'select tv_shows.indexer, tv_shows.startyear, showid, show_name, season, episode from tv_episodes,tv_shows where tv_shows.indexer_id = tv_episodes.showid and tv_episodes.status in (' + ','.join([str(x) for x in Quality.SNATCHED + Quality.SNATCHED_PROPER + [WANTED]]) + ')' episodes = main_db_con.select(sql_selection) if episodes is not None: trakt_data = [] for cur_episode in episodes: trakt_id = sickbeard.indexerApi(cur_episode[b"indexer"]).config['trakt_id'] if not self._checkInList(trakt_id, str(cur_episode[b"showid"]), str(cur_episode[b"season"]), str(cur_episode[b"episode"])): logger.log("Adding Episode {show} {ep} to watchlist".format (show=cur_episode[b"show_name"], ep=episode_num(cur_episode[b"season"], cur_episode[b"episode"])), logger.DEBUG) trakt_data.append((cur_episode[b"showid"], cur_episode[b"indexer"], cur_episode[b"show_name"], cur_episode[b"startyear"], cur_episode[b"season"], cur_episode[b"episode"])) if trakt_data: try: data = self.trakt_bulk_data_generate(trakt_data) self.trakt_api.traktRequest("sync/watchlist", data, method='POST') self._getEpisodeWatchlist() except traktException as e: logger.log("Could not connect to Trakt service. Error {0}".format(ex(e)), logger.WARNING) logger.log("WATCHLIST::ADD::FINISH - Look for Episodes to Add to Trakt Watchlist", logger.DEBUG) def addShowToTraktWatchList(self): if sickbeard.TRAKT_SYNC_WATCHLIST and sickbeard.USE_TRAKT: logger.log("SHOW_WATCHLIST::ADD::START - Look for Shows to Add to Trakt Watchlist", logger.DEBUG) if sickbeard.showList is not None: trakt_data = [] for show in sickbeard.showList: trakt_id = sickbeard.indexerApi(show.indexer).config['trakt_id'] if not self._checkInList(trakt_id, str(show.indexerid), '0', '0', List='Show'): logger.log("Adding Show: Indexer {0} {1} - {2} to Watchlist".format(trakt_id, str(show.indexerid), show.name), logger.DEBUG) show_el = {'title': show.name, 'year': show.startyear, 'ids': {}} if trakt_id == 'tvdb_id': show_el['ids']['tvdb'] = show.indexerid else: show_el['ids']['tvrage'] = show.indexerid trakt_data.append(show_el) if trakt_data: try: data = {'shows': trakt_data} self.trakt_api.traktRequest("sync/watchlist", data, method='POST') self._getShowWatchlist() except traktException as e: logger.log("Could not connect to Trakt service. Error: {0}".format(ex(e)), logger.WARNING) logger.log("SHOW_WATCHLIST::ADD::FINISH - Look for Shows to Add to Trakt Watchlist", logger.DEBUG) def removeShowFromSickChill(self): if sickbeard.TRAKT_SYNC_WATCHLIST and sickbeard.USE_TRAKT and sickbeard.TRAKT_REMOVE_SHOW_FROM_SICKCHILL: logger.log("SHOW_SICKCHILL::REMOVE::START - Look for Shows to remove from SickChill", logger.DEBUG) if sickbeard.showList: for show in sickbeard.showList: if show.status in ("Ended", "Canceled"): if not show.imdbid: logger.log('Could not check trakt progress for {0} because the imdb id is missing from tvdb data, skipping'.format (show.name), logger.WARNING) continue try: progress = self.trakt_api.traktRequest("shows/" + show.imdbid + "/progress/watched") or [] except traktException as e: logger.log("Could not connect to Trakt service. Aborting removing show {0} from SickChill. 
Error: {1}".format(show.name, repr(e)), logger.WARNING) continue if not progress: continue if progress.get('aired', True) == progress.get('completed', False): sickbeard.showQueueScheduler.action.remove_show(show, full=True) logger.log("Show: {0} has been removed from SickChill".format(show.name), logger.DEBUG) logger.log("SHOW_SICKCHILL::REMOVE::FINISH - Trakt Show Watchlist", logger.DEBUG) def updateShows(self): logger.log("SHOW_WATCHLIST::CHECK::START - Trakt Show Watchlist", logger.DEBUG) if not self.ShowWatchlist: logger.log("No shows found in your watchlist, aborting watchlist update", logger.DEBUG) return indexer = int(sickbeard.TRAKT_DEFAULT_INDEXER) trakt_id = sickbeard.indexerApi(indexer).config['trakt_id'] for show_el in self.ShowWatchlist[trakt_id]: indexer_id = int(str(show_el)) show = self.ShowWatchlist[trakt_id][show_el] # logger.log(u"Checking Show: %s %s %s" % (trakt_id, indexer_id, show['title']),logger.DEBUG) if int(sickbeard.TRAKT_METHOD_ADD) != 2: self.addDefaultShow(indexer, indexer_id, show['title'], SKIPPED) else: self.addDefaultShow(indexer, indexer_id, show['title'], WANTED) if int(sickbeard.TRAKT_METHOD_ADD) == 1: newShow = Show.find(sickbeard.showList, indexer_id) if newShow is not None: setEpisodeToWanted(newShow, 1, 1) else: self.todoWanted.append((indexer_id, 1, 1)) logger.log("SHOW_WATCHLIST::CHECK::FINISH - Trakt Show Watchlist", logger.DEBUG) def updateEpisodes(self): """ Sets episodes to wanted that are in trakt watchlist """ logger.log("SHOW_WATCHLIST::CHECK::START - Trakt Episode Watchlist", logger.DEBUG) if not self.EpisodeWatchlist: logger.log("No episode found in your watchlist, aborting episode update", logger.DEBUG) return managed_show = [] indexer = int(sickbeard.TRAKT_DEFAULT_INDEXER) trakt_id = sickbeard.indexerApi(indexer).config['trakt_id'] for show_el in self.EpisodeWatchlist[trakt_id]: indexer_id = int(show_el) show = self.EpisodeWatchlist[trakt_id][show_el] newShow = Show.find(sickbeard.showList, indexer_id) try: if newShow is None: if indexer_id not in managed_show: self.addDefaultShow(indexer, indexer_id, show['title'], SKIPPED) managed_show.append(indexer_id) for season_el in show['seasons']: season = int(season_el) for episode_el in show['seasons'][season_el]['episodes']: self.todoWanted.append((indexer_id, season, int(episode_el))) else: if newShow.indexer == indexer: for season_el in show['seasons']: season = int(season_el) for episode_el in show['seasons'][season_el]['episodes']: setEpisodeToWanted(newShow, season, int(episode_el)) except TypeError: logger.log("Could not parse the output from trakt for {0} ".format(show["title"]), logger.DEBUG) logger.log("SHOW_WATCHLIST::CHECK::FINISH - Trakt Episode Watchlist", logger.DEBUG) @staticmethod def addDefaultShow(indexer, indexer_id, name, status): """ Adds a new show with the default settings """ if not Show.find(sickbeard.showList, int(indexer_id)): logger.log("Adding show " + str(indexer_id)) root_dirs = sickbeard.ROOT_DIRS.split('|') try: location = root_dirs[int(root_dirs[0]) + 1] except Exception: location = None if location: showPath = ek(os.path.join, location, sanitize_filename(name)) dir_exists = helpers.makeDir(showPath) if not dir_exists: logger.log("Unable to create the folder {0} , can't add the show".format(showPath), logger.WARNING) return else: helpers.chmodAsParent(showPath) sickbeard.showQueueScheduler.action.add_show(int(indexer), int(indexer_id), showPath, default_status=status, quality=int(sickbeard.QUALITY_DEFAULT), 
season_folders=int(sickbeard.SEASON_FOLDERS_DEFAULT), paused=sickbeard.TRAKT_START_PAUSED, default_status_after=status) else: logger.log("There was an error creating the show, no root directory setting found", logger.WARNING) return def manageNewShow(self, show): logger.log("Checking if trakt watch list wants to search for episodes from new show " + show.name, logger.DEBUG) episodes = [i for i in self.todoWanted if i[0] == show.indexerid] for episode in episodes: self.todoWanted.remove(episode) setEpisodeToWanted(show, episode[1], episode[2]) def _checkInList(self, trakt_id, showid, season, episode, List=None): """ Check in the Watchlist or CollectionList for Show Is the Show, Season and Episode in the trakt_id list (tvdb / tvrage) """ # logger.log(u"Checking Show: %s %s %s " % (trakt_id, showid, List),logger.DEBUG) if "Collection" == List: try: if self.Collectionlist[trakt_id][showid]['seasons'][season]['episodes'][episode] == episode: return True except Exception: return False elif "Show" == List: try: if self.ShowWatchlist[trakt_id][showid]['id'] == showid: return True except Exception: return False else: try: if self.EpisodeWatchlist[trakt_id][showid]['seasons'][season]['episodes'][episode] == episode: return True except Exception: return False def _getShowWatchlist(self): """ Get Watchlist and parse once into addressable structure """ try: self.ShowWatchlist = {'tvdb_id': {}, 'tvrage_id': {}} TraktShowWatchlist = self.trakt_api.traktRequest("sync/watchlist/shows") tvdb_id = 'tvdb' tvrage_id = 'tvrage' for watchlist_el in TraktShowWatchlist: tvdb = False tvrage = False if watchlist_el['show']['ids']["tvdb"] is not None: tvdb = True if watchlist_el['show']['ids']["tvrage"] is not None: tvrage = True title = watchlist_el['show']['title'] year = str(watchlist_el['show']['year']) if tvdb: showid = str(watchlist_el['show']['ids'][tvdb_id]) self.ShowWatchlist[tvdb_id + '_id'][showid] = {'id': showid, 'title': title, 'year': year} if tvrage: showid = str(watchlist_el['show']['ids'][tvrage_id]) self.ShowWatchlist[tvrage_id + '_id'][showid] = {'id': showid, 'title': title, 'year': year} except traktException as e: logger.log("Could not connect to trakt service, cannot download Show Watchlist: {0}".format(repr(e)), logger.WARNING) return False return True def _getEpisodeWatchlist(self): """ Get Watchlist and parse once into addressable structure """ try: self.EpisodeWatchlist = {'tvdb_id': {}, 'tvrage_id': {}} TraktEpisodeWatchlist = self.trakt_api.traktRequest("sync/watchlist/episodes") tvdb_id = 'tvdb' tvrage_id = 'tvrage' for watchlist_el in TraktEpisodeWatchlist: tvdb = False tvrage = False if watchlist_el['show']['ids']["tvdb"] is not None: tvdb = True if watchlist_el['show']['ids']["tvrage"] is not None: tvrage = True title = watchlist_el['show']['title'] year = str(watchlist_el['show']['year']) season = str(watchlist_el['episode']['season']) episode = str(watchlist_el['episode']['number']) if tvdb: showid = str(watchlist_el['show']['ids'][tvdb_id]) if showid not in self.EpisodeWatchlist[tvdb_id + '_id'].keys(): self.EpisodeWatchlist[tvdb_id + '_id'][showid] = {'id': showid, 'title': title, 'year': year, 'seasons': {}} if season not in self.EpisodeWatchlist[tvdb_id + '_id'][showid]['seasons'].keys(): self.EpisodeWatchlist[tvdb_id + '_id'][showid]['seasons'][season] = {'s': season, 'episodes': {}} if episode not in self.EpisodeWatchlist[tvdb_id + '_id'][showid]['seasons'][season]['episodes'].keys(): self.EpisodeWatchlist[tvdb_id + '_id'][showid]['seasons'][season]['episodes'][episode] = 
episode if tvrage: showid = str(watchlist_el['show']['ids'][tvrage_id]) if showid not in self.EpisodeWatchlist[tvrage_id + '_id'].keys(): self.EpisodeWatchlist[tvrage_id + '_id'][showid] = {'id': showid, 'title': title, 'year': year, 'seasons': {}} if season not in self.EpisodeWatchlist[tvrage_id + '_id'][showid]['seasons'].keys(): self.EpisodeWatchlist[tvrage_id + '_id'][showid]['seasons'][season] = {'s': season, 'episodes': {}} if episode not in self.EpisodeWatchlist[tvrage_id + '_id'][showid]['seasons'][season]['episodes'].keys(): self.EpisodeWatchlist[tvrage_id + '_id'][showid]['seasons'][season]['episodes'][episode] = episode except traktException as e: logger.log("Could not connect to trakt service, cannot download Episode Watchlist: {0}".format(repr(e)), logger.WARNING) return False return True def _getShowCollection(self): """ Get Collection and parse once into addressable structure """ try: self.Collectionlist = {'tvdb_id': {}, 'tvrage_id': {}} logger.log("Getting Show Collection", logger.DEBUG) TraktCollectionList = self.trakt_api.traktRequest("sync/collection/shows") tvdb_id = 'tvdb' tvrage_id = 'tvrage' for watchlist_el in TraktCollectionList: tvdb = False tvrage = False if watchlist_el['show']['ids']["tvdb"] is not None: tvdb = True if watchlist_el['show']['ids']["tvrage"] is not None: tvrage = True title = watchlist_el['show']['title'] year = str(watchlist_el['show']['year']) if 'seasons' in watchlist_el: for season_el in watchlist_el['seasons']: for episode_el in season_el['episodes']: season = str(season_el['number']) episode = str(episode_el['number']) if tvdb: showid = str(watchlist_el['show']['ids'][tvdb_id]) if showid not in self.Collectionlist[tvdb_id + '_id'].keys(): self.Collectionlist[tvdb_id + '_id'][showid] = {'id': showid, 'title': title, 'year': year, 'seasons': {}} if season not in self.Collectionlist[tvdb_id + '_id'][showid]['seasons'].keys(): self.Collectionlist[tvdb_id + '_id'][showid]['seasons'][season] = {'s': season, 'episodes': {}} if episode not in self.Collectionlist[tvdb_id + '_id'][showid]['seasons'][season]['episodes'].keys(): self.Collectionlist[tvdb_id + '_id'][showid]['seasons'][season]['episodes'][episode] = episode if tvrage: showid = str(watchlist_el['show']['ids'][tvrage_id]) if showid not in self.Collectionlist[tvrage_id + '_id'].keys(): self.Collectionlist[tvrage_id + '_id'][showid] = {'id': showid, 'title': title, 'year': year, 'seasons': {}} if season not in self.Collectionlist[tvrage_id + '_id'][showid]['seasons'].keys(): self.Collectionlist[tvrage_id + '_id'][showid]['seasons'][season] = {'s': season, 'episodes': {}} if episode not in self.Collectionlist[tvrage_id + '_id'][showid]['seasons'][season]['episodes'].keys(): self.Collectionlist[tvrage_id + '_id'][showid]['seasons'][season]['episodes'][episode] = episode except traktException as e: logger.log("Could not connect to trakt service, cannot download Show Collection: {0}".format(repr(e)), logger.WARNING) return False return True @staticmethod def trakt_bulk_data_generate(data): """ Build the JSON structure to send back to Trakt """ uniqueShows = {} uniqueSeasons = {} for showid, indexerid, show_name, startyear, season, episode in data: if showid not in uniqueShows: uniqueShows[showid] = {'title': show_name, 'year': startyear, 'ids': {}, 'seasons': []} trakt_id = sickbeard.indexerApi(indexerid).config['trakt_id'] if trakt_id == 'tvdb_id': uniqueShows[showid]['ids']["tvdb"] = showid else: uniqueShows[showid]['ids']["tvrage"] = showid uniqueSeasons[showid] = [] # Get the unique seasons 
per Show for showid, indexerid, show_name, startyear, season, episode in data: if season not in uniqueSeasons[showid]: uniqueSeasons[showid].append(season) # build the query showList = [] seasonsList = {} for searchedShow in uniqueShows: seasonsList[searchedShow] = [] for searchedSeason in uniqueSeasons[searchedShow]: episodesList = [] for showid, indexerid, show_name, startyear, season, episode in data: if season == searchedSeason and showid == searchedShow: episodesList.append({'number': episode}) show = uniqueShows[searchedShow] show['seasons'].append({'number': searchedSeason, 'episodes': episodesList}) showList.append(show) post_data = {'shows': showList} return post_data
gpl-3.0
1,176,025,136,970,934,000
46.668547
300
0.549516
false
4.219351
false
false
false
facoy/facoy
Searcher/_JaccardSearcher.py
1
2617
# from java.io import File, StringReader
# from org.apache.lucene.index import IndexReader, Term
# from org.apache.lucene.search import IndexSearcher, FuzzyQuery
# from org.apache.lucene.store import SimpleFSDirectory
# from org.apache.lucene.analysis.core import KeywordAnalyzer
# from org.apache.lucene.util import Version
# from org.apache.lucene.queryparser.classic import MultiFieldQueryParser, QueryParser
# from collections import Counter
#
# indexDir = File("/tmp/github")
#
# # 1. open the index
# analyzer = KeywordAnalyzer()
# index = SimpleFSDirectory(indexDir)
# reader = IndexReader.open(index)
# n_docs = reader.numDocs()
# print("Index contains %d documents." % n_docs)
#
# # 2. parse the query from the command line
# # a = {"typed_method_call": WhitespaceAnalyzer()}
# # wrapper_analyzer = PerFieldAnalyzerWrapper(analyzer, a)
# # query_string = "HttpURLConnection.disconnect Exception.printStackTrace BufferedReader.close HttpURLConnection.setRequestProperty HttpURLConnection.setRequestMethod DataOutputStream.writeBytes HttpURLConnection.getInputStream DataOutputStream.close HttpURLConnection.setUseCaches StringBuffer.append URL.openConnection HttpURLConnection.getOutputStream Integer.toString String.getBytes StringBuffer.toString HttpURLConnection.setDoOutput BufferedReader.readLine DataOutputStream.flush HttpURLConnection.setDoInput"
# query_parser = MultiFieldQueryParser(Version.LUCENE_CURRENT, ["typed_method_call"], analyzer)
#
# # #base_query = getSpanNearQuery(analyzer, query_string)
# # base_query = query_parser.parse(query_string)
# # #http://shaierera.blogspot.com/2013/09/boosting-documents-in-lucene.html
# # boost_query = FunctionQuery( LongFieldSource("view_count"))
# #query = CustomScoreQuery(base_query, boost_query)
#
# # queryparser = QueryParser(Version.LUCENE_CURRENT, "title", analyzer)
# # query = queryparser.parse(query_string)
#
# # 3. search the index for the query
# # We retrieve and sort all documents that match the query.
# # In a real application, use a TopScoreDocCollector to sort the hits.
# searcher = IndexSearcher(reader)
# hits = searcher.search(base_query, 10).scoreDocs
#
# # 4. display results
# print(query_string)
# print("Found %d hits:" % len(hits))
#
# api_acc = []
# for i, hit in enumerate(hits):
#     doc = searcher.doc(hit.doc)
#     apis = [d.stringValue() for d in doc.getFields("typed_method_call")]
#     api_acc.extend(apis)
#     #retrieve_ranked_apis(doc.get("answer_id"))
#     print("%d. %s Method: %s, Score: %s" % (i + 1, doc.get("file"), apis, hit.score))
#
# print Counter(api_acc).most_common(5)
# # 5. close resources
apache-2.0
-2,467,094,502,645,306,400
44.912281
515
0.757356
false
3.325286
false
false
false
guyisit/photobooth
photo_booth_ver1.py
1
6531
#By: Guy Fisher, @guyisit
#More at https://github.com/guyisit/photobooth ; https://hackaday.io/project/6625-raspberry-pi-photobooth
#
#Inspired/ helped by:
#http://www.instructables.com/id/Raspberry-Pi-photo-booth-controller/?ALLSTEPS
#http://code.activestate.com/recipes/362879-watermark-with-pil/
#https://github.com/jcroucher/pi-photo-booth/blob/master/photobooth.py
#http://stackoverflow.com/questions/25592240/raspberry-pi-camera-auto-capture-python-script
#
#Todo:
#finish watermark/ blank files
#Notes:
#Camera output is currently 1920x1080
#watermark is turned off temporarily
#
from threading import Thread
import RPi.GPIO as GPIO
import ImageEnhance
import time
import io
import picamera
import random
import os
import sys
import cups
import PIL
from PIL import Image

imgPath = '/home/pi/PiCam/temp' #location for images to be saved

GPIO.setwarnings(False) #disabled errors when ready LED is already ON
GPIO.setmode(GPIO.BCM)
GPIO.setup(20, GPIO.IN) #4x1 button
GPIO.setup(21, GPIO.IN) #2x2 button
GPIO.setup(26, GPIO.OUT) #ready LED
GPIO.setup(16, GPIO.IN) #1x1 button
GPIO.setup(18, GPIO.OUT) #flash relay

#capture image from camera
def take_pictures():
    with picamera.PiCamera() as cam:
        counter = 0
        for each in range(4):
            counter = counter + 1
            cam.start_preview()
            if counter == 1: #length of preview time for first picture
                time.sleep(6)
            if counter > 1: #length of preview time for pictures 2-4
                time.sleep(3)
            cam.capture(imgPath + '/image' + str(counter) + '.jpg')
            cam.stop_preview()

def reduce_opacity(im, opacity):
    """Returns an image with reduced opacity."""
    assert opacity >= 0 and opacity <= 1
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    else:
        im = im.copy()
    alpha = im.split()[3]
    alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
    im.putalpha(alpha)
    return im

def watermark(im, mark, position, opacity=1):
    """Adds a watermark to an image."""
    if opacity < 1:
        mark = reduce_opacity(mark, opacity)
    if im.mode != 'RGBA':
        im = im.convert('RGBA')
    # create a transparent layer the size of the image and draw the
    # watermark in that layer.
    layer = Image.new('RGBA', im.size, (0,0,0,0))
    if position == 'tile':
        for y in range(0, im.size[1], mark.size[1]):
            for x in range(0, im.size[0], mark.size[0]):
                layer.paste(mark, (x, y))
    elif position == 'scale':
        # scale, but preserve the aspect ratio
        ratio = min(
            float(im.size[0]) / mark.size[0], float(im.size[1]) / mark.size[1])
        w = int(mark.size[0] * ratio)
        h = int(mark.size[1] * ratio)
        mark = mark.resize((w, h))
        layer.paste(mark, ((im.size[0] - w) / 2, (im.size[1] - h) / 2))
    else:
        layer.paste(mark, position)
    # composite the watermark with the layer
    return Image.composite(layer, im, layer)

def combineImages2x2():
    # Do the merging
    blankImage = Image.open(imgPath + '/2x2blank.jpg')
    image1 = Image.open(imgPath + '/image1.jpg')
    blankImage.paste(image1, (0,200)) #each image is offset 200px to account for boarder
    image2 = Image.open(imgPath + '/image2.jpg')
    blankImage.paste(image2, (1920,200))
    image3 = Image.open(imgPath + '/image3.jpg')
    blankImage.paste(image3, (0,1280))
    image4 = Image.open(imgPath + '/image4.jpg')
    blankImage.paste(image4, (1920,1280))
    blankImage.save(imgPath + '/combined' + now + '.jpg', 'JPEG', quality=100)

def combineImages4x1():
    # Do the merging
    blankImage = Image.open(imgPath + '/blank4x1.jpg')
    image1 = Image.open(imgPath + '/image1.jpg')
    image1 = image1.rotate(90)
    blankImage.paste(image1, (0,0))
    image2 = Image.open(imgPath + '/image2.jpg')
    image2 = image2.rotate(90)
    blankImage.paste(image2, (1080,0))
    image3 = Image.open(imgPath + '/image3.jpg')
    image3 = image3.rotate(90)
    blankImage.paste(image3, (2160,0))
    image4 = Image.open(imgPath + '/image4.jpg')
    image4 = image4.rotate(90)
    blankImage.paste(image4, (3240,0))
    blankImage.save(imgPath + '/combined' + now + '.jpg', 'JPEG', quality=100)

def combineImages1x1():
    blankImage = Image.open(imgPath + '/1x1blank.jpg')
    image1 = Image.open(imgPath + '/image5.jpg')
    image1 = image1.rotate(90)
    blankImage.paste(image1, (0,0))
    blankImage.save(imgPath + '/combined' + now + '.jpg', 'JPEG', quality=100)

#Print it!!
def printPhoto():
    conn = cups.Connection()
    printers = conn.getPrinters()
    printer_name = printers.keys()[0]
    conn.printFile(printer_name, imgPath + '/' + now + '.jpg', "TITLE",{})
    time.sleep(180) #An attempt to make people wait before ready light comes on again; not tested yet

def two_by_two():
    GPIO.output(18, True) #turn on flash
    take_pictures()
    GPIO.output(18, False) #turn flash off
    combineImages2x2()
    #im = Image.open(imgPath + '/combined' + now + '.jpg')
    #mark = Image.open(imgPath + '/mark.jpg')
    #watermark(im, mark, 'tile', 0.5).show()
    #watermark(im, mark, 'scale', 1.0).show()
    #watermark(im, mark, (0, 0), 0.25).save(imgPath + '/' + now + '.jpg')
    #printPhoto()

def four_by_one():
    GPIO.output(18, True)
    take_pictures()
    GPIO.output(18, False)
    combineImages4x1()
    #im = Image.open(imgPath + '/combined' + now + '.jpg')
    #mark = Image.open(imgPath + '/mark.jpg')
    #watermark(im, mark, 'tile', 0.5).show()
    #watermark(im, mark, 'scale', 1.0).show()
    #watermark(im, mark, (0, 0), 0.25).save(imgPath + '/' + now + '.jpg')
    #printPhoto()

def one_by_one():
    with picamera.PiCamera() as cam:
        cam.start_preview()
        GPIO.output(18, True)
        time.sleep(6)
        cam.capture(imgPath + '/image' + str(5) + '.jpg')
        cam.stop_preview()
        GPIO.output(18, False)
    combineImages1x1()
    #printPhoto()

if __name__ == '__main__':
    while True:
        GPIO.output(26, True) #turn on camera ready LED
        if (GPIO.input(21) == False): #2x2
            GPIO.output(26, False) #turn off camera ready LED
            now = time.strftime("%Y-%m-%d-%H:%M:%S") #set timestamp
            two_by_two() #execute subroutine
        if (GPIO.input(20) == False): #4x1
            GPIO.output(26, False)
            now = time.strftime("%Y-%m-%d-%H:%M:%S")
            four_by_one()
        if (GPIO.input(16) == False): #1x1
            GPIO.output(26, False)
            now = time.strftime("%Y-%m-%d-%H:%M:%S")
            one_by_one()
#FIN
gpl-2.0
5,499,134,025,995,295,000
31.655
105
0.62456
false
2.997246
false
false
false
haard/quarterapp
quarterapp/settings.py
1
3629
#
# Copyright (c) 2013 Markus Eliasson, http://www.quarterapp.com/
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import logging

import storage
import tornado.database


class QuarterSettings(object):
    """
    Application settings contains the settings specific to the application,
    not the running server. I.e. port numbers and such should not be kept
    here but in the application configuration file (quarterapp.conf).

    These settings might be updated at runtime
    """
    def __init__(self, db):
        """
        Constructs the application settings and try to update the settings
        from database

        @param db The Tornado database object used to access the database
        """
        self.db = db
        self.settings = {}
        self.update()

    def update(self):
        """
        Update the settings from the database, if cannot read from database
        the old settings remain active
        """
        logging.info("Updating settings...")
        settings = storage.get_settings(self.db)
        if settings:
            for row in settings:
                self.settings[row.name] = row.value
        else:
            logging.warn("Could not find any settings in database - everything setup ok?")

    def get_value(self, key):
        """
        Get the setting value for the given key, if no setting exist for
        this key None is returned
        """
        if self.settings.has_key(key):
            return self.settings[key]
        else:
            return None

    def put_value(self, key, value):
        """
        Updates the value for the given key. If this key does not exist to
        begin with this function will not insert the value. I.e. this
        function will only update existing values.

        @param key The settings key to update value for
        @param value The new value
        """
        if self.settings.has_key(key):
            storage.put_setting(self.db, key, value)
            self.settings[key] = value
        else:
            logging.warning("Trying to update a settings key that does not exists! (%s)", key)
            raise Exception("Trying to update a settings key that does not exists!")


def create_default_config(path):
    """Create a quarterapp.conf file from the example config file"""
    import shutil, os.path
    target = os.path.join(path, 'quarterapp.conf')
    if os.path.exists(target):
        print('Cowardly refusing to overwrite configuration file')
    else:
        shutil.copyfile(os.path.join(os.path.dirname(__file__), 'resources', 'quarterapp.example.conf'), target)
mit
4,340,422,769,168,632,300
38.032258
112
0.669606
false
4.398788
false
false
false
skylifewww/pangolin_new
content/templatetags/content_tags.py
1
4762
from django import template
# from django.contrib.admin.util import lookup_field
# from django.core.exceptions import ObjectDoesNotExist
# from django.core.urlresolvers import NoReverseMatch, reverse
# from django.db.models import ForeignKey
# from django.template.defaulttags import NowNode
# from django.utils.safestring import mark_safe
from django.shortcuts import render_to_response, redirect, get_object_or_404

from content.models import *
from product.models import Category, Support

register = template.Library()


@register.inclusion_tag('meta/title.html')
def meta_title():
    meta = get_object_or_404(Meta, published=1)
    return {'title': meta.meta_title}


@register.inclusion_tag('meta/author.html')
def meta_author():
    meta = get_object_or_404(Meta, published=1)
    return {'author': meta.meta_author}


@register.inclusion_tag('meta/description.html')
def meta_description():
    meta = get_object_or_404(Meta, published=1)
    return {'description': meta.meta_description}


@register.inclusion_tag('meta/keywords.html')
def meta_keywords():
    meta = get_object_or_404(Meta, published=1)
    return {'keywords': meta.meta_keywords}


@register.inclusion_tag('top/image_back.html')
def top_image_back():
    top = get_object_or_404(Top, published=1)
    # return {'image_back': top.image_back}
    return {'image_back': top.slug}


@register.inclusion_tag('top/text_small.html')
def top_text_small():
    top = get_object_or_404(Top, published=1)
    return {'text_small': top.text_small}


@register.inclusion_tag('top/text_big.html')
def top_text_big():
    top = get_object_or_404(Top, published=1)
    return {'text_big': top.text_big}


@register.inclusion_tag('footer/category_footer.html')
def category_footer():
    categories = Category.objects.filter(published=1).order_by('ordering')
    return {'categories': categories}


@register.inclusion_tag('footer/support_footer.html')
def support_footer():
    supports = Support.objects.filter(published=1).order_by('ordering')
    return {'supports': supports}


# @register.inclusion_tag('slides/slides.html')
# def slides():
#     slides = Slide.objects.filter(published=1).order_by('ordering')
#     return {'slides': slides}

# @register.inclusion_tag('menu/main_menu.html')
# def main_menu():
#     menu = Menu.objects.get(pk=1)
#     items = MenuItem.objects.filter(menu=menu, published=1).order_by('ordering')
#     return {'items': items}

# @register.inclusion_tag('comments/comments.html')
# def comments(paket, item_model, item_id):
#     from comments.models import Comments
#     nodes = Comments.objects.filter(paket=paket, item_model=item_model,item_id=item_id, published=1)
#     return {'nodes':nodes, 'paket':paket, 'item_model':item_model, 'item_id':item_id}

# @register.filter(name='suit_conf')
# def suit_conf(name):
#     value = get_config(name)
#     return mark_safe(value) if isinstance(value, str) else value

# @register.tag
# def suit_date(parser, token):
#     return NowNode(get_config('HEADER_DATE_FORMAT'))

# @register.tag
# def suit_time(parser, token):
#     return NowNode(get_config('HEADER_TIME_FORMAT'))

# @register.filter
# def field_contents_foreign_linked(admin_field):
#     """Return the .contents attribute of the admin_field, and if it
#     is a foreign key, wrap it in a link to the admin page for that
#     object.
#     Use by replacing '{{ field.contents }}' in an admin template (e.g.
#     fieldset.html) with '{{ field|field_contents_foreign_linked }}'.
#     """
#     fieldname = admin_field.field['field']
#     displayed = admin_field.contents()
#     obj = admin_field.form.instance
#     if not hasattr(admin_field.model_admin,
#                    'linked_readonly_fields') or fieldname not in admin_field \
#             .model_admin \
#             .linked_readonly_fields:
#         return displayed
#     try:
#         fieldtype, attr, value = lookup_field(fieldname, obj,
#                                               admin_field.model_admin)
#     except ObjectDoesNotExist:
#         fieldtype = None
#     if isinstance(fieldtype, ForeignKey):
#         try:
#             url = admin_url(value)
#         except NoReverseMatch:
#             url = None
#         if url:
#             displayed = "<a href='%s'>%s</a>" % (url, displayed)
#     return mark_safe(displayed)

# @register.filter
# def admin_url(obj):
#     info = (obj._meta.app_label, obj._meta.module_name)
#     return reverse("admin:%s_%s_change" % info, args=[obj.pk])

# @register.simple_tag
# def suit_bc(*args):
#     return utils.value_by_version(args)

# @register.assignment_tag
# def suit_bc_value(*args):
#     return utils.value_by_version(args)
mit
1,238,953,157,483,524,400
25.455556
102
0.660227
false
3.33007
false
false
false
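
The inclusion tags registered in the content_tags.py row above are meant to be dropped into Django templates. A minimal Python-side rendering sketch follows; it assumes a fully configured Django project (the content and product apps installed, their template files on disk, and published Meta/Top/Category rows in the database), so treat it purely as an illustration.

# Renders a string template that loads the tag library shown above.
# Requires django.setup() to have run with the relevant apps installed.
from django.template import engines

django_engine = engines["django"]
template = django_engine.from_string(
    "{% load content_tags %}"
    "<title>{% meta_title %}</title>"
    "{% top_text_big %}"
    "{% category_footer %}"
)
print(template.render())
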
joerideturck/gcloud-python-bigtable
scripts/rewrite_imports.py
1
6220
# Copyright 2015 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Build script for rewriting imports for protobuf generated modules. Intended to be used for Google Cloud Bigtable protos (google/bigtable/v1) and the dependent modules (google/api and google/protobuf). """ import glob IMPORT_TEMPLATE = 'import %s' IMPORT_FROM_TEMPLATE = 'from %s import ' PROTOBUF_IMPORT_TEMPLATE = 'from google.protobuf import %s ' REPLACE_PROTOBUF_IMPORT_TEMPLATE = 'from gcloud_bigtable._generated import %s ' REPLACEMENTS = { 'google.api': 'gcloud_bigtable._generated', 'google.bigtable.admin.cluster.v1': 'gcloud_bigtable._generated', 'google.bigtable.admin.table.v1': 'gcloud_bigtable._generated', 'google.bigtable.v1': 'gcloud_bigtable._generated', 'google.longrunning': 'gcloud_bigtable._generated', 'google.rpc': 'gcloud_bigtable._generated', } GOOGLE_PROTOBUF_CUSTOM = ( 'any_pb2', 'duration_pb2', 'empty_pb2', 'timestamp_pb2', ) def transform_old_to_new(line, old_module, new_module, ignore_import_from=False): """Transforms from an old module to a new one. First checks if a line starts with "from {old_module} import ..." then checks if the line contains "import {old_module} ..." and finally checks if the line starts with (ignoring whitespace) "{old_module} ..." In any of these cases, "{old_module}" is replaced with "{new_module}". If none match, nothing is returned. :type line: str :param line: The line to be transformed. :type old_module: str :param old_module: The import to be re-written. :type new_module: str :param new_module: The new location of the re-written import. :type ignore_import_from: bool :param ignore_import_from: Flag to determine if the "from * import" statements should be ignored. :rtype: :class:`str` or :data:`NoneType <types.NoneType>` :returns: The transformed line if the old module was found, otherwise does nothing. """ if not ignore_import_from: import_from_statement = IMPORT_FROM_TEMPLATE % (old_module,) if line.startswith(import_from_statement): new_import_from_statement = IMPORT_FROM_TEMPLATE % (new_module,) # Only replace the first instance of the import statement. return line.replace(import_from_statement, new_import_from_statement, 1) # If the line doesn't start with a "from * import *" statement, it # may still contain a "import * ..." statement. import_statement = IMPORT_TEMPLATE % (old_module,) if import_statement in line: new_import_statement = IMPORT_TEMPLATE % (new_module,) # Only replace the first instance of the import statement. return line.replace(import_statement, new_import_statement, 1) # Also need to change references to the standalone imports. As a # stop-gap we fix references to them at the beginning of a line # (ignoring whitespace). if line.lstrip().startswith(old_module): # Only replace the first instance of the old_module. return line.replace(old_module, new_module, 1) def transform_line(line): """Transforms an import line in a PB2 module. 
If the line is not an import of one of the packages in ``REPLACEMENTS`` or ``GOOGLE_PROTOBUF_CUSTOM``, does nothing and returns the original. Otherwise it replaces the package matched with our local package or directly rewrites the custom ``google.protobuf`` import statement. :type line: str :param line: The line to be transformed. :rtype: str :returns: The transformed line. """ for old_module, new_module in REPLACEMENTS.iteritems(): result = transform_old_to_new(line, old_module, new_module) if result is not None: return result for custom_protobuf_module in GOOGLE_PROTOBUF_CUSTOM: # We don't use the "from * import" check in transform_old_to_new # because part of `google.protobuf` comes from the installed # `protobuf` library. import_from_statement = PROTOBUF_IMPORT_TEMPLATE % ( custom_protobuf_module,) if line.startswith(import_from_statement): new_import_from_statement = REPLACE_PROTOBUF_IMPORT_TEMPLATE % ( custom_protobuf_module,) # Only replace the first instance of the import statement. return line.replace(import_from_statement, new_import_from_statement, 1) old_module = 'google.protobuf.' + custom_protobuf_module new_module = 'gcloud_bigtable._generated.' + custom_protobuf_module result = transform_old_to_new(line, old_module, new_module, ignore_import_from=True) if result is not None: return result # If no matches, there is nothing to transform. return line def rewrite_file(filename): """Rewrites a given PB2 modules. :type filename: str :param filename: The name of the file to be rewritten. """ with open(filename, 'rU') as file_obj: content_lines = file_obj.read().split('\n') new_content = [] for line in content_lines: new_content.append(transform_line(line)) with open(filename, 'w') as file_obj: file_obj.write('\n'.join(new_content)) def main(): """Rewrites all PB2 files.""" pb2_files = glob.glob('gcloud_bigtable/_generated/*pb2.py') for filename in pb2_files: rewrite_file(filename) if __name__ == '__main__': main()
apache-2.0
6,704,615,396,340,033,000
35.804734
79
0.655949
false
4.005151
false
false
false
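
The transform_line() helper in the rewrite_imports.py row above is pure string manipulation, so its effect is easy to illustrate. The import below assumes the scripts directory is on sys.path; the example lines mimic what protoc-generated _pb2 modules contain.

# Example rewrites performed by transform_line(); run with Python 2
# (the script itself uses dict.iteritems()).
from rewrite_imports import transform_line  # assumes scripts/ is on sys.path

print(transform_line("from google.bigtable.v1 import bigtable_data_pb2"))
# -> from gcloud_bigtable._generated import bigtable_data_pb2

print(transform_line(
    "from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2"))
# -> from gcloud_bigtable._generated import empty_pb2 as google_dot_protobuf_dot_empty__pb2

print(transform_line("import os"))  # unrelated imports pass through unchanged
# -> import os
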
AechPro/Machine-Learning
Partners Healthcare/2016 Breast Cancer/dev/ReconNet/util/Working_Batch_Normalization.py
1
3833
from keras.engine import Layer, InputSpec from keras import initializers, regularizers from keras import backend as K class FixedBatchNormalization(Layer): def __init__(self, epsilon=1e-3, axis=-1, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None, **kwargs): self.supports_masking = True self.beta_init = initializers.get(beta_init) self.gamma_init = initializers.get(gamma_init) self.epsilon = epsilon self.axis = axis self.gamma_regularizer = regularizers.get(gamma_regularizer) self.beta_regularizer = regularizers.get(beta_regularizer) self.initial_weights = weights super(FixedBatchNormalization, self).__init__(**kwargs) def build(self, input_shape): self.input_spec = [InputSpec(shape=input_shape)] shape = (input_shape[self.axis],) self.gamma = self.add_weight(shape, initializer=self.gamma_init, regularizer=self.gamma_regularizer, name='{}_gamma'.format(self.name), trainable=False) self.beta = self.add_weight(shape, initializer=self.beta_init, regularizer=self.beta_regularizer, name='{}_beta'.format(self.name), trainable=False) self.running_mean = self.add_weight(shape, initializer='zero', name='{}_running_mean'.format(self.name), trainable=False) self.running_std = self.add_weight(shape, initializer='one', name='{}_running_std'.format(self.name), trainable=False) if self.initial_weights is not None: self.set_weights(self.initial_weights) del self.initial_weights self.built = True def call(self, x, mask=None): assert self.built, 'Layer must be built before being called' input_shape = K.int_shape(x) reduction_axes = list(range(len(input_shape))) del reduction_axes[self.axis] broadcast_shape = [1] * len(input_shape) broadcast_shape[self.axis] = input_shape[self.axis] if sorted(reduction_axes) == range(K.ndim(x))[:-1]: x_normed = K.batch_normalization( x, self.running_mean, self.running_std, self.beta, self.gamma, epsilon=self.epsilon) else: # need broadcasting broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape) broadcast_running_std = K.reshape(self.running_std, broadcast_shape) broadcast_beta = K.reshape(self.beta, broadcast_shape) broadcast_gamma = K.reshape(self.gamma, broadcast_shape) x_normed = K.batch_normalization( x, broadcast_running_mean, broadcast_running_std, broadcast_beta, broadcast_gamma, epsilon=self.epsilon) return x_normed def get_config(self): config = {'epsilon': self.epsilon, 'axis': self.axis, 'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None, 'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None} base_config = super(FixedBatchNormalization, self).get_config() return dict(list(base_config.items()) + list(config.items()))
apache-2.0
-2,535,344,098,606,507,000
44.180723
111
0.547091
false
4.436343
true
false
false
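
A minimal sketch of using the FixedBatchNormalization layer above inside a Keras 2 model, where it acts as a batch-norm layer with frozen statistics. The module name in the import reflects the row's file path and is an assumption, as is the channels-last image tensor (hence axis=3).

# Hypothetical usage of FixedBatchNormalization; assumes Keras 2 with a TF backend.
from keras.layers import Input, Conv2D
from keras.models import Model
from Working_Batch_Normalization import FixedBatchNormalization  # assumed module name

inputs = Input(shape=(64, 64, 3))
x = Conv2D(16, (3, 3), padding="same", name="conv1")(inputs)
x = FixedBatchNormalization(axis=3, name="bn_conv1")(x)  # running stats are non-trainable
model = Model(inputs, x)
model.summary()
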
BaichuanWu/Blog_on_django
blog/views/article.py
1
4770
#!usr/bin/env python # -*-coding:utf-8-*- """ author:wubaichuan """ import json from django.contrib.auth.decorators import login_required from django.shortcuts import render, redirect, HttpResponse from blog.forms import ArticleForm from blog.models import Article, ArticleType, User, Reply from blog.tools.json_type import CJsonEncoder def article_type(request, type_slug): context_dict = {} try: a_type = ArticleType.objects.get(slug=type_slug) context_dict['article_type'] = a_type arts = Article.objects.filter(article_type=a_type).order_by('-create_date') context_dict['articles'] = arts print context_dict except ArticleType.DoesNotExist: pass return render(request, 'blog/article/atype.html', context_dict) def article(request, art_id): context_dict = {} try: art = Article.objects.get(id=art_id) context_dict['article'] = art except ArticleType.DoesNotExist: pass return render(request, 'blog/article/article.html', context_dict) def profile(request, author_id): context_dict = {} try: user = User.objects.get(id=author_id) context_dict['author'] = user article_list = Article.objects.filter(author_id=author_id).order_by('-revised_date') context_dict['articles'] = article_list except ArticleType.DoesNotExist: pass return render(request, 'blog/article/profile.html', context_dict) def get_reply(request): if request.method == 'POST': rid = request.POST.get('nid') reply_list = Reply.objects.filter(article_id=rid).values('id', 'content', 'create_date', 'user__user__username').order_by('create_date') reply_list = list(reply_list) reply_list = json.dumps(reply_list, cls=CJsonEncoder) return HttpResponse(reply_list) @login_required def add_reply(request): reply_list = {} if request.method == 'POST': rid = request.POST.get('nid') content = request.POST.get('data') try: new_r = Reply.objects.create(content=content, article_id=rid, user=request.user.userprofile, ) art = Article.objects.get(id=rid) art.reply_count = Reply.objects.filter(article_id=rid).count() art.save() reply_list = {'content': new_r.content, 'create_date': new_r.create_date, 'user': new_r.user.user.username, 'count': art.reply_count } reply_list = json.dumps(reply_list, cls=CJsonEncoder) except Exception, e: print e return HttpResponse(reply_list) @login_required def add_article(request): if request.method == 'POST': form = ArticleForm(request.POST) title = request.POST.get('title') summary = request.POST.get('summary') content = request.POST.get('content') type_id = request.POST.get('article_type_id') if form.is_valid(): Article.objects.get_or_create(title=title, summary=summary, content=content, article_type_id=type_id, author_id=request.user.id) request.session['notice'] = u'%s发布成功' % title return redirect('/blog/') else: print form.errors error = form.errors.as_data().values()[0][0].messages[0] return render(request, 'blog/article/add_article.html', {'error': error, 'title': title, 'summary': summary, 'content': content}) else: return render(request, 'blog/article/add_article.html') @login_required def delete_article(request, art_id): art_id = int(art_id) art = Article.objects.filter(id=art_id)[0] if request.user.id == art.author.user.id: art.delete() return redirect('/blog/profile/%d' % request.user.id) else: return redirect('/blog/') @login_required def edit_article(request, art_id): art_id = int(art_id) art = Article.objects.filter(id=art_id)[0] if request.method == 'POST': content = request.POST.get('content') if content == '': return render(request, 'blog/article/edit_article.html', {'error': u'必须填写内容', 'article': art}) art.content 
= content art.save() return redirect('/blog/profile/%d' % request.user.id) else: return render(request, 'blog/article/edit_article.html', {'article': art})
mit
5,132,814,077,789,584,000
34.984848
112
0.578526
false
3.852393
false
false
false
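
The view functions in the blog/views/article.py row above need URL patterns to be reachable. A hypothetical Django 1.x style urls.py wiring is sketched below; the regexes and names are invented for illustration (the views themselves redirect to /blog/... paths) and are not taken from the project's actual urls.py.

# Hypothetical urls.py for the article views shown above.
from django.conf.urls import url
from blog.views import article as article_views

urlpatterns = [
    url(r'^article/(?P<art_id>\d+)/$', article_views.article, name='article'),
    url(r'^type/(?P<type_slug>[-\w]+)/$', article_views.article_type, name='article_type'),
    url(r'^profile/(?P<author_id>\d+)/$', article_views.profile, name='profile'),
    url(r'^article/add/$', article_views.add_article, name='add_article'),
    url(r'^article/(?P<art_id>\d+)/edit/$', article_views.edit_article, name='edit_article'),
]
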
JulianSchuette/android-instrumentation
injector/injector/logger.py
1
1218
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2012, The Honeynet Project. All rights reserved. # Author: Kun Yang <kelwya@gmail.com> # # APKIL is free software: you can redistribute it and/or modify it under # the terms of version 3 of the GNU Lesser General Public License as # published by the Free Software Foundation. # # APKIL is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for # more details. # # You should have received a copy of the GNU Lesser General Public License # along with APKIL. If not, see <http://www.gnu.org/licenses/>. import logging debug = 'TERM' debug = "" log = None def Pass(*args): pass if debug == "TERM": logging.basicConfig(level=logging.DEBUG, format='%(filename)s Line:%(lineno)d Fun:%(funcName)s %(message)s',) log = logging.debug elif debug == "FILE": logging.basicConfig(level=logging.DEBUG, format='%(asctime)s Line:%(lineno)d Fun:%(funcName)s %(message)s', filename='./apkil.log', filemode='w') log = logging.debug else: log = Pass
apache-2.0
-9,155,691,572,675,177,000
29.45
81
0.690476
false
3.510086
false
false
false
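
The logger module above exposes a single module-level callable, log, which is either logging.debug or a no-op depending on the module-level debug flag; as shipped, debug is immediately reset to "" so logging is disabled. A small usage sketch, with the import path assumed from the row's path field:

# With debug == "" this call is a silent no-op; change the module-level debug
# to "TERM" or "FILE" to get real output.
from injector import logger  # assumed import path

logger.log("instrumenting %s", "classes.dex")
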
DaveBackus/Data_Bootcamp
Code/Python/bootcamp_mini_basics.py
1
3994
""" For Class #1 of an informal mini-course at NYU Stern, Fall 2014. Topics: calculations, assignments, strings, slicing, lists, data frames, reading csv and xls files Repository of materials (including this file): * https://github.com/DaveBackus/Data_Bootcamp Written by Dave Backus, Sarah Beckett-Hile, and Glenn Okun Created with Python 3.4 """ """ Check Python version """ # https://docs.python.org/3.4/library/sys.html import sys print('\nWhat version of Python? \n', sys.version, '\n', sep='') if float(sys.version_info[0]) < 3.0 : raise Exception('Program halted, old version of Python. \n', 'Sorry, you need to install Anaconda again.') else: print('Congratulations, Python is up to date!') #%% """ Calculations and assignments (best in IPython console) """ 2*3 2 * 3 2^3 log(3) #%% """ Strings """ a = 'some' b = 'thing' c = a + b print('c = ', c) print('c[1] is:', c[1]) print('c[1:2] is', c[1:2]) print('c[1:3] is:', c[1:3]) print('c[1:] is:', c[1:]) #%% #print(['a[1:3]', a[1:3]]) # names first, last = 'Dave', 'Backus' full = first + ' ' + last #%% longstring = """ Four score and seven years ago Our fathers brought forth """ print(longstring) #%% """ Output and input """ print(full) print(first, last) print(last, ', ', first) print(last, ', ', first, sep='') #x = input('Type your name here --> ') #print(x, end='\n\n') """ Lists """ numbers = [x, y, z] strings = [a, b, c] both = numbers + strings print(['both[3:]', both[3:]]) #%% """ Functions """ def hello(firstname): print('Hello, ', firstname) hello('Dave') #%% def combine(first, last): lastfirst = last + ', ' + first return lastfirst both = combine('Chase', 'Coleman') print(both) #%% """ Inputting data """ import pandas as pd # check version print('Pandas version ', pd.__version__) # read from local file file = '../Data/test1.csv' df = pd.read_csv(file) #%% # some properties print(df) print(type(df)) print(['Shape is', df.shape]) print(df.mean()) print(df.columns) print(['column labels', df.columns]) print(['row labels', df.index]) #%% # read from url url = 'https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/Data/test1.csv' dfurl = pd.read_csv(url) #%% import pandas as pd # read IMF's WEO data from url = 'http://www.imf.org/external/pubs/ft/weo/2015/01/weodata/WEOApr2015all.xls' weo = pd.read_csv(url, sep='\t') # tab = \t print(weo.head()) print(['column labels', weo.columns]) print(['row labels', weo.index]) #%% countries = ['AFG', 'USA'] variables = ['NGDP_RPCH', 'FLIBOR6'] weo_sub = weo[weo['ISO'].isin(countries) & weo['WEO Subject Code'].isin(variables)] weo_sub.to_csv('weo.csv') #%% # copy file from url to hard drive import urllib.request # this is a module from the package urllib file = 'foo.csv' url = 'https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/Data/test1.csv' urllib.request.urlretrieve(url, file) #%% # Sarah's version f = urllib.request.urlopen(url) file = 'foo_sbh.csv' with open(file, 'wb') as local_file: local_file.write(f.read()) #%% # read from xls file = '../Data/test2.xlsx' xls = pd.read_excel(file) # default is first sheet #%% # zip files import pandas as pd import urllib import zipfile import os # this is a big file, best to test with something smaller url = 'http://databank.worldbank.org/data/download/WDI_csv.zip' file = os.path.basename(url) # strip out file name urllib.request.urlretrieve(url, file) # copy to disk # see what's there print(['Is zipfile?', zipfile.is_zipfile(file)]) zf = zipfile.ZipFile(file, 'r') print('List of zipfile contents (two versions)') [print(file) 
for file in zf.namelist()] zf.printdir() # extract a component csv = zf.extract('WDI_Data.csv') # copy to disk df1 = pd.read_csv('WDI_Data.csv') # read print(df1.columns) # check contents # alternative: open and read csv = zf.open('WDI_Data.csv') df2 = pd.read_csv(csv) print(df2.columns)
mit
3,868,143,909,878,868,500
19.587629
93
0.649975
false
2.758287
false
false
false
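
The last part of the bootcamp script above downloads a large World Bank zip archive before reading a CSV out of it. The same zipfile-plus-pandas pattern can be exercised without any download by building a tiny in-memory archive; the snippet below is an illustration, not part of the course materials.

# Self-contained illustration of reading a CSV from inside a zip with pandas.
import io
import zipfile
import pandas as pd

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("mini.csv", "x,y\n1,2\n3,4\n")
buf.seek(0)
with zipfile.ZipFile(buf, "r") as zf:
    df = pd.read_csv(zf.open("mini.csv"))
print(df)
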
Christian-B/galaxy_shedtools
multi_merger/merge_files.py
1
13066
import collections import optparse # using optparse as hyrda still python 2.6 import re import sys def report_error(error): """Prints the error, and exits -1""" print error sys.stderr.write(error) sys.stderr.write("\n") sys.stderr.flush() sys.exit(1) def clean_part(part, tab_replace=" "): part = part.strip() part = part.replace("\t", tab_replace) return part def merge_files(file_paths, file_names, target_path=None, divider="\t", reguired_row_regexes=[], negative_row_regexes=[], column_sort=False, row_sort=False, na_value="", tab_replace=" ", verbose=False): """ Merges a list of files into a single tsv file. file_paths is a list of paths to the input files file_names is an equally long list of names for these files which will be taken as the column names. Note: use run_merge_files to shorten the file_names, read them from a file or to use the file_paths as file names target_path specifies where the tsv file will be written divider is the string to be search for in each line of the input files. If found exactly once the part before will be considered a row_name and the part after the data Note: If the same row_name is found only the last line is used. column_sort and row_sort if set cause the data to be sorted accordingly. reguired_row_regexes if provided must be a list of regex patterns. Each row_name must match at least one of these for the row to be included negative_row_regexes if provided must be a list of regex patterns. Each row_name must match none of these for the row to be included na_value whenever a file does not have data for a row_name tab_replace is used to replace any tabs that remain in the row_names and or data after they have been striped of starting and ending whitespace verbose if set will cause more verbose infomormation such as lines that do not have the divider """ # Check parameters if not file_paths: report_error("One or more file_paths parameter must be provided") if not target_path: report_error("No target_path parameter provided") if len(file_names) != len(file_paths): report_error("Found " + str(len(file_paths)) + " file_paths but file_names/names_path contains " + str(len(file_names)) + " values.") # Read data from file all_values = collections.defaultdict(lambda: collections.defaultdict(lambda: na_value)) for count, file_path in enumerate(file_paths): mis_match = 0 with open(file_path, 'r') as f: for line in f: parts = line.strip().split(divider) if len(parts) == 2: key = clean_part(parts[0], tab_replace) value = clean_part(parts[1], tab_replace) all_values[key][file_names[count]] = value else: mis_match += 1 if verbose: if mis_match < 5: print "ignoring following line from", file_path print line if mis_match > 0: print "In file " + file_path + " " + str(mis_match) + " lines did not have 1 divider (" + divider + ") " # rows names are all the keys from the data found row_names = all_values.keys() # check row_names against the regex rules if reguired_row_regexes or negative_row_regexes: ok_names = [] if reguired_row_regexes: reguired_res = [] for reguired_row_regex in reguired_row_regexes: reguired_res.append(re.compile(reguired_row_regex)) if negative_row_regexes: negative_res = [] for negative_row_regex in negative_row_regexes: negative_res.append(re.compile(negative_row_regex)) for row_name in row_names: if reguired_row_regexes: ok = False for reguired_re in reguired_res: if reguired_re.search(row_name): ok = True else: ok = True if negative_row_regexes and ok: for negative_re in negative_res: if negative_re.search(row_name): ok = False if ok: 
ok_names.append(row_name) row_names = ok_names # Sort keys if required if column_sort: file_names = sorted(file_names) if row_sort: row_names = sorted(row_names) # Write the data with open(target_path, 'w') as f: for name in file_names: f.write("\t") f.write(name) f.write("\n") for key in row_names: f.write(key) for name in file_names: f.write("\t") f.write(all_values[key][name]) f.write("\n") # To run the method shortening and if reguried getting file_names or file_paths use this section def remove_common(names): start = names[0] end = names[0] for name in names: while len(start) > 0 and not(name.startswith(start)): start = start[: -1] while len(end) > 0 and not(name.endswith(end)): end = end[1:] new_names = [] for name in names: if len(end) > 0: new_name = name[len(start): -len(end)] else: new_name = name[len(start):] new_names.append(new_name) return new_names # See merge_files method for kwargs def run_merge_files(file_paths=[], file_names=[], files_path=None, **kwargs): """ Handles file paths and file names before calling merge-files. file_paths is a list of the paths to be merge together. file_names is a list of names that will be shortened and then used for column names. The lenght of file_names must match file_paths, and the order is relevant to file_names. files_path if provided will the path of files paths and or file names to be used if they are not supplied directly. The kwargs arguements are defined by merge_files method which is called at the end of this method. """ # read file_paths and/or file_names if required if files_path: if file_paths: print "Using parameters file_paths and not the ones in files_path" else: file_paths = read_names(files_path) if file_names: print "Using parameters file_names and not the ones in files_path" else: file_names = read_names(files_path) # use file_paths if no file_names provided if not file_names: file_names = file_paths #To avoid wide column names the start and end text shared by all names is removed file_names = remove_common(file_names) #Call the name merge_files method merge_files(file_paths, file_names, **kwargs) # From here on down is the code if this is being run from the command line including galaxy. 
def remove_symbols(s): if s.find("__") == -1: return s # Patterns used by Galaxy s = s.replace("__cb__", ']') s = s.replace("__cc__", '}') s = s.replace("__dq__", '"') s = s.replace("__lt__", '<') s = s.replace("__gt__", '>') s = s.replace("__ob__", '[') s = s.replace("__oc__", '{') s = s.replace("__sq__", "'") # Patterns added by Christian s = s.replace("__in__", '%in%') s = s.replace("__not__", '!') end = 0 # tab = 9 # | = 124 while True: start = s.find("__", end) if start == -1: return s end = s.find("__", start + 2) + 2 if end == 1: return s part = s[start + 2: end - 2] if part == "": # start + 2 to leave one set of __ behind s = s[:start + 2] + s[end:] end = start + 2 else: try: ascii = int(part) s = s[:start] + chr(ascii) + s[end:] end = start - 1 # (2) __ removed before start and one character added after so -1 except ValueError: pass return s def read_names(names_path): names = [] with open(names_path, 'r') as f: for line in f: line = line.strip() if len(line) > 0: names.append(line) return names if __name__ == '__main__': parser = optparse.OptionParser() parser.add_option("--verbose", action="store_true", default=False, help="If set will generate output of what the tool is doing.") parser.add_option("--file_path", action="append", type="string", help="Path to one of the files to be merged together.") parser.add_option("--file_name", action="append", type="string", help="Names for the files. To be used to generate column names. " "Order and size are relavant and must match file_path. " "Optional: Can also be provides as a path to a file using names_path " "If neither are provide the file_paths are used.") parser.add_option("--files_path", action="store", type="string", help="Path to file that holds the file_paths and or file_names. " "Ignored if file_paths and or file_names are provided directly.") parser.add_option("--target_path", action="store", type="string", help="Path to write merged data to") parser.add_option("--divider", action="store", type="string", help="Divider between key and value. Special symbols can be entered using galaxy code or __acsii__ (for __ use ____). " "Note: After splitiing on divider both parts will be trimmed for whitespace.") parser.add_option("--na_value", action="store", type="string", help="String to use when the part before the divider/ row name is found in some files but not in others. " "Default if not specified is a blank. ") parser.add_option("--column_sort", action="store_true", default=False, help="If set will sort the columns based on shortened file names.") parser.add_option("--row_sort", action="store_true", default=False, help="If set will sort the row based on shortened file names.") parser.add_option("--reguired_row_regex", action="append", type="string", help="If provided, only rows whose cleaned name matches one or more of these regex rules will be kept. " "Special symbols can be entered using galaxy code or __acsii__ (for __ use ____) ") parser.add_option("--negative_row_regex", action="append", type="string", help="If provided, only rows whose cleaned name matches none of these regex rules will be kept. " "Special symbols can be entered using galaxy code or __acsii__ (for __ use ____) ") parser.add_option("--tab_replace", action="store", type="string", default=" ", help="Value to beinserted in data including column and row names whenever a tab is found. 
" "Default is a single space.") (options, args) = parser.parse_args() if not options.divider: report_error("No divider parameter provided") clean_divider = remove_symbols(options.divider) if options.verbose and (clean_divider != options.divider): print "divider", options.divider, "cleaned to", clean_divider options.divider = clean_divider if not options.na_value: if options.verbose: print "As no na-value provided a blank space will be used" options.na_value = "" if not options.tab_replace: options.tab_replace = " " if not options.reguired_row_regex: options.reguired_row_regex = [] for i, rule in enumerate(options.reguired_row_regex): clean_rule = remove_symbols(rule) if options.verbose and (clean_rule != rule): print "reguired_row_regex", rule, "cleaned to", clean_rule options.reguired_row_regex[i] = clean_rule if not options.negative_row_regex: options.negative_row_regex = [] for i, rule in enumerate(options.negative_row_regex): clean_rule = remove_symbols(rule) if options.verbose and (clean_rule != rule): print "negative_row_regex", rule, "cleaned to", clean_rule options.negative_row_regex[i] = remove_symbols(rule) run_merge_files(file_paths=options.file_path, file_names=options.file_name, files_path=options.files_path, target_path=options.target_path, verbose=options.verbose, divider=options.divider, column_sort=options.column_sort, row_sort=options.row_sort, na_value=options.na_value, tab_replace=options.tab_replace, reguired_row_regexes=options.reguired_row_regex, negative_row_regexes=options.negative_row_regex)
gpl-2.0
-6,640,525,352,530,222,000
40.878205
147
0.591612
false
3.985967
false
false
false
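
Besides the Galaxy command-line entry point, the merge_files.py module above can be driven directly from Python via run_merge_files(). The file names below are invented; note that remove_common() strips the shared prefix and suffix, so the two columns would come out as "A" and "B".

# Hypothetical programmatic use of run_merge_files() (Python 2, matching the module).
from merge_files import run_merge_files  # assumes the module is on sys.path

run_merge_files(
    file_paths=["sample_A.counts.tsv", "sample_B.counts.tsv"],
    file_names=["sample_A.counts.tsv", "sample_B.counts.tsv"],
    target_path="merged.tsv",
    divider="\t",
    column_sort=True,
    row_sort=True,
    na_value="0",
)
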
gagoncal/Selenium
2016/backup/Equal_Experts/pages/LoggedInProductPage.py
1
1708
from BasePage import BasePage from Equal_Experts.Constants import EE_Constants from BasePage import IncorrectPageException from Equal_Experts.UIMap import LoggedInPoductPageMap from NewSnippetPage import NewSnippetPage from UploadPage import UploadPage class LoggedInProductPage(BasePage): def __init__(self, driver): super(LoggedInProductPage, self).__init__(driver) def _verify_page(self): try: self.wait_for_element_visibility(10, "xpath", LoggedInPoductPageMap['UserFieldXpath'] ) except: print LoggedInProductPageMap['UserFieldXpath'] raise IncorrectPageException def click_NewSnippet_button(self): #mainWindowHandle = self.driver.window_handles self.click(10, "xpath", LoggedInPoductPageMap['NewSnippetLinkXpath']) #allWindowHandles = self.driver.window_handles #for handle in allWindowHandles: #if handle != mainWindowHandle[0]: # self.switch_to_window(handle) # break return NewSnippetPage(self.driver, EE_Constants['NewSnippetContent'] ) def Click_Upload_button(self): self.click(10, "xpath", LoggedInPoductPageMap['UploadLinkXpath']) #allWindowHandles = self.driver.window_handles #for handle in allWindowHandles: #if handle != mainWindowHandle[0]: # self.switch_to_window(handle) # break return UploadPage(self.driver, EE_Constants['Path_To_File'] )
lgpl-2.1
-8,497,478,021,366,939,000
35.340426
80
0.596604
false
4.125604
false
false
false
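
A usage sketch for the LoggedInProductPage page object above. The webdriver setup, the preceding login flow and the import path are assumptions, and whether page verification runs on construction depends on the BasePage class, which is not shown in this row.

# Hypothetical test-side usage of the page object.
from selenium import webdriver
from Equal_Experts.pages.LoggedInProductPage import LoggedInProductPage  # assumed path

driver = webdriver.Firefox()
# ... navigate to the product page and log in first, so the user field is visible ...
page = LoggedInProductPage(driver)
snippet_page = page.click_NewSnippet_button()  # returns a NewSnippetPage
upload_page = page.Click_Upload_button()       # returns an UploadPage
driver.quit()
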
smartsheet-platform/smartsheet-python-sdk
smartsheet/models/date_object_value.py
1
1644
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101 # Smartsheet Python SDK. # # Copyright 2018Smartsheet.com, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"): you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import from .object_value import * from ..util import deserialize from datetime import datetime from dateutil.parser import parse class DateObjectValue(ObjectValue): """Smartsheet DateObjectValue data model.""" def __init__(self, props=None, object_type=None, base_obj=None): """Initialize the DateObjectValue model.""" super(DateObjectValue, self).__init__(object_type, base_obj) self._base = None if base_obj is not None: self._base = base_obj self._value = None if props: deserialize(self, props) self.__initialized = True @property def value(self): return self._value @value.setter def value(self, value): if isinstance(value, datetime): self._value = value else: if isinstance(value, six.string_types): value = parse(value) self._value = value
apache-2.0
1,558,399,692,050,538,500
29.444444
75
0.669708
false
4.059259
false
false
false
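
A small sketch of the DateObjectValue behaviour above: the value setter accepts either a datetime or a string, and strings are run through dateutil.parser.parse. Constructing the object directly like this bypasses the SDK's usual deserialization plumbing, so treat it purely as an illustration.

# Direct use of DateObjectValue; normally the SDK builds these while deserializing.
from datetime import datetime
from smartsheet.models.date_object_value import DateObjectValue

dov = DateObjectValue(object_type="DATE")
dov.value = "2018-04-15"           # parsed into a datetime by dateutil
print(dov.value)                   # 2018-04-15 00:00:00
dov.value = datetime(2018, 4, 16)  # datetime instances are stored as-is
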
coberger/DIRAC
Core/Base/DB.py
1
2013
""" DB is a base class for multiple DIRAC databases that are based on MySQL. It uniforms the way the database objects are constructed """ __RCSID__ = "$Id$" from DIRAC import gLogger, gConfig from DIRAC.Core.Utilities.MySQL import MySQL from DIRAC.ConfigurationSystem.Client.Utilities import getDBParameters from DIRAC.ConfigurationSystem.Client.PathFinder import getDatabaseSection class DB( MySQL ): def __init__( self, dbname, fullname, debug = False ): self.fullname = fullname database_name = dbname self.log = gLogger.getSubLogger( database_name ) result = getDBParameters( fullname ) if not result['OK'] : raise Exception( 'Cannot get database parameters: %s' % result['Message'] ) dbParameters = result[ 'Value' ] self.dbHost = dbParameters[ 'Host' ] self.dbPort = dbParameters[ 'Port' ] self.dbUser = dbParameters[ 'User' ] self.dbPass = dbParameters[ 'Password' ] self.dbName = dbParameters[ 'DBName' ] MySQL.__init__( self, self.dbHost, self.dbUser, self.dbPass, self.dbName, self.dbPort, debug = debug ) if not self._connected: raise RuntimeError( 'Can not connect to DB %s, exiting...' % self.dbName ) self.log.info( "==================================================" ) #self.log.info("SystemInstance: "+self.system) self.log.info( "User: " + self.dbUser ) self.log.info( "Host: " + self.dbHost ) self.log.info( "Port: " + str( self.dbPort ) ) #self.log.info("Password: "+self.dbPass) self.log.info( "DBName: " + self.dbName ) self.log.info( "==================================================" ) ############################################################################# def getCSOption( self, optionName, defaultValue = None ): cs_path = getDatabaseSection( self.fullname ) return gConfig.getValue( "/%s/%s" % ( cs_path, optionName ), defaultValue )
gpl-3.0
4,559,042,075,579,786,000
39.26
81
0.574267
false
3.978261
false
false
false
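
The DB class above is only useful as a base class: concrete DIRAC databases subclass it and pass their own name plus the full configuration-system path. A hypothetical subclass is sketched below; the database name, CS section and option are invented, and a working DIRAC configuration with matching database parameters is assumed.

# Hypothetical DIRAC database built on the DB base class above.
from DIRAC.Core.Base.DB import DB

class ExampleJobDB(DB):
    def __init__(self):
        # fullname selects the CS section read by getDBParameters()
        DB.__init__(self, 'ExampleJobDB', 'WorkloadManagement/ExampleJobDB', debug=False)

    def getMaxJobs(self):
        # getCSOption() reads an option from this database's CS section, with a default
        return self.getCSOption('MaxJobs', 100)
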