code
stringlengths
2
1.05M
repo_name
stringlengths
5
104
path
stringlengths
4
251
language
stringclasses
1 value
license
stringclasses
15 values
size
int32
2
1.05M
from npoapi.npoapi import NpoApi


class Schedule(NpoApi):
    """Client for the NPO Frontend API schedule endpoints."""

    def get(self, guideDay=None, channel=None, sort="asc", offset=0, limit=240, properties=None, accept=None):
        """Fetch schedule items, optionally restricted to one channel.

        :param guideDay: guide day to fetch (None for the default day)
        :param channel: channel id; if given, the per-channel endpoint is used
        :param sort: sort order, "asc" or "desc"
        :param offset: paging offset
        :param limit: maximum number of items (sent as the API's "max" param)
        :param properties: comma-separated extra properties to include
        :param accept: desired response content type, forwarded to request()
        :return: whatever NpoApi.request() returns for this endpoint
        """
        params = {
            'guideDay': guideDay,
            "sort": sort,
            "max": limit,
            "offset": offset,
            "properties": properties,
        }
        if channel:
            return self.request("/api/schedule/channel/" + channel, params=params, accept=accept)
        # BUG FIX: this branch previously omitted accept=accept, silently
        # ignoring the caller's requested content type. Forward it so both
        # branches (and search() below) behave consistently.
        return self.request("/api/schedule", params=params, accept=accept)

    def search(self, form="{}", sort="asc", offset=0, limit=240, profile=None, properties=None, accept=None):
        """POST a search form to the schedule endpoint.

        :param form: JSON search form (string), sent as the request body
        :param sort: sort order, "asc" or "desc"
        :param offset: paging offset
        :param limit: maximum number of items (sent as "max")
        :param profile: API profile name
        :param properties: comma-separated extra properties to include
        :param accept: desired response content type
        :return: whatever NpoApi.request() returns for this endpoint
        """
        return self.request(
            "/api/schedule/", data=form, accept=accept,
            params={
                "profile": profile,
                "sort": sort,
                "offset": offset,
                "max": limit,
                "properties": properties,
            })
npo-poms/pyapi
npoapi/schedule.py
Python
gpl-3.0
871
import sys
import os
import contextlib

from ..data.molecular_species import molecular_species
from ..data.reaction_mechanism_class import reaction_mechanism
from ..data.condition_class import condition
from ..data.reagent import reagent
from ..data.puzzle_class import puzzle
from ..data.solution_class import solution


def name(class_obj):
    """Return the bare class name, used for result filenames and report lines."""
    return class_obj.__name__


# depends on JSON base class
for class_being_tested in [molecular_species, condition, reaction_mechanism,
                           reagent, puzzle, solution]:
    # Pipe the class's own test() output to a per-class result file.
    # BUG FIX: the original swapped sys.stdout by hand with no try/finally,
    # so an exception inside test() left stdout hijacked and the file open.
    # redirect_stdout + `with open` restore stdout and close the file in
    # every case.
    result_path = os.getcwd() + "/testing_result_" + name(class_being_tested) + ".txt"
    with open(result_path, "w") as result_file, \
            contextlib.redirect_stdout(result_file):
        test_result = class_being_tested.test()
    if test_result:
        print("PASSED", name(class_being_tested), sep=" ")
    else:
        print("FAILED", name(class_being_tested), sep=" ")
ckwatson/kernel
tests/quick_test.py
Python
gpl-3.0
893
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog  # BUG FIX: needed for tk.filedialog in save()
from multiprocessing import pool
import numpy as np
import stat_functions as stat
from scipy import stats as scstats
from gui import gui_functions as guif
import os
import functions as fu


class process_charts(ttk.Frame):
    """Tab frame showing diagnostic charts (histogram/correlograms) for
    normalized residuals of a fitted panel model."""

    def __init__(self, window, master, main_tabs, tabs):
        style = ttk.Style()
        style.configure("TFrame", background='white')
        ttk.Frame.__init__(self, master, style='new.TFrame')
        self.window = window
        self.ll = None            # last log-likelihood object plotted
        self.initialized = False  # add_content() runs once, on first plot
        self.subplot = tabs.subplot
        self.print_subplot = tabs.print_subplot
        self.img_tmp = tabs.img_tmp

    def get_images_for_storage(self):
        """Return (path, name) pairs for the currently displayed charts."""
        charts = []
        for i in self.charts:
            charts.append((i.path, i.name))
        return charts

    def charts_from_stored(self, charts):
        """Rebuild the chart widgets and re-display previously stored images."""
        self.add_content()
        if charts is None:
            return
        for i in range(len(charts)):
            path, name = charts[i]
            guif.display_from_img(self.charts[i], path, name, i)

    def add_content(self):
        """Create the chart labels and their 'Save image' buttons."""
        self.n_charts = 3
        self.columnconfigure(0, weight=1)
        for i in range(self.n_charts + 1):
            self.rowconfigure(i, weight=1)
        tk.Label(self, text='Charts on normalized residuals:', bg='white',
                 font='Tahoma 10 bold').grid(row=0, column=0)
        self.charts = []
        for i in range(self.n_charts):
            frm = tk.Frame(self, background='white')
            frm.rowconfigure(0, weight=1)
            frm.rowconfigure(1)
            frm.columnconfigure(0, weight=1)
            self.charts.append(tk.Label(frm, background='white'))
            self.charts[i].grid(row=0, column=0)
            chart_path = os.path.join(os.getcwd(), 'img', f'chart{i}.png')
            self.charts[i].path = fu.obtain_fname(chart_path)  # self.img_tmp.TemporaryFile()
            # BUG FIX: bind i at definition time (i=i). The original lambda
            # captured the loop variable late, so after the loop finished every
            # button invoked save() with the same (final) value of i.
            guif.setbutton(frm, 'Save image',
                           lambda i=i: self.save(self.n_charts - i - 1),
                           bg='white').grid(row=1, column=0)
            frm.grid(row=i + 1)

    def save(self, i):
        """Ask for a filename and save chart i as a jpg via its plot routine."""
        if not hasattr(self.charts[i], 'graph_file') or not hasattr(self, 'panel'):
            print('No graphics displayed yet')
            return
        name = self.charts[i].name
        f = tk.filedialog.asksaveasfile(mode='bw', defaultextension=".jpg",
                                        initialfile=f"{name}.jpg")
        if f is None:  # user cancelled the dialog
            return
        flst = [
            self.histogram,
            self.correlogram,
            self.correlogram_variance,
        ]
        flst[i](self.ll, self.print_subplot, f)
        f.close()

    def initialize(self, panel):
        """Store the panel and build the widgets, once."""
        if not self.initialized:
            self.panel = panel
            self.add_content()
            self.initialized = True

    def plot(self, ll):
        """Draw all three diagnostic charts for log-likelihood object ll."""
        self.initialize(ll.panel)
        self.ll = ll
        self.histogram(ll, self.subplot)
        self.correlogram(ll, self.subplot)
        self.correlogram_variance(ll, self.subplot)

    def histogram(self, ll, subplot, f=None):
        """Histogram of normalized residuals with a normal-pdf overlay.

        If f is None the chart is displayed in the GUI, otherwise it is
        written to the open file object f.
        """
        N, T, k = ll.panel.X.shape
        fgr, axs = subplot
        n = ll.e_norm_centered.shape[2]
        e = ll.e_norm_centered[self.panel.included[2]].flatten()
        N = e.shape[0]
        e = e.reshape((N, 1))
        grid_range = 4
        grid_step = 0.05
        # module-level histogram() below, not this method (different arity)
        h, grid = histogram(e, grid_range, grid_step)
        norm = scstats.norm.pdf(grid) * grid_step
        axs.bar(grid, h, color='grey', width=0.025, label='histogram')
        axs.plot(grid, norm, 'green', label='normal distribution')
        axs.legend(prop={'size': 6})
        name = 'Histogram - frequency'
        axs.set_title(name)
        if f is None:
            guif.display(self.charts[0], name, 0, subplot)
        else:
            guif.save(subplot, f)

    def correlogram(self, ll, subplot, f=None):
        """Correlogram of the normalized residuals (20 lags)."""
        fgr, axs = subplot
        lags = 20
        rho = stat.correlogram(self.panel, ll.e_norm_centered, lags)
        x = np.arange(lags + 1)
        axs.bar(x, rho, color='grey', width=0.5, label='correlogram')
        name = 'Correlogram - residuals'
        axs.set_title(name)
        if f is None:
            guif.display(self.charts[1], name, 1, subplot)
        else:
            guif.save(subplot, f)

    def correlogram_variance(self, ll, subplot, f=None):
        """Correlogram of the demeaned squared residuals (20 lags)."""
        N, T, k = ll.panel.X.shape
        fgr, axs = subplot
        lags = 20
        e2 = ll.e_norm_centered ** 2
        e2 = (e2 - self.panel.mean(e2)) * self.panel.included[3]
        rho = stat.correlogram(self.panel, e2, lags)
        x = np.arange(lags + 1)
        axs.bar(x, rho, color='grey', width=0.5, label='correlogram')
        name = 'Correlogram - squared residuals'
        axs.set_title(name)
        if f is None:
            guif.display(self.charts[2], name, 2, subplot)
        else:
            guif.save(subplot, f)


def histogram(x, grid_range, grid_step):
    """Bin the (N, 1) column vector x over [-grid_range, grid_range].

    Returns (frequencies, grid) where frequencies sum to 1 and grid has one
    extra right-edge point. Raises RuntimeError if any observation fell
    outside all bins (sanity check on the binning arithmetic).
    """
    N, k = x.shape
    grid_n = int(2 * grid_range / grid_step)
    grid = np.array([i * grid_step - grid_range for i in range(grid_n)]).reshape((1, grid_n))
    ones = np.ones((N, 1))
    # x_u[i, j] is 1 when x[i] is at/above grid edge j-1; x_l is the mirror
    # from below. Their product is the bin-membership indicator matrix.
    x_u = np.concatenate((ones, x >= grid), 1)
    x_l = np.concatenate((x < grid, ones), 1)
    grid = np.concatenate((grid.flatten(), [grid[0, -1] + grid_step]))
    histogram = np.sum((x_u * x_l), 0)
    if int(np.sum(histogram)) != N:
        raise RuntimeError('Error in histogram calculation')
    return histogram / N, grid
espensirnes/paneltime
build/lib.win-amd64-3.7/paneltime/gui/gui_charts.py
Python
gpl-3.0
4,478
# GPAW build customization for the JUROPA cluster: Intel compiler wrapper,
# MKL (sequential) with ScaLAPACK/BLACS, and libxc.
# NOTE(review): the lists appended to below (library_dirs, include_dirs,
# libraries, define_macros) are presumably pre-defined by GPAW's customize
# mechanism before this file is exec'd — this file only extends them.
compiler = './icc.py'
mpicompiler = './icc.py'
mpilinker = 'MPICH_CC=gcc mpicc'
scalapack = True
library_dirs += ['/opt/intel/Compiler/11.0/074/mkl/lib/em64t']
# MKL link line: LP64 interface, sequential threading, core, LAPACK,
# ScaLAPACK with Intel-MPI BLACS, plus pthread.
libraries = ['mkl_intel_lp64'
             ,'mkl_sequential'
             ,'mkl_core',
             'mkl_lapack',
             'mkl_scalapack_lp64',
             'mkl_blacs_intelmpi_lp64',
             'pthread'
             ]
libraries += ['xc']
# change this to your installation directory
LIBXCDIR='/lustre/jhome5/hfr04/hfr047/gridpaw/libxc-2.0.2/install/'
library_dirs += [LIBXCDIR + 'lib']
include_dirs += [LIBXCDIR + 'include']
# BLACS/ScaLAPACK symbols on this toolchain carry no trailing underscore.
define_macros += [('GPAW_NO_UNDERSCORE_CBLACS', '1')]
define_macros += [('GPAW_NO_UNDERSCORE_CSCALAPACK', '1')]
define_macros += [("GPAW_ASYNC",1)]
define_macros += [("GPAW_MPI2",1)]
robwarm/gpaw-symm
doc/install/Linux/customize_juropa_icc_libxc.py
Python
gpl-3.0
743
# -*- coding: utf-8 -*- """ Hardware file for the Superconducting Magnet (SCM) QuDi is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. QuDi is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with QuDi. If not, see <http://www.gnu.org/licenses/>. Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/> """ import socket from core.base import Base import numpy as np import time from interface.magnet_interface import MagnetInterface from collections import OrderedDict import re class Magnet(Base, MagnetInterface): """Magnet positioning software for superconducting magnet. Enables precise positioning of the magnetic field in spherical coordinates with the angle theta, phi and the radius rho. The superconducting magnet has three coils, one in x, y and z direction respectively. The current through these coils is used to compute theta, phi and rho. The alignment can be done manually as well as automatically via fluorescence alignment. 
""" _modtype = 'Magnet' _modclass = 'hardware' def __init__(self, **kwargs): """Here the connections to the power supplies and to the counter are established""" super().__init__(**kwargs) socket.setdefaulttimeout(3) try: self.soc_x = socket.socket(socket.AF_INET, socket.SOCK_STREAM) except socket.timeout: self.log.error("socket timeout for coil in x-direction") try: self.soc_y = socket.socket(socket.AF_INET, socket.SOCK_STREAM) except socket.timeout: self.log.error("socket timeout for coil in y-direction") try: self.soc_z = socket.socket(socket.AF_INET, socket.SOCK_STREAM) except socket.timeout: self.log.error("socket timeout for coil in z-direction") # default waiting time of the pc after a message was sent to the magnet # should be set in the config file self.waitingtime = 0.01 # This is saves in which interval the input theta was in the last movement self._inter = 1 # this is a new thing. "normal_mode" # allows one to set magnetic fields up to 1 T along each axis # and the magnetic field vector can't be larger than 1.2 T # In z_mode you are allowed to move in a 5° solid angle along the z-axis # with a maximum field of 3 T. # For more documentation or how to change the mode look into # the function switch_mode self.mode = "normal_mode" # constraints of the superconducting magnet in T # should be set in the config file # Normally you should get and set constraints in the # function get_constraints(). The problem is here that # the constraint rho is no constant and is dependent on the # current theta and phi value. 
self.x_constr = 1.0 self.y_constr = 1.0 self.z_constr = 3.0 self.rho_constr = 1.2 def on_activate(self): """ loads the config file and extracts the necessary configurations for the superconducting magnet @return int: (0: Ok, -1:error) """ # get necessary information from the config file config = self.getConfiguration() if 'magnet_port' in config.keys(): port = config['magnet_port'] else: self.log.error('No port hs been defined in the config file!') return -1 if 'magnet_IP_address_x' in config.keys(): self.soc_x.connect((config['magnet_IP_address_x'], port)) else: self.log.error('No ip-address for connection to x-coil defined!') return -1 if 'magnet_IP_address_y' in config.keys(): self.soc_y.connect((config['magnet_IP_address_y'], port)) else: self.log.error('No ip-address for connection to y-coil defined!') return -1 if 'magnet_IP_address_z' in config.keys(): self.soc_z.connect((config['magnet_IP_address_z'], port)) else: self.log.error('No ip-address for connection to z-coil defined!') return -1 if 'magnet_waitingtime' in config.keys(): self.waitingtime = config['magnet_waitingtime'] if 'magnet_x_constr' in config.keys(): self.x_constr = config['magnet_x_constr'] if 'magnet_y_constr' in config.keys(): self.y_constr = config['magnet_y_constr'] if 'magnet_z_constr' in config.keys(): self.z_constr = config['magnet_z_constr'] if 'magnet_rho_constr' in config.keys(): self.rho_constr = config['magnet_rho_constr'] # sending a signal to all coils to receive an answer to cut off the # useless welcome message. ask_dict = {'x': "STATE?\n", 'y': "STATE?\n", 'z': "STATE?\n"} answ_dict = self.ask(ask_dict) self.log.info("Magnet in state: {0}".format(answ_dict)) # sending a command to the magnet to turn into SI units regarding # field units. 
self.heat_all_switches() tell_dict = {'x': 'CONF:FIELD:UNITS 1', 'y': 'CONF:FIELD:UNITS 1', 'z': 'CONF:FIELD:UNITS 1'} self.tell(tell_dict) def on_deactivate(self): self.soc_x.close() self.soc_y.close() self.soc_z.close() def utf8_to_byte(self, myutf8): """ Convenience function for code refactoring @param string myutf8 the message to be encoded @return the encoded message in bytes """ return myutf8.encode('utf-8') def byte_to_utf8(self, mybytes): """ Convenience function for code refactoring @param bytes mybytes the byte message to be decoded @return the decoded string in uni code """ return mybytes.decode() # =========================== Magnet Functionality Core ==================================== def get_constraints(self): """ Retrieve the hardware constraints from the magnet driving device. @return dict: dict with constraints for the magnet hardware. These constraints will be passed via the logic to the GUI so that proper display elements with boundary conditions could be made. Provides all the constraints for each axis of a motorized stage (like total travel distance, velocity, ...) Each axis has its own dictionary, where the label is used as the identifier throughout the whole module. The dictionaries for each axis are again grouped together in a constraints dictionary in the form {'<label_axis0>': axis0 } where axis0 is again a dict with the possible values defined below. The possible keys in the constraint are defined in the interface file. If the hardware does not support the values for the constraints, then insert just None. If you are not sure about the meaning, look in other hardware files to get an impression. 
""" constraints = OrderedDict() pos_dict = self.get_pos() coord_list = [pos_dict['rho'], pos_dict['theta'], pos_dict['phi']] pos_max_dict = self.rho_pos_max({'rad': coord_list}) # get the constraints for the x axis: axis0 = {} axis0['label'] = 'rho' # name is just as a sanity included axis0['unit'] = 'T' # the SI units axis0['pos_min'] = 0 axis0['pos_max'] = pos_max_dict['rho'] axis0['pos_step'] = 300000 axis0['vel_min'] = 0 axis0['vel_max'] = 0.0404*0.01799 # unit is T/s axis0['vel_step'] = 10**4 # In fact position constraints for rho is dependent on theta and phi, which would need # the use of an additional function to calculate # going to change the return value to a function rho_max_pos which needs the current theta and # phi position # get the constraints for the x axis: axis1 = {} axis1['label'] = 'theta' # name is just as a sanity included axis1['unit'] = 'rad' # the SI units axis1['pos_min'] = -1000 # arbitrary values for now ( there isn't any restriction on them ) axis1['pos_max'] = 1000 # that is basically the traveling range axis1['pos_step'] = 36000 axis1['vel_min'] = 0 axis1['vel_max'] = 0.0404*0.01799 #unit is T/s axis1['vel_step'] = 10**4 # get the constraints for the x axis: axis2 = {} axis2['label'] = 'phi' # name is just as a sanity included axis2['unit'] = 'rad' # the SI units axis2['pos_min'] = -1000 # arbitrary values for now ( there isn't any restriction on them ) axis2['pos_max'] = 1000 # that is basically the traveling range axis2['pos_step'] = 92000 axis2['vel_min'] = 0 axis2['vel_max'] = 0.0380*0.07028 #unit is T/s axis2['vel_step'] = 10**4 # assign the parameter container for x to a name which will identify it constraints[axis0['label']] = axis0 constraints[axis1['label']] = axis1 constraints[axis2['label']] = axis2 return constraints def tell(self, param_dict): """Send a command string to the magnet. 
@param dict param_dict: has to have one of the following keys: 'x', 'y' or 'z' with an appropriate command for the magnet """ internal_counter = 0 if param_dict.get('x') is not None: if not param_dict['x'].endswith('\n'): param_dict['x'] += '\n' self.soc_x.send(self.utf8_to_byte(param_dict['x'])) internal_counter += 1 if param_dict.get('y') is not None: if not param_dict['y'].endswith('\n'): param_dict['y'] += '\n' self.soc_y.send(self.utf8_to_byte(param_dict['y'])) internal_counter += 1 if param_dict.get('z') is not None: if not param_dict['z'].endswith('\n'): param_dict['z'] += '\n' self.soc_z.send(self.utf8_to_byte(param_dict['z'])) internal_counter += 1 if internal_counter == 0: self.log.warning('no parameter_dict was given therefore the ' 'function tell() call was useless') def ask(self, param_dict): """Asks the magnet a 'question' and returns an answer from it. @param dictionary param_dict: has to have one of the following keys: 'x', 'y' or 'z' the items have to be valid questions for the magnet. @return answer_dict: contains the same labels as the param_dict if it was set correct and the corresponding items are the answers of the magnet (format is string), else an empty dictionary is returned """ answer_dict = {} if param_dict.get('x') is not None: if not param_dict['x'].endswith('\n'): param_dict['x'] += '\n' # repeat this block to get out crappy messages. self.soc_x.send(self.utf8_to_byte(param_dict['x'])) # time.sleep(self.waitingtime) # you need to wait until magnet generating # an answer. answer_dict['x'] = self.byte_to_utf8(self.soc_x.recv(1024)) # receive an answer self.soc_x.send(self.utf8_to_byte(param_dict['x'])) # time.sleep(self.waitingtime) # you need to wait until magnet generating # an answer. 
answer_dict['x'] = self.byte_to_utf8(self.soc_x.recv(1024)) # receive an answer answer_dict['x'] = answer_dict['x'].replace('\r', '') answer_dict['x'] = answer_dict['x'].replace('\n', '') if param_dict.get('y') is not None: if not param_dict['y'].endswith('\n'): param_dict['y'] += '\n' self.soc_y.send(self.utf8_to_byte(param_dict['y'])) # time.sleep(self.waitingtime) # you need to wait until magnet generating # an answer. answer_dict['y'] = self.byte_to_utf8(self.soc_y.recv(1024)) # receive an answer self.soc_y.send(self.utf8_to_byte(param_dict['y'])) # time.sleep(self.waitingtime) # you need to wait until magnet generating # an answer. answer_dict['y'] = self.byte_to_utf8(self.soc_y.recv(1024)) # receive an answer answer_dict['y'] = answer_dict['y'].replace('\r', '') answer_dict['y'] = answer_dict['y'].replace('\n', '') if param_dict.get('z') is not None: if not param_dict['z'].endswith('\n'): param_dict['z'] += '\n' self.soc_z.send(self.utf8_to_byte(param_dict['z'])) # time.sleep(self.waitingtime) # you need to wait until magnet generating # an answer. answer_dict['z'] = self.byte_to_utf8(self.soc_z.recv(1024)) # receive an answer self.soc_z.send(self.utf8_to_byte(param_dict['z'])) # time.sleep(self.waitingtime) # you need to wait until magnet generating # an answer. answer_dict['z'] = self.byte_to_utf8(self.soc_z.recv(1024)) # receive an answer answer_dict['z'] = answer_dict['z'].replace('\r', '') answer_dict['z'] = answer_dict['z'].replace('\n', '') if len(answer_dict) == 0: self.log.warning('no parameter_dict was given therefore the ' 'function call ask() was useless') return answer_dict def get_status(self, param_list=None): """ Get the status of the position @param list param_list: optional, if a specific status of an axis is desired, then the labels of the needed axis should be passed in the param_list. If nothing is passed, then from each axis the status is asked. @return dict: with the axis label as key and the status number as item. 
Possible states are { -1 : Error, 1: SCM doing something, 0: SCM doing nothing } """ # I have chosen the numbers rather lightly and # an improvement is probably easily achieved. if param_list is not None: status_plural = self.ask_status(param_list) else: status_plural = self.ask_status() status_dict = {} for axes in status_plural: status = status_plural[axes] translated_status = -1 if status == '1': translated_status = 1 elif status == '2': translated_status = 0 elif status == '3': translated_status = 0 elif status == '4': translated_status = 1 elif status == '5': translated_status = 0 elif status == '6': translated_status = 1 elif status == '7': translated_status = -1 elif status == '8': translated_status = 0 elif status == '9': translated_status = 1 elif status == '10': translated_status = 1 status_dict[axes] = translated_status # adjusting to the axis problem axes = ['rho', 'theta', 'phi'] return_dict = {axes[i] : status_dict[old_key] for i, old_key in enumerate(status_dict)} return return_dict def heat_switch(self, axis): """ This function enables heating of the PJSwitch, which is a necessary step to conduct current to the coils. 
@param string axis: desired axis (x, y, z) """ if axis == "x": self.soc_x.send(self.utf8_to_byte("PS 1\n")) elif axis == "y": self.soc_y.send(self.utf8_to_byte("PS 1\n")) elif axis == "z": self.soc_z.send(self.utf8_to_byte("PS 1\n")) else: self.log.error("In function heat_switch only 'x', 'y' and 'z' are possible axes") def heat_all_switches(self): """ Just a convenience function to heat all switches at once, as it is unusual to only apply a magnetic field in one direction""" self.heat_switch("x") self.heat_switch("y") self.heat_switch("z") def cool_switch(self, axis): """ Turns off the heating of the PJSwitch, axis depending on user input @param string axis: desired axis (x, y, z) """ if axis == "x": self.soc_x.send(self.utf8_to_byte("PS 0\n")) elif axis == "y": self.soc_y.send(self.utf8_to_byte("PS 0\n")) elif axis == "z": self.soc_z.send(self.utf8_to_byte("PS 0\n")) else: self.log.error("In function cool_switch only 'x', 'y' and 'z' are possible axes") def cool_all_switches(self): """ Just a convenience function to cool all switches at once This will take 600s.""" self.cool_switch("x") self.cool_switch("y") self.cool_switch("z") def initialize(self): """ Acts as a switch. When all coils of the superconducting magnet are heated it cools them, else the coils get heated. @return int: (0: Ok, -1:error) """ # need to ask if the PJSwitch is on answ_dict = {} answ_dict = self.ask({'x': "PS?", 'y': "PS?", 'z': "PS?"}) if answ_dict['x'] == answ_dict['y'] == answ_dict['z']: if answ_dict['x'] == '0': self.heat_all_switches() else: self.cool_all_switches() else: self.log.warning('can not correctly turn on/ turn off magnet, ' 'because not all coils are in the same state in function ' 'initialize') return -1 return 0 # how to realize this function ? def idle_magnet(self): """ Cool all coils of the superconducting magnet to achieve maximum accuracy after aligning. 
@return int: (0: Ok, -1:error) """ self.cool_all_switches() return 0 def wake_up_magnet(self): """ Heat all coils of the superconducting magnet to get back to the working state. @return int: (0: Ok, -1:error) """ self.heat_all_switches() return 0 def switch_mode(self, bool_var): """ This function is special for this Super Conducting Magnet (SCM). It stems from the constraints on the coils. There is one mode (so called "normal_mode" which allows a field strength of up to 1 T in each direction and a combined field strength of 1.2 T. The z_mode is special as the z-coil can conduct more current and therefore exert higher field values. In this mode the combined field strength is allowed to be as high as 3 T but only within a cone of 5° to the z-axis. @param bool_var: True sets mode to "normal_mode", and False to "z_mode" @return int: (0: 0k, -1:error) """ if bool_var: if self.mode != "normal_mode": self.calibrate() self.mode = "normal_mode" else: if self.mode != "z_mode": self.calibrate() self.mode = "z_mode" return 0 def target_field_setpoint(self, param_dict): """ Function to set the target field (in T), which will be reached through the function ramp(self, param_list). @param dict param_dict: Contains as keys the axes to be set e.g. 'x' or 'y' and the items are the float values for the new field generated by the coil of that axis. 
@return int: error code (0:OK, -1:error) """ field_dict = self.get_current_field() mode = self.mode if param_dict.get('x') is not None: field_dict['x'] = param_dict['x'] if param_dict.get('y') is not None: field_dict['y'] = param_dict['y'] if param_dict.get('z') is not None: field_dict['z'] = param_dict['z'] if param_dict.get('x') is None and param_dict.get('x') is None and param_dict.get('x') is None: self.log.warning('no valid axis was supplied in ' 'target_field_setpoint') return -1 new_coord = [field_dict['x'], field_dict['y'], field_dict['z']] check_var = self.check_constraints({mode: {'cart': new_coord}}) if check_var: if param_dict.get('x') is not None: self.soc_x.send(self.utf8_to_byte("CONF:FIELD:TARG " + str(param_dict['x']) + "\n")) if param_dict.get('y') is not None: self.soc_y.send(self.utf8_to_byte("CONF:FIELD:TARG " + str(param_dict['y']) + "\n")) if param_dict.get('z') is not None: self.soc_z.send(self.utf8_to_byte("CONF:FIELD:TARG " + str(param_dict['z']) + "\n")) else: self.log.warning('resulting field would be too high in ' 'target_field_setpoint') return -1 return 0 def ramp(self, param_list=None): """ function to ramp the magnetic field in the direction(s) to the target field values @param list param_list: This param is optional. If supplied it has to contain the labels for the axes, which should be ramped (only cartesian makes sense here), else all axes will be ramped. 
@return int: error code (0:OK, -1:error) """ if param_list is None: self.soc_x.send(self.utf8_to_byte("RAMP\n")) self.soc_y.send(self.utf8_to_byte("RAMP\n")) self.soc_z.send(self.utf8_to_byte("RAMP\n")) else: if 'x' in param_list: self.soc_x.send(self.utf8_to_byte("RAMP\n")) elif 'y' in param_list: self.soc_y.send(self.utf8_to_byte("RAMP\n")) elif 'z' in param_list: self.soc_z.send(self.utf8_to_byte("RAMP\n")) else: self.log.warning('in function ramp your definition of ' 'param_list was incorrect') return -1 return 0 def ramp_to_zero(self, axis): """ Function to ramp down a specific coil to zero current @param axis: string axis: (allowed inputs 'x', 'y' and 'z') """ if axis == "x": self.soc_x.send(self.utf8_to_byte("ZERO\n")) elif axis == "y": self.soc_y.send(self.utf8_to_byte("ZERO\n")) elif axis == "z": self.soc_z.send(self.utf8_to_byte("ZERO\n")) else: self.log.error("In function ramp_to_zero only 'x', 'y' and 'z' are possible axes") def calibrate(self, param_list=None): """ Calibrates the stage. In the case of the super conducting magnet this just means moving all or a user specified coil to zero magnetic field. @param dict param_list: param_list: optional, if a specific calibration of an axis is desired, then the labels of the needed axis should be passed in the param_list. If nothing is passed, then all connected axis will be calibrated. @return int: error code (0:OK, -1:error) After calibration the stage moves to home position which will be the zero point for the passed axis. The calibration procedure will be different for each stage. 
""" if not param_list: self.ramp_to_zero("x") self.ramp_to_zero("y") self.ramp_to_zero("z") else: if 'x' in param_list: self.ramp_to_zero("x") elif 'y' in param_list: self.ramp_to_zero("y") elif 'z' in param_list: self.ramp_to_zero("z") else: self.log.error('no valid axis was supplied') return -1 return 0 def set_coordinates(self, param_dict): """ Function to set spherical coordinates ( keep in mind all is in radians) This function is intended to replace the old set functions ( set_magnitude, set_theta, set_phi ). @param dict param_dict: dictionary, which passes all the relevant field values, that should be passed. Usage: {'axis_label': <the-abs-pos-value>}. 'axis_label' must correspond to a label given to one of the axis. In this case the axes are labeled 'rho', 'theta' and 'phi' @return int: error code (0:OK, -1:error) """ answ_dict = {} coord_list = [] transform_dict = {'cart': {'rad': coord_list}} answ_dict = self.get_current_field() coord_list.append(answ_dict['x']) coord_list.append(answ_dict['y']) coord_list.append(answ_dict['z']) coord_list = self.transform_coordinates(transform_dict) label_list = ['rho', 'theta', 'phi'] if param_dict.get('rho') is not None: coord_list[0] = param_dict['rho'] if param_dict.get('theta') is not None: coord_list[1] = param_dict['theta'] if param_dict.get('phi') is not None: coord_list[2] = param_dict['phi'] for key in param_dict.keys(): if key not in label_list: self.log.warning("The key "+key+" provided is no valid key in set_coordinates.") return -1 transform_dict = {'rad': {'cart': coord_list}} coord_list = self.transform_coordinates(transform_dict) set_point_dict = {'x': coord_list[0], 'y': coord_list[1], 'z': coord_list[2]} check_val = self.target_field_setpoint(set_point_dict) return check_val def move_abs(self, param_dict): """ Moves stage to absolute position (absolute movement) @param dict param_dict: dictionary, which passes all the relevant parameters, that should be changed. 
Usage: {'axis_label': <the-abs-pos-value>}. 'axis_label' must correspond to a label given to one of the axis. In this case the axes are labeled 'rho', 'theta' and 'phi'. @return int: error code (0:OK, -1:error) """ # the problem here is, that check_coordinates needs a complete dictionary with all # labels while move_abs doesn't need it. I think it is better to extend this flexibility to # check_constraints than changing move_abs. coord_list = [] mode = self.mode param_dict = self.update_coordinates(param_dict) coord_list.append(param_dict['rho']) coord_list.append(param_dict['theta']) coord_list.append(param_dict['phi']) # lets adjust theta theta = param_dict['theta'] phi = param_dict['phi'] # switch variable decides what has to be done ( in intervals [2*k*np.pi, 2k+1*np.pi] the movement would # be ok ( no rotation in phi ). In the other intervals one has to see if there was a movement before this # movement in one of these regions or not. If not just move, if there was shift to the interval [0, np.pi] and # move there. switch = np.ceil(theta / np.pi) % 2 inter1 = np.ceil(theta / np.pi) inter1 = int(inter1) # if inter1 > 0: # inter1 -= 1 # move the theta values in the right range # for the constraints # if in an even interval if switch: theta -= np.pi * (inter1 - 1) else: # get into the correct interval theta -= np.pi * (inter1 - 1) # now mirror at the center of the interval theta = np.pi/2 - (theta - np.pi/2) # interval was correct if switch: self._inter = inter1 # interval that needs rotation around z-axis in case it wasn't outside that interval before else: # actually it isn't necessary to distinguish here. I initially thought it is necessary and it would # be if one would move the magnet based on the magnet field of previous values. # I will leave the code here for now, when somebody in the future wants to extend this function # to allow both behaviors he can use the existing code. 
# theta was in a correct interval before but isn't now ( change of interval ) self.log.debug('need rotation around phi to adjust for negative theta value') self.log.debug('old int: {0}, new int: {1}'.format(self._inter, inter1)) if int(np.abs(self._inter - inter1)) is 1: phi += np.pi # theta wasn't in a correct interval before and is still in the same interval ( in this case do nothing ) elif int(np.abs(self._inter - inter1)) is 0: phi += np.pi else: self.log.warning("There was a difference in intervals larger " "than one between two consecutive movements. This is not supported " "yet.{0}".format(self._inter - inter1)) self._inter = inter1 # adjust the phi values so they are in the right interval. They might be in the wrong interval # due to user input or theta values inter2 = np.ceil(phi / (2 * np.pi)) inter2 = int(inter2) # if inter2 > 0: # inter2 -= 1 phi -= 2 * np.pi * (inter2 - 1) self.log.debug('show old dictionary: {0}'.format(param_dict)) # set the corrected values param_dict['theta'] = theta param_dict['phi'] = phi constr_dict = {mode: {'rad': coord_list}} self.log.debug('show new dictionary: {0}'.format(param_dict)) check_bool = self.check_constraints(constr_dict) if check_bool: check_1 = self.set_coordinates(param_dict) check_2 = self.ramp() else: self.log.warning("move_abs hasn't done anything, see check_constraints message why") return -1 if check_1 is check_2: if check_1 is 0: return 0 else: return -1 def move_rel(self, param_dict): """ Moves stage in given direction (in spheric coordinates with theta and phi in radian) @param dict param_dict: dictionary, which passes all the relevant parameters, which should be changed. Usage: {'axis_label': <the-abs-pos-value>}. 'axis_label' must correspond to a label given to one of the axis. 
@return int: error code (0:OK, -1:error) """ coord_list = [] answ_dict = self.get_current_field() coord_list.append(answ_dict['x']) coord_list.append(answ_dict['y']) coord_list.append(answ_dict['z']) transform_dict = {'cart': {'rad': coord_list}} coord_list = self.transform_coordinates(transform_dict) label_list = ['rho', 'theta', 'phi'] if param_dict.get('rho') is not None: coord_list[0] += param_dict['rho'] if param_dict.get('theta') is not None: coord_list[1] += param_dict['theta'] if param_dict.get('phi') is not None: coord_list[2] += param_dict['phi'] for key in param_dict.keys(): if key not in label_list: self.log.warning("The key "+key+" provided is no valid key in set_coordinates.") return -1 new_coord_dict = {'rho': coord_list[0], 'theta': coord_list[1], 'phi': coord_list[2]} check_val = self.move_abs(new_coord_dict) return check_val def transform_coordinates(self, param_dict): """ Function for generic coordinate transformation. This is a refactoring to the old functions (4) to be replaced by just one function @param dict param_dict: contains a param_dict, which contains a list of values to be transformed. The transformation depends on the keys of the first and the second dictionary. Possible keys are: "deg", "rad", "cart" for example if the first key is deg and the second is cartesian then the values in the list will be transformed from deg to cartesian. 
Ordering of the values should be [x,y,z] (cartesian) or [rho, theta, phi] for deg or rad @return list containing the transformed values """ # here all the possible cases for transformations # are checked if param_dict.get('deg') is not None: if param_dict['deg'].get('rad') is not None: try: rho, theta, phi = param_dict['deg'].get('rad') except ValueError: self.log.error('Supplied input list for transform_coordinates has to be of length 3: returning initial values') return [-1, -1, -1] theta = theta*np.pi/180 phi = phi*np.pi/180 return_list = [rho, theta, phi] return return_list if param_dict['deg'].get('cart') is not None: cartesian_list = [] try: rho, theta, phi = param_dict['deg'].get('cart') except ValueError: self.log.error('Supplied input list for transform_coordinates has to be of length 3: returning [-1,-1,-1]') return [-1, -1, -1] # transformations that should probably be revisited. # They are there in case the theta and phi values # are not in the correct range. while theta >= 180: phi += 180 theta = 360 - theta while theta < 0: theta = -theta phi += 180 while phi >= 360: phi += 360 while phi < 0: phi += 360 cartesian_list.append(rho * np.sin(theta * 2 * np.pi / 360) * np.cos(phi * 2 * np.pi / 360)) cartesian_list.append(rho * np.sin(theta * 2 * np.pi / 360) * np.sin(phi * 2 * np.pi / 360)) cartesian_list.append(rho * np.cos(theta * 2 * np.pi / 360)) return cartesian_list if param_dict.get('rad') is not None: if param_dict['rad'].get('deg') is not None: try: rho, theta, phi = param_dict['rad']['deg'] except ValueError: self.log.error("Supplied input list for transform_coordinates has to be of length 3: returning [-1, -1, -1]") return [-1,-1,-1] theta = 180*theta/np.pi phi = 180*phi/np.pi return_list = [rho, theta, phi] return return_list if param_dict['rad'].get('cart') is not None: try: rho, theta, phi = param_dict['rad']['cart'] except ValueError: self.log.error("Supplied input list for transf has to be of length 3: returning [-1, -1, -1]") return 
[-1,-1,-1] x_val = rho * np.sin(theta) * np.cos(phi) y_val = rho * np.sin(theta) * np.sin(phi) z_val = rho * np.cos(theta) return_list = [x_val, y_val, z_val] return return_list if param_dict.get('cart') is not None: if param_dict['cart'].get('deg') is not None: try: x_val, y_val, z_val = param_dict['cart']['deg'] except ValueError: self.log.error("Supplied input list for transform_coordinates has to be of length 3: returning [-1, -1, -1]") return [-1,-1,-1] rho = np.sqrt(x_val ** 2 + y_val ** 2 + z_val ** 2) if rho == 0: theta = 0 else: theta = np.arccos(z_val/rho) * 360/(2 * np.pi) if x_val == 0 and y_val == 0: phi = 0 else: phi = np.arctan2(y_val, x_val) * 360/(2 * np.pi) if phi < 0: phi += 360 return_list = [rho, theta, phi] return return_list if param_dict['cart'].get('rad') is not None: try: x_val, y_val, z_val = param_dict['cart']['rad'] except ValueError: self.log.error("Supplied input list for transform_coordinates has to be of length 3: returning [-1, -1, -1]") return [-1,-1,-1] rho = np.sqrt(x_val ** 2 + y_val ** 2 + z_val ** 2) if rho == 0: theta = 0 else: theta = np.arccos(z_val/rho) if x_val == 0 and y_val == 0: phi = 0 else: phi = np.arctan2(y_val, x_val) if phi < 0: phi += 2 * np.pi return_list = [rho, theta, phi] return return_list def get_current_field(self): """ Function that asks the magnet for the current field strength in each direction @param: @param x : representing the field strength in x direction @param y : representing the field strength in y direction float z : representing the field strength in z direction """ ask_dict = {} ask_dict['x'] = "FIELD:MAG?\n" ask_dict['y'] = "FIELD:MAG?\n" ask_dict['z'] = "FIELD:MAG?\n" answ_dict = self.ask(ask_dict) # having always a weird bug, where the response of the magnet # doesn't make sense, as it is always the same way I try to # catch these exceptions. # pattern to recognize decimal numbers ( There is one issue here e.g. (0.01940.01345) gives one match # with 0.01940. 
Don't think it will matter much.) my_pattern = re.compile('[-+]?[0-9][.][0-9]+') try: answ_dict['x'] = float(answ_dict['x']) except ValueError: match_list = re.findall(my_pattern, answ_dict['x']) answ_dict['x'] = float(match_list[0]) try: answ_dict['y'] = float(answ_dict['y']) except ValueError: match_list = re.findall(my_pattern, answ_dict['y']) answ_dict['y'] = float(match_list[0]) try: answ_dict['z'] = float(answ_dict['z']) except ValueError: match_list = re.findall(my_pattern, answ_dict['z']) answ_dict['z'] = float(match_list[0]) return answ_dict def get_pos(self, param_list=None): """ Gets current position of the stage @param list param_list: optional, if a specific position of an axis is desired, then the labels of the needed axis should be passed in the param_list. If nothing is passed, then from each axis the position is asked. @return dict mypos: with keys being the axis labels and item the current position. Given in spheric coordinates with Units T, rad , rad. """ mypos = {} mypos1 = {} answ_dict = self.get_current_field() coord_list = [answ_dict['x'], answ_dict['y'], answ_dict['z']] rho, theta, phi = self.transform_coordinates({'cart': {'rad': coord_list}}) mypos1['rho'] = rho mypos1['theta'] = theta mypos1['phi'] = phi if param_list is None: return mypos1 else: if "rho" in param_list: mypos['rho'] = mypos1['rho'] if "theta" in param_list: mypos['theta'] = mypos1['theta'] if "phi" in param_list: mypos['phi'] = mypos1['phi'] return mypos def stop_hard(self, param_list=None): """ function that pauses the heating of a specific coil depending on the elements in param_list. @param list param_list: Can contain elements 'x', 'y' or 'z'. In the case no list is supplied the heating of all coils is stopped @return integer: 0 everything is ok and -1 an error occured. 
""" if not param_list: self.soc_x.send(self.utf8_to_byte("PAUSE\n")) self.soc_y.send(self.utf8_to_byte("PAUSE\n")) self.soc_z.send(self.utf8_to_byte("PAUSE\n")) elif len(param_list) > 0: self.log.warning('Some useless parameters were passed.') return -1 else: if 'x' in param_list: self.soc_x.send(self.utf8_to_byte("PAUSE\n")) param_list.remove('x') if 'y' in param_list: self.soc_y.send(self.utf8_to_byte("PAUSE\n")) param_list.remove('y') if 'z' in param_list: self.soc_z.send(self.utf8_to_byte("PAUSE\n")) param_list.remove('z') return 0 def abort(self): """ Stops movement of the stage @return int: error code (0:OK, -1:error) """ # could think about possible exceptions here and # catch them and return -1 in case ab = self.stop_hard() return ab def ask_status(self, param_list = None): """ Function that returns the status of the coils ('x','y' and 'z') given in the param_dict @param list param_list: string (elements allowed 'x', 'y' and 'z') for which the status should be returned. Can be None, then the answer is the same as for the list ['x','y','z']. @return state: returns a string, which contains the number '1' to '10' representing the state, the magnet is in. For further information on the meaning of the numbers see translated_get_status() """ ask_dict = {} for i_dea in range(2): if not param_list: ask_dict['x'] = "STATE?\n" ask_dict['y'] = "STATE?\n" ask_dict['z'] = "STATE?\n" else: for axis in param_list: ask_dict[axis] = "STATE?\n" if i_dea == 0: pass # wait some time not sure if this is necessary. # time.sleep(self.waitingtime) answer_dict = self.ask(ask_dict) return answer_dict def translated_get_status(self, param_list=None): """ Just a translation of the numbers according to the manual supplied by American Magnets, Inc. @param list param_list: string (elements allowed 'x', 'y' and 'z') for which the translated status should be returned. 
Can be None, then the answer is the same as for the list ['x','y','z'] @return dictionary status_dict: keys are the elements of param_list and the items contain the message for the user. """ status_dict = self.ask_status(param_list) for myiter in status_dict.keys(): stateval = status_dict[myiter] try: if int(stateval) > 10: stateval = int(stateval) while stateval > 10: stateval //= 10 stateval = str(stateval) if stateval == '1': translated_status = 'RAMPING to target field/current' elif stateval == '2': translated_status = 'HOLDING at the target field/current' elif stateval == '3': translated_status = 'PAUSED' elif stateval == '4': translated_status = 'Ramping in MANUAL UP mode' elif stateval == '5': translated_status = 'Ramping in MANUAL DOWN mode' elif stateval == '6': translated_status = 'ZEROING CURRENT (in progress)' elif stateval == '7': translated_status = 'Quench detected' elif stateval == '8': translated_status = 'At ZERO current' elif stateval == '9': translated_status = 'Heating persistent switch' elif stateval == '10': translated_status = 'Cooling persistent switch' else: self.log.warning('Something went wrong in ask_status as the statevalue was not between 1 and 10!') return -1 except ValueError: self.log.warning("Sometimes the magnet returns nonsense after a request") return -1 status_dict[myiter] = translated_status return status_dict # This first version of set and get velocity will be very simple # Normally one can set up several ramping rates for different field # regions and so on. I also leave it to the user to find out how many # segments he has and so on. If nothing is changed the magnet should have # 1 segment and max_val should be the max_val that can be reached in that # direction. def set_velocity(self, param_dict): """ Function to change the ramp rate in T/s (ampere per second) @param dict: contains as keys the different cartesian axes ('x', 'y', 'z') and the dict contains list of parameters, that have to be supplied. 
In this case this is segment, ramp_rate and maxval. How does this work? The maxval for the current marks the endpoint and in between you have several segments with differen ramp_rates. @return int: error code (0:OK, -1:error) """ tell_dict = {} return_val = 0 internal_counter = 0 constraint_dict = self.get_constraints() if param_dict.get('x') is not None: param_list = list() param_list.append(1) # the segment param_list.append(param_dict['x']) param_list.append(1) # the upper bound of the velocity constraint_x = constraint_dict['rho']['vel_max'] if constraint_x > param_list[1]: tell_dict['x'] = 'CONF:RAMP:RATE:FIELD:' + str(param_list[0]) + ", " + str(param_list[1]) + ", " + str(param_list[2]) else: self.log.warning("constraint vel_max was violated in set_velocity with axis = 'x'") internal_counter += 1 if param_dict.get('y') is not None: param_list = list() param_list.append(1) # the segment param_list.append(param_dict['y']) param_list.append(1) # the upper bound of the velocity constraint_y = constraint_dict['theta']['vel_max'] if constraint_y > param_list[1]: tell_dict['y'] = 'CONF:RAMP:RATE:FIELD:' + str(param_list[0]) + ", " + str(param_list[1]) + ", " + str(param_list[2]) else: self.log.warning("constraint vel_max was violated in set_velocity with axis = 'y'") internal_counter += 1 if param_dict.get('z') is not None: param_list = list() param_list.append(1) # the segment param_list.append(param_dict['z']) param_list.append(3) # the upper bound of the velocity constraint_z = constraint_dict['phi']['vel_max'] if constraint_z > param_list[1]: tell_dict['z'] = 'CONF:RAMP:RATE:FIELD:' + str(param_list[0]) + ", " + str(param_list[1]) + ", " + str(param_list[2]) else: self.log.warning("constraint vel_max was violated in set_velocity with axis = 'z'") internal_counter += 1 if internal_counter > 0: self.tell(tell_dict) else: self.log.warning('There was no statement supplied in change_ramp_rate') return_val = -1 return return_val def get_velocity(self, 
param_list=None): """ Gets the current velocity for all connected axes. @param dict param_list: optional, if a specific velocity of an axis is desired, then the labels of the needed axis should be passed as the param_list. If nothing is passed, then from each axis the velocity is asked. @return dict: with the axis label as key and the velocity as item. """ ask_dict = {} return_dict = {} if param_list is None: ask_dict['x'] = "RAMP:RATE:FIELD:1?" ask_dict['y'] = "RAMP:RATE:FIELD:1?" ask_dict['z'] = "RAMP:RATE:FIELD:1?" answ_dict = self.ask(ask_dict) return_dict['x'] = float(answ_dict['x'].split(',')[0]) return_dict['y'] = float(answ_dict['y'].split(',')[0]) return_dict['z'] = float(answ_dict['z'].split(',')[0]) else: for axis in param_list: ask_dict[axis] = "RAMP:RATE:FIELD:1?" answ_dict = self.ask(ask_dict) for axis in param_list: return_dict[axis] = float(answ_dict[axis].split(',')[0]) return return_dict def check_constraints(self, param_dict): """ Function that verifies if for a given configuration of field strength exerted through the coils the constraints of the magnet are violated. @param dictionary param_dict: the structure of the dictionary is as follows {'z_mode': {'cart': [a,b,c]}} with available keys 'z_mode' and 'normal_mode'. The dictionary inside the dictionary can contain the label 'deg', 'cart' and 'rad'. The list contains then the new values and checks the constraints for them. z_mode means you can reach fields of 3 T in z-direction as long as the field vector is directed in z-direction within an accuracy of 5°. In this mode you should still be careful and the 5° restriction is kind of arbitrary and not experimented with. @return: boolean check_var: True if the constraints are fulfilled and False otherwise """ # First going to include a local function to check the constraints for cartesian coordinates # This helps to just reuse this function for the check of 'deg' and 'rad' cases. 
def check_cart_constraints(coord_list, mode): my_boolean = True try: x_val, y_val, z_val = coord_list except ValueError: self.log.error("In check_constraints list has not the right amount of elements (3).") return [-1, -1, -1] if mode == "normal_mode": if np.abs(x_val) > self.x_constr: my_boolean = False if np.abs(y_val) > self.y_constr: my_boolean = False if np.abs(z_val) > self.x_constr: my_boolean = False field_magnitude = np.sqrt(x_val**2 + y_val**2 + z_val**2) if field_magnitude > self.rho_constr: my_boolean = False elif mode == "z_mode": # Either in sphere on top of the cone # or in cone itself. my_boolean = False # angle 5° cone # 3T * cos(5°) height_cone = 2.9886 if (np.abs(z_val) <= height_cone) and ((x_val**2 + y_val**2) <= z_val**2): my_boolean = True elif x_val**2 + y_val**2 + (z_val - height_cone)**2 <= self.rho_constr: my_boolean = True elif x_val**2 + y_val**2 + (z_val + height_cone)**2 <= self.rho_constr: my_boolean = True if not my_boolean: self.log.warning("In check_constraints your settings don't lie in the allowed cone. 
See the " "function for more information") return my_boolean return_val = False if param_dict.get('normal_mode') is not None: if param_dict['normal_mode'].get("cart") is not None: return_val = check_cart_constraints(param_dict['normal_mode']["cart"], 'normal_mode') if param_dict['normal_mode'].get("rad") is not None: transform_dict = {'rad': {'cart': param_dict['normal_mode']["rad"]}} cart_coord = self.transform_coordinates(transform_dict) return_val = check_cart_constraints(cart_coord, 'normal_mode') # ok degree mode here won't work properly, because I don't check the move constraints if param_dict['normal_mode'].get("deg") is not None: transform_dict = {'deg': {'cart': param_dict['normal_mode']["deg"]}} cart_coord = self.transform_coordinates(transform_dict) return_val = check_cart_constraints(cart_coord, 'normal_mode') elif param_dict.get('z_mode') is not None: if param_dict['z_mode'].get("cart") is not None: return_val = check_cart_constraints(param_dict['z_mode']["cart"], 'z_mode') if param_dict['z_mode'].get("rad") is not None: transform_dict = {'rad':{'cart': param_dict['z_mode']["rad"]}} cart_coord = self.transform_coordinates(transform_dict) return_val = check_cart_constraints(cart_coord, 'z_mode') if param_dict['z_mode'].get("deg") is not None: transform_dict = {'deg': {'cart': param_dict['z_mode']["deg"]}} cart_coord = self.transform_coordinates(transform_dict) return_val = check_cart_constraints(cart_coord, 'z_mode') else: self.log.warning("no valid key was provided, therefore nothing happened in function check_constraints.") return return_val def rho_pos_max(self, param_dict): """ Function that calculates the constraint for rho either given theta and phi values in degree or x, y and z in cartesian coordinates. @param dictionary param_dict: Has to be of the form {'rad': [rho, theta, phi]} supports also 'deg' and 'cart' option. @return float pos_max: the max position for given theta and phi values. Returns -1 in case of failure. 
""" # so I'm going to rework this function. The answer in the case # of z_mode is easy. (Max value for r is constant 3 True) # For the "normal_mode" I decided to come up with a new # algorithm. # That algorithm can be summarized as follows: # Check if the vector (r,theta,phi) # with length so that it is on the surface of the sphere. In case it conflicts with the # rectangular constraints given by the coils itself (x<=10, y<=10, z<=10) # we need to find the # intersection between the vector and the cube (Sadly this will need # 6 cases, just like a dice), else we are finished. pos_max_dict = {'rho': -1, 'theta': -1, 'phi': -1} pos_max_dict['phi'] = 2*np.pi param_dict = {self.mode: param_dict} if param_dict.get("z_mode") is not None: pos_max_dict['theta'] = np.pi*5/180 # 5° cone if self.check_constraints(param_dict): pos_max_dict['rho'] = self.z_constr else: pos_max_dict['rho'] = 0.0 elif param_dict.get("normal_mode") is not None: pos_max_dict['theta'] = np.pi if param_dict["normal_mode"].get("cart") is not None: transform_dict = {'cart': {'rad': param_dict["normal_mode"].get("cart")}} coord_dict_rad = self.transform_coordinates(transform_dict) coord_dict_rad = {'rad': coord_dict_rad} coord_dict_rad['rad'][0] = self.rho_constr transform_dict = {'rad': {'cart': coord_dict_rad['rad']}} coord_dict_cart = self.transform_coordinates(transform_dict) coord_dict_cart = {'normal_mode': {'cart': coord_dict_cart}} elif param_dict["normal_mode"].get("rad") is not None: # getting the coord list and transforming the coordinates to # cartesian, so cart_constraints can make use of it # setting the radial coordinate, as only the angular coordinates # are of importance and e.g. a zero in the radial component would be # To set it to rho_constr is also important, as it allows a check # if the sphere is the valid constraint in the current direction. 
coord_list = param_dict["normal_mode"]["rad"] coord_dict_rad = param_dict["normal_mode"] coord_dict_rad['rad'][0] = self.rho_constr transform_dict = {'rad': {'cart': coord_dict_rad['rad']}} coord_dict_cart = self.transform_coordinates(transform_dict) coord_dict_cart = {'normal_mode': {'cart': coord_dict_cart}} elif param_dict["normal_mode"].get("deg") is not None: coord_list = param_dict["normal_mode"]["deg"] coord_dict_deg = param_dict["normal_mode"] coord_dict_deg['deg'][0] = self.rho_constr coord_dict_rad = self.transform_coordinates({'deg': {'rad': coord_dict_deg['deg']}}) coord_dict_rad = {'rad': coord_dict_rad} transform_dict = {'rad': {'cart': coord_dict_rad['rad']}} coord_dict_cart = self.transform_coordinates(transform_dict) coord_dict_cart = {'normal_mode': {'cart': coord_dict_cart}} my_boolean = self.check_constraints(coord_dict_cart) if my_boolean: pos_max_dict['rho'] = self.rho_constr else: # now I need to find out, which plane I need to check phi = coord_dict_rad['rad'][2] theta = coord_dict_rad['rad'][1] # Sides of the rectangular intersecting with position vector if (np.pi/4 <= theta) and (theta < np.pi - np.pi/4): if (7*np.pi/4 < phi < 2*np.pi) or (0 <= phi <= np.pi/4): pos_max_dict['rho'] = self.x_constr/(np.cos(phi)*np.sin(theta)) elif (np.pi/4 < phi) and (phi <= 3*np.pi/4): pos_max_dict['rho'] = self.y_constr / (np.sin(phi)*np.sin(theta)) elif (3*np.pi/4 < phi) and (phi <= 5*np.pi/4): pos_max_dict['rho'] = -self.x_constr/(np.cos(phi)*np.sin(theta)) elif (5*np.pi/4 < phi) and (phi <= 7*np.pi/4): pos_max_dict['rho'] = -self.y_constr / (np.sin(phi)*np.sin(theta)) # Top and bottom of the rectangular elif (0 <= theta) and (theta < np.pi/4): pos_max_dict['rho'] = self.x_constr / np.cos(theta) elif (3*np.pi/4 <= theta) and (theta <= np.pi): pos_max_dict['rho'] = - self.x_constr / np.cos(theta) return pos_max_dict def update_coordinates(self, param_dict): """ A small helper function that does make the functions set_coordinates, transform_coordinates 
compatible with the interface defined functions. The problem is, that in the interface functions each coordinate is item to an key which represents the axes of the current coordinate system. This function only makes the set of coordinates complete. E.g {'rho': 1.3} to {'rho': 1.3, 'theta': np.pi/2, 'phi': 0 } @param param_dict: Contains the incomplete dictionary @return: the complete dictionary """ current_coord_dict = self.get_pos() for key in current_coord_dict.keys(): if param_dict.get(key) is None: param_dict[key] = current_coord_dict[key] return param_dict def set_magnet_idle_state(self, magnet_idle=True): """ Set the magnet to couple/decouple to/from the control. @param bool magnet_idle: if True then magnet will be set to idle and each movement command will be ignored from the hardware file. If False the magnet will react on movement changes of any kind. @return bool: the actual state which was set in the magnet hardware. True = idle, decoupled from control False = Not Idle, coupled to control """ pass def get_magnet_idle_state(self): """ Retrieve the current state of the magnet, whether it is idle or not. @return bool: the actual state which was set in the magnet hardware. True = idle, decoupled from control False = Not Idle, coupled to control """ pass
tobiasgehring/qudi
hardware/sc_magnet/magnet.py
Python
gpl-3.0
62,977
import re import lxml.html from pupa.scrape import Scraper, Organization class WYCommitteeScraper(Scraper): members = {} urls = { "list": "http://legisweb.state.wy.us/LegbyYear/CommitteeList.aspx?Year=%s", "detail": "http://legisweb.state.wy.us/LegbyYear/%s", } def scrape(self, session=None): if session is None: session = self.latest_session() self.info('no session specified, using %s', session) list_url = self.urls["list"] % (session, ) committees = {} page = self.get(list_url).text page = lxml.html.fromstring(page) for el in page.xpath(".//a[contains(@href, 'CommitteeMembers')]"): committees[el.text.strip()] = el.get("href") for c in committees: self.info(c) detail_url = self.urls["detail"] % (committees[c],) page = self.get(detail_url).text page = lxml.html.fromstring(page) if re.match('\d{1,2}-', c): c = c.split('-', 1)[1] jcomm = Organization(name=c.strip(), chamber='joint', classification='committee') for table in page.xpath(".//table[contains(@id, 'CommitteeMembers')]"): rows = table.xpath(".//tr") chamber = rows[0].xpath('.//td')[0].text_content().strip() chamber = 'upper' if chamber == 'Senator' else 'lower' comm = Organization(name=c.strip(), chamber=chamber, classification='committee') for row in rows[1:]: tds = row.xpath('.//td') name = tds[0].text_content().strip() role = 'chairman' if tds[3].text_content().strip() == 'Chairman' else 'member' comm.add_member(name, role) jcomm.add_member(name, role) comm.add_source(detail_url) yield comm jcomm.add_source(detail_url) yield jcomm
cliftonmcintosh/openstates
openstates/wy/committees.py
Python
gpl-3.0
1,983
#!/usr/bin/env python # gRefer is a Bibliographic Management System that uses Google Docs # as shared storage. # # Copyright (C) 2011 NigelB # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from gRefer import log def start_notifier(): import os from gRefer.config_constants import dir_name, bibfiler_log_file from gRefer.log import NotifyHandler import logging import logging.handlers logger = logging.getLogger(name="Notifier") if not os.path.exists(dir_name): os.makedirs(dir_name) fh = logging.handlers.TimedRotatingFileHandler( os.path.join(dir_name,bibfiler_log_file), when="midnight", backupCount="7" ) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) logger.root.addHandler(NotifyHandler()) logger.root.addHandler(fh) logger.setLevel(log.TRACE) logger.root.setLevel(log.TRACE) from gRefer.filer.systray import run_systray run_systray()
nigelb/gRefer
gRefer/filer/startup.py
Python
gpl-3.0
1,643
""" Load this into a Python Script widget (does not have to be connected to any other widgets), and set the project directory to the location of the git repo. Hit execute and enjoy! """ PROJECT_DIR = '/home/mahmoud/work/qualityvis' import sys sys.path.append(PROJECT_DIR) sys.path.append(PROJECT_DIR+'/orange_scripts')
slaporte/qualityvis
orange_scripts/project_setup.py
Python
gpl-3.0
321
a = 'GeeksforGeeks' # initialising a byte object c = b'GeeksforGeeks' # using encode() to encode the String # encoded version of a is stored in d # using ASCII mapping d = a.encode('ASCII') # checking if a is converted to bytes or not if (d==c): print ("Encoding successful") else : print ("Encoding Unsuccessful") # initialising a String a = 'GeeksforGeeks' # initialising a byte object c = b'GeeksforGeeks' # using decode() to decode the Byte object # decoded version of c is stored in d # using ASCII mapping d = c.decode('ASCII') # checking if c is converted to String or not if (d==a): print ("Decoding successful") else : print ("Decoding Unsuccessful")
srinivasanmit/all-in-all
G4G/byte_vs_string_obj.py
Python
gpl-3.0
683
from dcgpy import expression_gdual_double as expression from dcgpy import kernel_set_gdual_double as kernel_set from pyaudi import gdual_double as gdual # 1- Instantiate a random expression using the 4 basic arithmetic operations ks = kernel_set(["sum", "diff", "div", "mul"]) ex = expression(inputs = 1, outputs = 1, rows = 1, cols = 6, levels_back = 6, arity = 2, kernels = ks(), n_eph = 0, seed = 4232123212) # 2 - Define the symbol set to be used in visualizing the expression # (in our case, 1 input variable named "x") and visualize the expression in_sym = ["x"] print("Expression:", ex(in_sym)[0]) # 3 - Print the simplified expression print("Simplified expression:", ex.simplify(in_sym)) # 4 - Visualize the dCGP graph ex.visualize(in_sym) # 5 - Define a gdual number of value 1.2 and truncation order 2 x = gdual(1.2, "x", 2) # 6 - Compute the output of the expression and its second derivative in x = 1.2 and print print("Expression in x=1.2:", ex([x])[0]) print("Second derivative:", ex([x])[0].get_derivative([2])) # 5 - Mutate the expression with 2 random mutations of active genes and print ex.mutate_active(2) print("Mutated expression:", ex(in_sym)[0])
darioizzo/d-CGP
doc/examples/getting_started.py
Python
gpl-3.0
1,313
import random from django.http import HttpResponse from httplib import HTTPConnection, HTTPSConnection from urlparse import urlsplit import httplib2 import urllib from django.utils import simplejson as json from django.conf import settings from django.contrib.auth.decorators import login_required from django.utils.html import escape from django.views.decorators.csrf import csrf_exempt import logging from urlparse import urlparse from geonode.maps.models import LayerStats, Layer from xml.etree.ElementTree import XML, ParseError import re logger = logging.getLogger("geonode.proxy.views") HGL_URL = 'http://hgl.harvard.edu:8080/HGL' _valid_tags = "\{http\:\/\/www\.opengis\.net\/wms\}WMS_Capabilities|\ WMT_MS_Capabilities|WMS_DescribeLayerResponse|\ \{http\:\/\/www\.opengis\.net\/gml\}FeatureCollection|msGMLOutput|\ \{http\:\/\/www.opengis\.net\/wfs\}FeatureCollection|\ rss|{http://www.w3.org/2005/Atom}feed|\ \{http\:\/\/www\.w3\.org\/2001\/XMLSchema\}schema|\ {http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF" _user, _password = settings.GEOSERVER_CREDENTIALS h = httplib2.Http() h.add_credentials(_user, _password) _netloc = urlparse(settings.GEOSERVER_BASE_URL).netloc h.authorizations.append( httplib2.BasicAuthentication( (_user, _password), _netloc, settings.GEOSERVER_BASE_URL, {}, None, None, h ) ) @csrf_exempt def proxy(request): if 'url' not in request.GET: return HttpResponse( "The proxy service requires a URL-encoded URL as a parameter.", status=400, content_type="text/plain" ) url = urlsplit(request.GET['url']) # Don't allow localhost connections unless in DEBUG mode if not settings.DEBUG and re.search('localhost|127.0.0.1', url.hostname): return HttpResponse(status=403) locator = url.path if url.query != "": locator += '?' 
+ url.query if url.fragment != "": locator += '#' + url.fragment # Strip all headers and cookie info headers = {} conn = HTTPConnection(url.hostname, url.port) if url.scheme == "http" else HTTPSConnection(url.hostname, url.port) conn.request(request.method, locator, request.raw_post_data, headers) result = conn.getresponse() response = HttpResponse( valid_response(result.read()), status=result.status, content_type=result.getheader("Content-Type", "text/plain") ) return response def valid_response(responseContent): #Proxy should only be used when expecting an XML or JSON response #ArcGIS Server GetFeatureInfo xml response if re.match("<FeatureInfoResponse", responseContent): return responseContent # ows exceptions if "<ows:ExceptionReport" in responseContent: return responseContent if responseContent[0] == "<": try: from defusedxml.ElementTree import fromstring et = fromstring(responseContent) if re.match(_valid_tags, et.tag): return responseContent except ParseError: return None elif re.match('\[|\{', responseContent): try: json.loads(responseContent) return responseContent except: return None return None @csrf_exempt def geoserver_rest_proxy(request, proxy_path, downstream_path): if not request.user.is_authenticated(): return HttpResponse( "You must be logged in to access GeoServer", mimetype="text/plain", status=401) def strip_prefix(path, prefix): assert path.startswith(prefix) return path[len(prefix):] path = strip_prefix(request.get_full_path(), proxy_path) url = "".join([settings.GEOSERVER_BASE_URL, downstream_path, path]) http = httplib2.Http() http.add_credentials(*settings.GEOSERVER_CREDENTIALS) headers = dict() if request.method in ("POST", "PUT") and "CONTENT_TYPE" in request.META: headers["Content-Type"] = request.META["CONTENT_TYPE"] response, content = http.request( url, request.method, body=request.raw_post_data or None, headers=headers) return HttpResponse( content=content, status=response.status, mimetype=response.get("content-type", "text/plain")) 
def picasa(request): url = "http://picasaweb.google.com/data/feed/base/all?thumbsize=160c&" kind = request.GET['kind'] if request.method == 'GET' else request.POST['kind'] bbox = request.GET['bbox'] if request.method == 'GET' else request.POST['bbox'] query = request.GET['q'] if request.method == 'GET' else request.POST['q'] maxResults = request.GET['max-results'] if request.method == 'GET' else request.POST['max-results'] coords = bbox.split(",") coords[0] = -180 if float(coords[0]) <= -180 else coords[0] coords[2] = 180 if float(coords[2]) >= 180 else coords[2] coords[1] = coords[1] if float(coords[1]) > -90 else -90 coords[3] = coords[3] if float(coords[3]) < 90 else 90 newbbox = str(coords[0]) + ',' + str(coords[1]) + ',' + str(coords[2]) + ',' + str(coords[3]) url = url + "kind=" + kind + "&max-results=" + maxResults + "&bbox=" + newbbox + "&q=" + urllib.quote(query.encode('utf-8')) #+ "&alt=json" feed_response = urllib.urlopen(url).read() return HttpResponse(feed_response, mimetype="text/xml") def flickr(request): url = "http://api.flickr.com/services/rest/?method=flickr.photos.search&api_key=%s" % settings.FLICKR_API_KEY bbox = request.GET['bbox'] if request.method == 'GET' else request.POST['bbox'] query = request.GET['q'] if request.method == 'GET' else request.POST['q'] maxResults = request.GET['max-results'] if request.method == 'GET' else request.POST['max-results'] coords = bbox.split(",") coords[0] = -180 if float(coords[0]) <= -180 else coords[0] coords[2] = 180 if float(coords[2]) >= 180 else coords[2] coords[1] = coords[1] if float(coords[1]) > -90 else -90 coords[3] = coords[3] if float(coords[3]) < 90 else 90 newbbox = str(coords[0]) + ',' + str(coords[1]) + ',' + str(coords[2]) + ',' + str(coords[3]) url = url + "&tags=%s&per_page=%s&has_geo=1&bbox=%s&format=json&extras=geo,url_q&accuracy=1&nojsoncallback=1" % (query,maxResults,newbbox) feed_response = urllib.urlopen(url).read() return HttpResponse(feed_response, mimetype="text/xml") def 
hglpoints (request): from xml.dom import minidom import re url = HGL_URL + "/HGLGeoRSS?GeometryType=point" bbox = ["-180","-90","180","90"] max_results = request.GET['max-results'] if request.method == 'GET' else request.POST['max-results'] if max_results is None: max_results = "100" try: bbox = request.GET['bbox'].split(",") if request.method == 'GET' else request.POST['bbox'].split(",") except: pass query = request.GET['q'] if request.method == 'GET' else request.POST['q'] url = url + "&UserQuery=" + urllib.quote(query.encode('utf-8')) #+ \ #"&BBSearchOption=1&minx=" + bbox[0] + "&miny=" + bbox[1] + "&maxx=" + bbox[2] + "&maxy=" + bbox[3] dom = minidom.parse(urllib.urlopen(url)) iterator = 1 for node in dom.getElementsByTagName('item'): if iterator <= int(max_results): description = node.getElementsByTagName('description')[0] guid = node.getElementsByTagName('guid')[0] title = node.getElementsByTagName('title')[0] if guid.firstChild.data != 'OWNER.TABLE_NAME': description.firstChild.data = description.firstChild.data + '<br/><br/><p><a href=\'javascript:void(0);\' onClick=\'app.addHGL("' \ + escape(title.firstChild.data) + '","' + re.sub("SDE\d?\.","", guid.firstChild.data) + '");\'>Add to Map</a></p>' iterator +=1 else: node.parentNode.removeChild(node) return HttpResponse(dom.toxml(), mimetype="text/xml") def hglServiceStarter (request, layer): #Check if the layer is accessible to public, if not return 403 accessUrl = HGL_URL + "/ogpHglLayerInfo.jsp?ValidationKey=" + settings.HGL_VALIDATION_KEY +"&layers=" + layer accessJSON = json.loads(urllib.urlopen(accessUrl).read()) if accessJSON[layer]['access'] == 'R': return HttpResponse(status=403) #Call the RemoteServiceStarter to load the layer into HGL's Geoserver in case it's not already there startUrl = HGL_URL + "/RemoteServiceStarter?ValidationKey=" + settings.HGL_VALIDATION_KEY + "&AddLayer=" + layer return HttpResponse(urllib.urlopen(startUrl).read()) def tweetServerProxy(request,geopsip): url = 
urlsplit(request.get_full_path()) if geopsip == "standard": geopsip = settings.GEOPS_IP tweet_url = "http://" + geopsip + "?" + url.query identifyQuery = re.search("QUERY_LAYERS", tweet_url) if identifyQuery is not None: if re.search("%20limit%2010&", tweet_url)is None: return HttpResponse(status=403) step1 = urllib.urlopen(tweet_url) step2 = step1.read() if 'content-type' in step1.info().dict: response = HttpResponse(step2, mimetype= step1.info().dict['content-type']) else: response = HttpResponse(step2) try : cookie = step1.info().dict['set-cookie'].split(";")[0].split("=")[1] response.set_cookie("tweet_count", cookie) except: pass return response def tweetDownload (request): if (not request.user.is_authenticated() or not request.user.get_profile().is_org_member): return HttpResponse(status=403) proxy_url = urlsplit(request.get_full_path()) download_url = "http://" + settings.GEOPS_IP + "?" + proxy_url.query + settings.GEOPS_DOWNLOAD http = httplib2.Http() response, content = http.request( download_url, request.method) response = HttpResponse( content=content, status=response.status, mimetype=response.get("content-type", "text/plain")) response['Content-Disposition'] = response.get('Content-Disposition', 'attachment; filename="tweets"' + request.user.username + '.csv'); return response def tweetTrendProxy (request): tweetUrl = "http://" + settings.AWS_INSTANCE_IP + "/?agg=trend&bounds=" + request.POST["bounds"] + "&dateStart=" + request.POST["dateStart"] + "&dateEnd=" + request.POST["dateEnd"]; resultJSON ="" # resultJSON = urllib.urlopen(tweetUrl).read() # import datetime # # # startDate = datetime.datetime.strptime(request.POST["dateStart"], "%Y-%b-%d") # endDate = datetime.datetime.strptime(request.POST["dateEnd"], "%Y-%b-%d") # # recString = "record: [" # # while startDate <= endDate: # recString += "{'date': '$date', 'Ebola$rnd5' : $rnd6, 'Malaria$rnd4' : $rnd7, 'Influenza$rnd3': $rnd8, 'Plague$rnd3': $rnd9, 'Lyme_Disease$rnd1': $rnd10}," # recString = 
recString.replace("$rnd6", str(random.randrange(50,500,1))) # recString = recString.replace("$rnd7", str(random.randrange(50,500,1))) # recString = recString.replace("$rnd8", str(random.randrange(50,500,1))) # recString = recString.replace("$rnd9", str(random.randrange(50,500,1))) # recString = recString.replace("$rnd10", str(random.randrange(50,500,1))) # recString = recString.replace("$date", datetime.datetime.strftime(startDate, '%b-%d-%Y')) # startDate = startDate + datetime.timedelta(days=1) # # recString += "]" # # resultJSON = """ # { # metaData: { # root: "record", # fields: [ # {name: 'date'}, # {name: 'Ebola$rnd5'}, # {name: 'Malaria$rnd4'}, # {name: 'Influenza$rnd3'}, # {name: 'Plague$rnd3'}, # {name: 'Lyme_Disease$rnd1'} # ], # }, # // Reader's configured root # $recString #} #""" # # resultJSON = resultJSON.replace("$recString", recString) # # # # resultJSON = resultJSON.replace("$rnd1", str(random.randrange(50,500,1))) # resultJSON = resultJSON.replace("$rnd2", str(random.randrange(50,500,1))) # resultJSON = resultJSON.replace("$rnd3", str(random.randrange(50,500,1))) # resultJSON = resultJSON.replace("$rnd4", str(random.randrange(50,500,1))) # resultJSON = resultJSON.replace("$rnd5", str(random.randrange(50,500,1))) # resultJSON = 
'{"metaData":{"fields":[{"name":"Tuberculosis"},{"name":"STD"},{"name":"Gastroenteritis"},{"name":"Influenza"},{"name":"Common_Cold"},{"name":"date"}],"root":"record"},"record":[{"Common_Cold":18,"Gastroenteritis":104,"Influenza":76,"STD":121,"Tuberculosis":236,"date":"2012-01-26"},{"Common_Cold":19,"Gastroenteritis":115,"Influenza":114,"STD":146,"Tuberculosis":397,"date":"2012-01-27"},{"Common_Cold":26,"Gastroenteritis":104,"Influenza":83,"STD":137,"Tuberculosis":402,"date":"2012-01-28"},{"Common_Cold":25,"Gastroenteritis":96,"Influenza":76,"STD":141,"Tuberculosis":358,"date":"2012-01-29"},{"Common_Cold":30,"Gastroenteritis":106,"Influenza":87,"STD":158,"Tuberculosis":372,"date":"2012-01-30"},{"Common_Cold":12,"Gastroenteritis":74,"Influenza":44,"STD":116,"Tuberculosis":222,"date":"2012-01-31"}]}' return HttpResponse(resultJSON, mimetype="application/json") def youtube(request): url = "http://gdata.youtube.com/feeds/api/videos?v=2&prettyprint=true&" bbox = request.GET['bbox'] if request.method == 'GET' else request.POST['bbox'] query = request.GET['q'] if request.method == 'GET' else request.POST['q'] maxResults = request.GET['max-results'] if request.method == 'GET' else request.POST['max-results'] coords = bbox.split(",") coords[0] = coords[0] if float(coords[0]) > -180 else -180 coords[2] = coords[2] if float(coords[2]) < 180 else 180 coords[1] = coords[1] if float(coords[1]) > -90 else -90 coords[3] = coords[3] if float(coords[3]) < 90 else 90 #location would be the center of the map. 
location = str((float(coords[3]) + float(coords[1]))/2) + "," + str((float(coords[2]) + float(coords[0]))/2); #calculating the location-readius R = 6378.1370; PI = 3.1415926; left = R*float(coords[0])/180.0/PI; right = R*float(coords[2])/180.0/PI; radius = (right - left)/2*2; radius = 1000 if (radius > 1000) else radius; url = url + "location=" + location + "&max-results=" + maxResults + "&location-radius=" + str(radius) + "km&q=" + urllib.quote(query.encode('utf-8')) feed_response = urllib.urlopen(url).read() return HttpResponse(feed_response, mimetype="text/xml") def download(request, service, layer, format): params = request.GET #mimetype = params.get("outputFormat") if service == "wfs" else params.get("format") service=service.replace("_","/") url = settings.GEOSERVER_BASE_URL + service + "?" + params.urlencode() layerObj = Layer.objects.get(pk=layer) if layerObj.downloadable and request.user.has_perm('maps.view_layer', obj=layerObj): layerstats,created = LayerStats.objects.get_or_create(layer=layer) layerstats.downloads += 1 layerstats.save() download_response, content = h.request( url, request.method, body=None, headers=dict()) content_disposition = None if 'content_disposition' in download_response: content_disposition = download_response['content-disposition'] mimetype = download_response['content-type'] response = HttpResponse(content, mimetype = mimetype) if content_disposition is not None: response['Content-Disposition'] = content_disposition else: response['Content-Disposition'] = "attachment; filename=" + layerObj.name + "." + format return response else: return HttpResponse(status=403)
cga-harvard/cga-worldmap
geonode/proxy/views.py
Python
gpl-3.0
15,886
#!/usr/bin/env python
from . import memfbf
from numpy import append
import os
import logging
from glob import glob

LOG = logging.getLogger(__name__)


class SlicerFrame(dict):
    """Plain dict holding one slice of data, keyed by FBF stem name."""
    pass


class FBFSlicer(object):
    """Given a workspace directory of flat binary files, grab all useful
    filenames and return a record of data at a time as a python dictionary.
    """

    def __init__(self, work_dir, buffer_size=0, filename_filter=None):
        """Initialize slicer object parameters.

        :param work_dir: Workspace directory of flat binary files to read
        :param buffer_size: Circular buffer size or 0 for non-circular buffers/FBFs
        :param filename_filter: Filter function that returns True if the provided
            file should be opened for reading. Should return False otherwise.
        """
        self._wd = work_dir
        self._buffer_size = buffer_size
        # Maps absolute filename -> open memfbf.FBF object, or None when a file
        # could not be parsed as FBF (kept so we don't retry it on every scan).
        self._open_files = dict()
        if filename_filter is None:
            # Default filter: accept every file in the workspace.
            filename_filter = lambda filename: True
        self.should_include = filename_filter

    def _update_open_files(self):
        """Scan the workspace and open any not-yet-seen file that passes the filter."""
        for fn in glob(os.path.join(self._wd, '*')):
            if fn not in self._open_files and self.should_include(os.path.split(fn)[-1]):
                LOG.debug('opening %s' % fn)
                try:
                    nfo = memfbf.FBF(fn)
                except Exception as oops:
                    # Record the failure as None so this file is skipped later.
                    nfo = None
                    LOG.info('%s could not be opened as FBF' % fn)
                    LOG.debug(repr(oops))
                LOG.debug('found new file %s' % fn)
                self._open_files[fn] = nfo

    def __call__(self, first_record, last_record=None):
        """Retrieve a slice of a FBF directory
        using inclusive 1-based record number range,
        noting that last-first+1 records are returned.

        Returns a SlicerFrame mapping stem name -> record data, or None when a
        non-circular file is shorter than the requested range.
        """
        # Default to a single-record slice.
        last_record = first_record if last_record is None else last_record
        if not self._open_files:
            self._update_open_files()
        data = SlicerFrame()
        for name, nfo in self._open_files.items():
            if nfo is not None:
                # note we use % in order to deal with
                # wavenumber files that are only ever 1 record long
                # circular buffers which are fixed length files
                file_len = nfo.length()
                # check for non-circular buffer case and going off the end of the file
                # note use of > since record numbers are 1-based
                if (self._buffer_size == 0) and (file_len != 1) and (first_record > file_len or last_record > file_len):
                    LOG.warning('%s: length is %d but start-end is %d-%d' % (name, file_len, first_record, last_record))
                    return None
                # check for circular buffers that aren't preallocated properly
                if self._buffer_size > 0 and file_len not in (1, self._buffer_size):
                    LOG.info('buffer file %s size mismatch (%d != %d)! ignoring' % (name, file_len, self._buffer_size))
                else:
                    # 0-based circular buffer
                    first_index = (first_record - 1) % file_len
                    last_index = (last_record - 1) % file_len
                    if last_index >= first_index:
                        # Records are in one continuous line
                        idx = slice(first_index, last_index + 1)  # +1 to include last item
                        data[nfo.stemname] = nfo[idx]
                    else:
                        # Records are on two ends of the circular buffer
                        idx1 = slice(first_index, self._buffer_size)
                        idx2 = slice(0, last_index + 1)  # +1 to include last item
                        arr1 = nfo[idx1]
                        arr2 = nfo[idx2]
                        data[nfo.stemname] = append(arr1, arr2, axis=0)
        return data
davidh-ssec/pyfbf
pyfbf/slicer.py
Python
gpl-3.0
3,928
#!/usr/bin/python
# Copyright 2013 Gandi SAS
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: gandi_iface
version_added: "2.0"
short_description: create, attach, detach or delete Gandi network interfaces
description:
     - Manage Gandi network interfaces
options:
  state:
    description:
      - desired state of the resource
    required: false
    default: "created"
    choices: ["created", "deleted"]
    aliases: []
  datacenter:
    description:
      - datacenter location for servers
    required: true
    choices: ["Saint Denis", "Bissen", "Baltimore"]
  iface_id:
    description:
      - id of an existing interface; required when state is "deleted"
    required: false
    default: null
  bandwidth:
    description:
      - bandwidth of the interface in bits/s (float)
    required: false
  vlan:
    description:
      - private vlan name the interface belongs to (str)
    required: false
    default: null
  ip_address:
    description:
      - CIDR IPv4|IPv6 address ot the interface on the vlan (str)
    required: false
    default: null
  ip_version:
    description:
      - ip version of the interface (str)
    required: false
    default: null

requirements: [ "libcloud" ]
author: Eric Garrigues <eric@gandi.net>
'''

EXAMPLES = '''
# Basic provisioning example.  Create a new iface on vlan mypvlan
# Luxembourg datacenter
- gandi_iface:
    vlan: mypvlan
    datacenter: "Bissen"
    ip_address: 192.168.0.1
    ip_version: 4
    bandwidth: 50000.0
'''

import sys

USER_AGENT_PRODUCT = "Ansible-gandi"
USER_AGENT_VERSION = "v0.1"

try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.gandi import GandiException
    _ = Provider.GANDI
except ImportError:
    print("failed=True " +
          "msg='libcloud with Gandi support required for this module'")
    sys.exit(1)

# Load in the libcloud secrets file
try:
    import secrets
except ImportError:
    secrets = None

ARGS = getattr(secrets, 'GANDI_PARAMS', ())

if not ARGS:
    print("failed=True " +
          "msg='Missing Gandi connection in libcloud secrets file.'")
    sys.exit(1)


def unexpected_error_msg(error):
    """Create an error string based on passed in error."""
    # XXX : better error management
    return error


def _get_by_name(name, entities):
    """Return the first entity whose .name equals `name`, or None."""
    find = [x for x in entities if x.name == name]
    return find[0] if find else None


def _get_by_id(id, entities):
    """Return the first entity whose .id equals `id`, or None."""
    find = [x for x in entities if x.id == id]
    return find[0] if find else None


def get_datacenter(driver, name):
    """Get datacenter by name."""
    dcs = driver.list_locations()
    return _get_by_name(name, dcs)


def get_pvlan(driver, name):
    """Get private vlan by name."""
    pvlans = driver.ex_list_pvlans()
    return _get_by_name(name, pvlans)


def get_iface(driver, id):
    """Get network interface by id."""
    ifaces = driver.ex_list_ifaces()
    return _get_by_id(id, ifaces)


def get_iface_info(iface):
    """Retrieves interface information from an interface object and
    returns it as a dictionary.
    """
    return({
        # NOTE: conditional expression instead of `and/or` so a vlan with a
        # falsy name is still reported instead of collapsing to None.
        'vlan': iface.vlan.name if iface.vlan is not None else None,
        'bandwidth': iface.extra.get('bandwidth'),
        'datacenter_id': iface.extra.get('datacenter_id')
    })


def create_iface(module, driver):
    """Create a new network interface.

    module : AnsibleModule object
    driver: authenticated libcloud driver on Gandi provider

    Returns:
        (changed, dict) with information about the interface that was created.
    """
    iface = {}
    ip_address = module.params.get('ip_address')
    ip_version = module.params.get('ip_version')
    pvlan_name = module.params.get('vlan')
    bandwidth = module.params.get('bandwidth')
    datacenter = module.params.get('datacenter')

    changed = False

    lc_location = get_datacenter(driver, datacenter)
    if not lc_location:
        module.fail_json(msg='Invalid datacenter %s' % datacenter,
                         changed=False)

    pvlan = get_pvlan(driver, pvlan_name)

    # An interface outside a private vlan must carry an explicit ip version.
    if not pvlan and not ip_version:
        module.fail_json(msg='ip_version is mandatory when not a vlan',
                         changed=False)

    try:
        # 'bandwidth' keyword was previously misspelled 'bandwitdh',
        # so the requested bandwidth was never passed to the driver.
        iface = driver.ex_create_iface(location=lc_location,
                                       ip_version=ip_version,
                                       ip_address=ip_address,
                                       vlan=pvlan,
                                       bandwidth=bandwidth)
        changed = True
    except GandiException as e:
        # Surface the underlying error instead of a generic message.
        module.fail_json(msg='Unexpected error attempting to create iface: %s'
                             % unexpected_error_msg(e), changed=False)

    iface_json_data = get_iface_info(iface)

    return (changed, iface_json_data)


def delete_iface(module, driver, iface_id):
    """Delete an interface.

    module: Ansible module object
    driver: authenticated Gandi connection object
    iface_id: int id of the interface

    Returns:
        (changed, iface_id) describing the operation outcome.
    """
    changed = False

    try:
        iface = get_iface(driver, iface_id)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    if iface:
        driver.ex_delete_iface(iface)
        changed = True

    return (changed, iface_id)


def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['created', 'deleted'], default='created'),
            datacenter=dict(default='Bissen'),
            ip_version=dict(),
            ip_address=dict(),
            vlan=dict(),
            bandwidth=dict(),
            # New optional parameter: previously `iface_id` was referenced in
            # the 'deleted' branch without ever being defined (NameError).
            iface_id=dict(),
        )
    )

    state = module.params.get('state')
    dc = module.params.get('datacenter')
    iface_id = module.params.get('iface_id')

    changed = False

    try:
        gandi = get_driver(Provider.GANDI)(*ARGS)
        gandi.connection.user_agent_append("%s/%s" % (
            USER_AGENT_PRODUCT, USER_AGENT_VERSION))
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    if not dc and state in ['created']:
        module.fail_json(msg='Must specify a "datacenter"', changed=False)

    json_output = {'datacenter': dc}

    if state in ['deleted']:
        if not iface_id:
            module.fail_json(msg='Must specify an "iface_id" to delete',
                             changed=False)
        json_output['state'] = 'deleted'
        (changed, iface_id) = delete_iface(module, gandi, iface_id)
        json_output['iface_id'] = iface_id
    elif state in ['created']:
        json_output['state'] = 'created'
        (changed, iface_data) = create_iface(module, gandi)
        json_output['iface_data'] = iface_data

    json_output['changed'] = changed
    # exit_json emits the JSON result and exits 0 — replaces the
    # Python-2-only `print json.dumps(...)` statement.
    module.exit_json(**json_output)

from ansible.module_utils.basic import *
main()
ericgarrigues/ansible-modules-extras
cloud/gandi/gandi_iface.py
Python
gpl-3.0
7,326
#!/usr/bin/python # -*- coding: utf-8 -*- #-----------------------------------------------------------------------# # # # This file is part of the Parametric Workbench # # # # Copyright (C) 2015 Mundo Reader S.L. # # # # Author: David Estévez Fernández <david.estevez@bq.com> # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # # # #-----------------------------------------------------------------------# __author__ = "David Estévez Fernández <david.estevez@bq.com>" __license__ = "GNU General Public License v3 http://www.gnu.org/licenses/gpl.html" import os import FreeCAD, FreeCADGui import Parameter class RecomputeParameters: """Creates a new parameter""" def GetResources(self): icon_path = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'Gui', 'Resources', 'icons', 'Force_Recompute.png') return {'Pixmap' : icon_path, # the name of a svg file available in the resources 'MenuText': 'Recompute parameters', 'ToolTip' : 'Recompute all parameters in the document'} def Activated(self): for obj in Parameter.Parameter.getAvailableParameters(): obj.touch() FreeCAD.ActiveDocument.recompute() return True def IsActive(self): """ Active only when there is a document, and Parameters have been created """ if not FreeCAD.ActiveDocument: return False else: if Parameter.Parameter.getAvailableParameters(): return True else: return False 
FreeCADGui.addCommand('RecomputeParameters', RecomputeParameters())
David-Estevez/Parametric
Parametric/RecomputeParameters.py
Python
gpl-3.0
2,887
# # Ophidia WPS Module # Copyright (C) 2015-2021 CMCC Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. #
OphidiaBigData/ophidia-wps-module
processes/__init__.py
Python
gpl-3.0
740
from rest_framework import generics
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone

from aklub.models import UserProfile, AdministrativeUnit

from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope
from rest_framework import serializers
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status

from interactions.models import Interaction
from interactions.interaction_types import *

from ..serializers import GetOrCreateUserprofile, get_or_create_user_profile_fields


class ApplyForMembershipSerializer(
    GetOrCreateUserprofile,
):
    """Payload for a (possibly unknown) user applying for membership.

    Extends the shared get-or-create profile fields with the target
    administrative unit (by id) and an optional free-text skills field.
    """

    administrative_unit = serializers.SlugRelatedField(
        required=True,
        # .all() instead of the equivalent-but-odd no-argument .filter()
        queryset=AdministrativeUnit.objects.all(),
        slug_field="id",
    )
    skills = serializers.CharField(required=False, allow_blank=True)

    class Meta:
        model = UserProfile
        fields = get_or_create_user_profile_fields + [
            "administrative_unit",
            "skills",
        ]


class ApplyForMembershipView(generics.CreateAPIView):
    """Create (or look up) a user profile and record a membership application.

    Responds with the profile's primary key. Repeated applications with the
    same e-mail reuse the existing profile and only add a new interaction.
    """

    permission_classes = [TokenHasReadWriteScope | IsAuthenticated]
    required_scopes = ["can_create_userprofile_interaction"]
    serializer_class = ApplyForMembershipSerializer

    def post(self, request, *args, **kwargs):
        serializer = ApplyForMembershipSerializer(data=self.request.data)
        serializer.is_valid(raise_exception=True)
        user, created = serializer.get_or_create_user_profile()
        administrative_unit = serializer.validated_data.get("administrative_unit")
        # (dropped a stray trailing comma that previously turned this call
        # into a one-element tuple expression)
        user.administrative_units.add(administrative_unit)
        interaction_type = membership_application_interaction_type()
        Interaction.objects.create(
            user=user,
            type=interaction_type,
            administrative_unit=administrative_unit,
            date_from=timezone.now(),
            subject=interaction_type.name,
        )
        return Response(
            {"user_id": user.pk},
            status=status.HTTP_200_OK,
        )


def test_apply_for_membership(administrative_unit_1, app_request):
    """End-to-end check of the apply-for-membership endpoint."""
    from rest_framework.reverse import reverse
    from freezegun import freeze_time

    url = reverse("unknown_user_apply_for_membership")
    post_data = {
        "first_name": "John",
        "last_name": "Dock",
        "telephone": "720000000",
        "email": "john@rock.com",
        "note": "iam alergic to bees",
        "age_group": 2012,
        "birth_month": 12,
        "birth_day": 12,
        "street": "Belmont Avenue 2414",
        "city": "New York",
        "zip_code": "10458",
        "administrative_unit": administrative_unit_1.pk,
        "skills": "cooking",
    }
    current_date = timezone.now()
    # Freeze time so the created interaction's date_from is deterministic.
    with freeze_time(current_date):
        response = app_request.post(url, post_data)
    assert response.status_code == 200

    new_user = UserProfile.objects.get(profileemail__email=post_data["email"])
    assert new_user.pk == response.json()["user_id"]
    assert new_user.first_name == post_data["first_name"]
    assert new_user.last_name == post_data["last_name"]
    assert new_user.age_group == post_data["age_group"]
    assert new_user.birth_month == post_data["birth_month"]
    assert new_user.birth_day == post_data["birth_day"]
    assert new_user.street == post_data["street"]
    assert new_user.city == post_data["city"]
    assert new_user.zip_code == post_data["zip_code"]
    assert new_user.administrative_units.first() == administrative_unit_1

    assert new_user.interaction_set.count() == 1
    interaction = new_user.interaction_set.first()
    assert interaction.administrative_unit == administrative_unit_1
    assert interaction.subject == "Žadost o Členství"
    assert interaction.date_from == current_date

    # second registration => user recognized and only new interaction is created!
    post_data["skills"] = "drawing"
    response = app_request.post(url, post_data)
    assert response.status_code == 200
    assert new_user.interaction_set.count() == 2
auto-mat/klub
apps/api/frontend/unknown_user_apply_for_membership_unit.py
Python
gpl-3.0
4,109
#cash register #Samuel Armstrong
samuelarm/A-Level_2016-18
general/cash register.py
Python
gpl-3.0
45
# -*- coding: utf-8 -*- import re from pyload.plugin.Account import Account class StahnuTo(Account): __name = "StahnuTo" __type = "account" __version = "0.05" __description = """StahnuTo account plugin""" __license = "GPLv3" __authors = [("zoidberg", "zoidberg@mujmail.cz")] def loadAccountInfo(self, user, req): html = req.load("http://www.stahnu.to/") m = re.search(r'>VIP: (\d+.*)<', html) trafficleft = self.parseTraffic(m.group(1)) if m else 0 return {"premium": trafficleft > 512, "trafficleft": trafficleft, "validuntil": -1} def login(self, user, data, req): html = req.load("http://www.stahnu.to/login.php", post={"username": user, "password": data['password'], "submit": "Login"}, decode=True) if not '<a href="logout.php">' in html: self.wrongPassword()
ardi69/pyload-0.4.10
pyload/plugin/account/StahnuTo.py
Python
gpl-3.0
991
import SeqIterator import Constants """ @author: Jacob Porter @summary: An iterator class for iterating through two sequence record files simultaneously. @requires: SeqIterator """ class SeqDoubleIterator: def __init__(self, file_name1, file_name2, file_type=Constants.FASTQ, gzip_switch = False): self.SeqIterator1 = SeqIterator.SeqIterator(file_name1, file_type=file_type, gzip_switch = gzip_switch) self.SeqIterator2 = SeqIterator.SeqIterator(file_name2, file_type=file_type, gzip_switch = gzip_switch) def __iter__(self): return self def __next__(self): return self.next() def next(self): record1 = self.SeqIterator1.next() record2 = self.SeqIterator2.next() return (record1, record2)
JacobPorter/BisPin
Utilities/SeqDoubleIterator.py
Python
gpl-3.0
775
# -*- coding: utf-8 -*- # (c) Manuel Guil # © 2016 Esther Martín - AvanzOSC # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html from openerp import models, fields, api class SaleOrder(models.Model): _inherit = "sale.order" period_ack = fields.Char(string='ACK Period', size=6, required=True) #@api.one def action_button_confirm(self, cr, uid, ids, context=None): return super(SaleOrder, self).action_button_confirm(cr, uid, ids, context)
esthermm/enco
enco_sale_order_ext/models/sale_order.py
Python
gpl-3.0
492
import unicodedata
import re


class PathExtension:
    """
    Enables readable url path names instead of ids for object traversal.

    Names are stored as meta.pool_filename and generated from title by default.
    Automatic generation can be disabled by setting *meta.customfilename* to
    False for each object. Extensions like *.html* are not stored; path
    matching works independent from extensions.
    """
    maxlength = 55              # max path length
    containerNamespace = True   # unique filenames for container or global
    extension = None            # optional filename extension appended to names

    def Init(self):
        # roots have no generated filename; skip them
        if self.id == 0:
            return
        # regenerate the filename whenever the object is committed
        self.ListenEvent("commit", "TitleToFilename")
        self._SetName()

    def TitleToFilename(self, **kw):
        """Derive meta.pool_filename from the object's title."""
        customfilename = self.data.get("customfilename", None) # might not exist
        if customfilename:
            # user supplied a fixed name: keep it, only refresh __name__
            self._SetName()
            return
        # create url compatible filename from title
        filename = self.EscapeFilename(self.meta.title)
        # make unique filename
        filename = self.UniqueFilename(filename)
        if self.AddExtension(filename) == self.meta.pool_filename:
            # no change
            return
        if filename:
            # update
            self.meta["pool_filename"] = self.AddExtension(filename)
        else:
            # reset filename
            self.meta["pool_filename"] = ""
        self._SetName()
        self.Signal("pathupdate", path=self.meta["pool_filename"])

    def UniqueFilename(self, name):
        """Append "-<n>" until the name is unique within its namespace."""
        if name == "file":
            # "file" is reserved for download traversal (see __getitem__)
            name = "file_"
        if self.containerNamespace:
            unitref = self.parent.id
        else:
            unitref = None
        cnt = 1
        root = self.root
        # loop until no *other* object (id != self.id) uses this filename
        while root.search.FilenameToID(self.AddExtension(name), unitref, parameter=dict(id=self.id), operators=dict(id="!=")) != 0:
            if cnt > 1:
                # strip the previous "-<n>" suffix before appending a new one
                name = name.rstrip("1234567890-")
            name = name + "-" + str(cnt)
            cnt += 1
        return name

    def EscapeFilename(self, path):
        """
        Converts name to valid path/url (based on django's slugify).

        Path length between *self.maxlength-20* and *self.maxlength* chars;
        tries to cut longer names at '_' boundaries.
        """
        # ascii-fold, drop anything but word chars/spaces/dashes, then
        # collapse dashes and whitespace to underscores
        path = unicodedata.normalize("NFKD", path).encode("ascii", "ignore")
        path = path.decode("utf-8")
        path = re.sub('[^\w\s-]', '', path).strip().lower()
        path = re.sub('[-\s]+', '_', path)
        # avoid ids as filenames
        try:
            int(path)
            path += "_n"
        except:
            pass
        # cut long filenames
        cutlen = 20
        if len(path) <= self.maxlength:
            return path
        # cut at '_'
        pos = path[self.maxlength-cutlen:].find("_")
        if pos > cutlen:
            # no '_' found. cut at maxlength.
            return path[:self.maxlength]
        return path[:self.maxlength-cutlen+pos]

    def AddExtension(self, filename):
        # append self.extension if configured, e.g. "name" -> "name.html"
        if not self.extension:
            return filename
        return "%s.%s" % (filename, self.extension)

    # system functions -----------------------------------------------------------------

    def __getitem__(self, id):
        """
        Traversal lookup based on object.pool_filename and object.id.
        Trailing extensions are ignored if self.extension is None.
        `file` is a reserved name and used in the current object to map file downloads.
        """
        if id == "file":
            raise KeyError(id)
        if self.extension is None:
            # strip a trailing extension: "a.b.html" -> "a.b", "a.html" -> "a"
            id = id.split(".")
            if len(id) > 2:
                id = (".").join(id[:-1])
            else:
                id = id[0]
        try:
            id = int(id)
        except ValueError:
            # not numeric: resolve the filename to an object id
            # NOTE(review): nesting reconstructed from collapsed source —
            # `name` is only bound in this branch, so the lookup must live here.
            name = id
            id = 0
            if name:
                id = self.root.search.FilenameToID(name, self.id)
        if not id:
            raise KeyError(id)
        obj = self.GetObj(id)
        if obj is None:
            raise KeyError(id)
        return obj

    def _SetName(self):
        # expose the filename as the traversal name; fall back to the id
        self.__name__ = self.meta["pool_filename"]
        if not self.__name__:
            self.__name__ = str(self.id)


class RootPathExtension(object):
    """
    Extension for nive root objects to handle alternative url names
    """
    extension = None

    # system functions -----------------------------------------------------------------

    def __getitem__(self, id):
        """
        Traversal lookup based on object.pool_filename and object.id.
        Trailing extensions are ignored.
        `file` is a reserved name and used in the current object to map file downloads.
        """
        if id == "file":
            raise KeyError(id)
        if self.extension is None:
            id = id.split(".")
            if len(id) > 2:
                id = (".").join(id[:-1])
            else:
                id = id[0]
        try:
            id = int(id)
        except:
            # NOTE(review): bare except here, whereas PathExtension.__getitem__
            # catches ValueError only — presumably they should match; confirm.
            name = id
            id = 0
            if name:
                id = self.search.FilenameToID(name, self.id)
        if not id:
            raise KeyError(id)
        obj = self.GetObj(id)
        if not obj:
            raise KeyError(id)
        return obj


class PersistentRootPath(object):
    """
    Extension for nive root objects to handle alternative url names
    """

    def Init(self):
        # re-evaluate the routing name whenever the root is committed or loaded
        self.ListenEvent("commit", "UpdateRouting")
        self.ListenEvent("dataloaded", "UpdateRouting")
        self.UpdateRouting()

    def UpdateRouting(self, **kw):
        # check url name of root
        if self.meta.get("pool_filename"):
            name = self.meta.get("pool_filename")
            if name != self.__name__:
                # close cached root
                self.app._CloseRootObj(name=self.__name__)
                # update __name__ and hash
                self.__name__ = str(name)
                self.path = name
                # unique root id generated from name . negative integer.
                self.idhash = abs(hash(self.__name__))*-1


from nive.tool import Tool, ToolView
from nive.definitions import ToolConf, FieldConf, ViewConf, IApplication

# admin tool configuration: regenerate pool_filename for selected object types
tool_configuration = ToolConf(
    id = "rewriteFilename",
    context = "nive.extensions.path.RewriteFilenamesTool",
    name = "Rewrite pool_filename based on title",
    description = "Rewrites all or empty filenames based on form selection.",
    apply = (IApplication,),
    mimetype = "text/html",
    data = [
        FieldConf(id="types", datatype="checkbox", default="", settings=dict(codelist="types"), name="Object types", description=""),
        FieldConf(id="testrun", datatype="bool", default=1, name="Testrun, no commits", description=""),
        FieldConf(id="resetall", datatype="string", default="", size=15, name="Reset all filenames", description="<b>Urls will change! Enter 'reset all'</b>"),
        FieldConf(id="tag", datatype="string", default="rewriteFilename", hidden=1)
    ],

    views = [
        ViewConf(name="", view=ToolView, attr="form", permission="admin", context="nive.extensions.path.RewriteFilenamesTool")
    ]
)


class RewriteFilenamesTool(Tool):

    def _Run(self, **values):
        """Rewrite filenames for matching objects; returns (html report, ok)."""
        parameter = dict()
        if values.get("resetall") != "reset all":
            # unless explicitly resetting everything, only touch objects
            # that have no filename yet
            parameter["pool_filename"] = ""
        if values.get("types"):
            tt = values.get("types")
            if not isinstance(tt, list):
                tt = [tt]
            parameter["pool_type"] = tt
        operators = dict(pool_type="IN", pool_filename="=")
        fields = ("id", "title", "pool_type", "pool_filename")
        root = self.app.root
        recs = root.search.Search(parameter, fields, max=10000, operators=operators, sort="id", ascending=0)
        if len(recs["items"]) == 0:
            return "<h2>None found!</h2>", False
        user = values["original"]["user"]
        testrun = values["testrun"]
        result = []
        cnt = 0
        for rec in recs["items"]:
            obj = root.LookupObj(rec["id"])
            if obj is None or not hasattr(obj, "TitleToFilename"):
                continue
            filename = obj.meta["pool_filename"]
            obj.TitleToFilename()
            if filename != obj.meta["pool_filename"]:
                # NOTE(review): nesting reconstructed from collapsed source —
                # commit and counter assumed to apply only on actual change.
                result.append(filename + " <> " + obj.meta["pool_filename"])
                if testrun == False:
                    obj.dbEntry.Commit(user=user)
                    #obj.CommitInternal(user=user)
                cnt += 1
        return "OK. %d filenames updated, %d different!<br>%s" % (cnt, len(result), "<br>".join(result)), True
nive/nive
nive/extensions/path.py
Python
gpl-3.0
8,830
# -*- coding: utf-8 -*-
# Sphinx build configuration for the spherepy documentation.
# Compiled / heavyweight dependencies are mocked out below so autodoc can
# import the package even where they are not installed.

import sys
import os

import mock

# Replace unimportable modules with mocks *before* autodoc imports spherepy.
MOCK_MODULES = ['numpy', 'six', 'six.moves', 'matplotlib', '_csphi']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()

import sphinx_bootstrap_theme

# Make the project package importable for autodoc.
sys.path.insert(0, os.path.abspath('../../'))

# -- General configuration -------------------------------------------------

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.autodoc',
]

templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

project = u'spherepy'
copyright = u'2015, Randy Direen, James Direen'

# Short X.Y version and full release string.
version = '0.0'
release = '0.0.7'

exclude_patterns = []
pygments_style = 'sphinx'

# -- HTML output (bootstrap theme) -----------------------------------------

html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()

html_sidebars = {
    '**': ['localtoc.html', 'searchbox.html'],
    'using/windows': ['windowssidebar.html', 'searchbox.html'],
}

html_theme_options = {
    'navbar_title': "SpherePy",
    'navbar_site_name': "Site",
    'navbar_links': [
        ("DireenTech", "http://www.direentech.com", True),
    ],
    'navbar_sidebarrel': False,
    'navbar_pagenav': True,
    'navbar_pagenav_name': "This Page",
    'globaltoc_depth': 2,
    'globaltoc_includehidden': "true",
    'navbar_class': "navbar",
    'source_link_position': "nfooter",
    'bootstrap_version': "3",
}

html_favicon = "_static/icon_spherepy.ico"
html_static_path = ['_static']

htmlhelp_basename = 'spherepydoc'

# -- LaTeX / man / Texinfo output ------------------------------------------

latex_elements = {}

latex_documents = [
    ('index', 'spherepy.tex', u'spherepy Documentation',
     u'Randy Direen, James Direen', 'manual'),
]

man_pages = [
    ('index', 'spherepy', u'spherepy Documentation',
     [u'Randy Direen, James Direen'], 1)
]

texinfo_documents = [
    ('index', 'spherepy', u'spherepy Documentation',
     u'Randy Direen, James Direen', 'spherepy',
     'One line description of project.',
     'Miscellaneous'),
]

# Cross-reference the Python standard library docs.
intersphinx_mapping = {'http://docs.python.org/': None}
rdireen/spherepy
documentation/source/conf.py
Python
gpl-3.0
9,496
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.utils.translation import ugettext
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required

from models import InvitationError, Invitation, InvitationStats
from forms import InvitationForm, RegistrationFormInvitation

from registration.signals import user_registered


def apply_extra_context(context, extra_context=None):
    """Merge ``extra_context`` into ``context``; callables are evaluated lazily.

    NOTE(review): the ``and/or`` idiom below returns ``value`` itself when the
    callable's result is falsy — presumably harmless for template context.
    """
    if extra_context is None:
        extra_context = {}
    for key, value in extra_context.items():
        context[key] = callable(value) and value() or value
    return context


@login_required
def invite(request, success_url=None,
           form_class=InvitationForm,
           template_name='invitation/invitation_form.html',
           extra_context=None):
    """
    Create an invitation and send invitation email.

    On a valid form: send the invitation email and redirect to the ``next``
    request parameter, ``success_url``, or the ``invitation_complete`` URL
    (in that order). Redirect to the named URL ``invitation_unavailable`` on
    InvitationError. Otherwise render ``template_name`` with ``form`` in a
    RequestContext extended by ``extra_context``.

    ``form_class`` must accept (POST, FILES) and expose ``email`` and
    ``message`` in cleaned_data.
    """
    if request.method == 'POST':
        form = form_class(request.POST, request.FILES)
        if form.is_valid():
            try:
                # may raise InvitationError (e.g. no invitations left)
                invitation = Invitation.objects.invite(
                    request.user, form.cleaned_data["email"], form.cleaned_data["message"])
            except InvitationError, e:
                # NOTE(review): leftover debug output — should use logging
                print '****'
                print e
                print '****'
                return HttpResponseRedirect(reverse('invitation_unavailable'))
            invitation.send_email(request=request)
            if 'next' in request.REQUEST:
                return HttpResponseRedirect(request.REQUEST['next'])
            return HttpResponseRedirect(success_url or reverse('invitation_complete'))
    else:
        form = form_class()
    context = apply_extra_context(RequestContext(request), extra_context)
    return render_to_response(template_name,
                              {'form': form},
                              context_instance=context)


def register(request, invitation_key,
             wrong_key_template='invitation/wrong_invitation_key.html',
             redirect_to_if_authenticated='/',
             success_url=None,
             form_class=RegistrationFormInvitation,
             template_name='registration/registration_form.html',
             extra_context=None):
    """
    Allow a new user to register via invitation.

    ``invitation_key`` is the key mailed to the invitee (``[\da-e]{40}``).
    Renders ``wrong_key_template`` (context: ``invitation_key``) for unknown
    keys, redirects already-authenticated users to
    ``redirect_to_if_authenticated``, and on successful registration marks
    the invitation accepted, fires ``registration.signals.user_registered``
    and redirects to ``success_url`` or the ``auth_login`` URL.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect(redirect_to_if_authenticated)
    try:
        invitation = Invitation.objects.find(invitation_key)
    except Invitation.DoesNotExist:
        context = apply_extra_context(RequestContext(request), extra_context)
        return render_to_response(wrong_key_template,
                                  {'invitation_key': invitation_key},
                                  context_instance=context)
    if request.method == 'POST':
        # bind the form to the invited email so it cannot be changed
        form = form_class(invitation.email, request.POST, request.FILES)
        if form.is_valid():
            new_user = form.save()
            invitation.mark_accepted(new_user)
            user_registered.send(sender="invitation",
                                 user=new_user,
                                 request=request)
            # earlier redirect targets kept for reference:
            # return HttpResponseRedirect(success_url or reverse('invitation_registered'))
            # return HttpResponseRedirect(success_url or reverse('profiles-profile-detail', kwargs={'slug':new_user.username}))
            return HttpResponseRedirect(success_url or reverse('auth_login'))
    else:
        form = form_class(invitation.email)
    context = apply_extra_context(RequestContext(request), extra_context)
    return render_to_response(template_name,
                              {'form': form},
                              context_instance=context)


@staff_member_required
def reward(request):
    """
    Add invitations to users with high invitation performance and
    redirect to the referring page.
    """
    rewarded_users, invitations_given = InvitationStats.objects.reward()
    if rewarded_users:
        message = ugettext(u'%(users)s users are given a total of ' \
                           u'%(invitations)s invitations.') % {
            'users': rewarded_users,
            'invitations': invitations_given}
    else:
        message = ugettext(u'No user has performance above ' \
                           u'threshold, no invitations awarded.')
    # message_set is the pre-Django-1.2 user message API
    request.user.message_set.create(message=message)
    return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
hzlf/openbroadcast
website/apps/invitation/views.py
Python
gpl-3.0
7,841
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-12-21 14:58 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('carts', '0007_auto_20161221_1337'), ] operations = [ migrations.AddField( model_name='cart', name='tax_percentage', field=models.DecimalField(decimal_places=3, default=0.085, max_digits=20), ), ]
michel-rodrigues/ecommerce2
source/carts/migrations/0008_cart_tax_percentage.py
Python
gpl-3.0
489
# -*- coding: utf-8 -*-
"""Train a TransH knowledge-graph embedding on tab-separated triples and
pickle the resulting (model, entity-map, relation-map) to 'transH.model'."""
import pickle
import sys

import numpy as np


def main():
    """Parse CLI args, train TransH on the training triples, dump the model.

    Usage: python train.py train_data validation_data
    """
    # Imported lazily so loadData() remains usable without the transH package.
    from transH import TransH

    if len(sys.argv) != 3:
        print('[Usage] python train.py train_data validation_data')
        exit(0)
    train_data, valid_data = sys.argv[1:]

    X, E, R = loadData(train_data)
    V = loadData(valid_data, E=E, R=R, mode='valid')

    # hyper-parameters: margin, embedding dim, learning rate, batch size,
    # soft-constraint weight (values unchanged from the original script)
    gamma = 1
    k = 50
    alpha = 0.1
    b = 5000
    c = 0.25

    transH = TransH(len(E), len(R), gamma, k, alpha, b, c)
    transH.fit(X, validationset=V)

    # pickle requires a binary handle ('wb'); the with-block also guarantees
    # the file is closed — the original opened in text mode and never closed it
    with open('transH.model', 'wb') as w:
        pickle.dump((transH, E, R), w)


def loadData(file_path, E=None, R=None, mode='train'):
    """Read tab-separated (head, relation, tail) triples from *file_path*.

    mode='train': build fresh index maps. Returns (X, E, R) where X is an
        ndarray of (head_id, rel_id, tail_id) triples and E / R map entity /
        relation names to integer ids in order of first appearance.
    mode='valid': reuse the maps E and R built from the training set and
        return only the ndarray of index triples.
    """
    if mode == 'train':
        E, R = {}, {}
        e_ind, r_ind = 0, 0
        X = []
        # with-block fixes the original's unclosed file handle
        with open(file_path, 'r') as f:
            for line in f:
                h, r, t = line.strip().split('\t')
                if h not in E:
                    E[h] = e_ind
                    e_ind += 1
                if t not in E:
                    E[t] = e_ind
                    e_ind += 1
                if r not in R:
                    R[r] = r_ind
                    r_ind += 1
                X.append((E[h], R[r], E[t]))
        return np.array(X), E, R
    elif mode == 'valid':
        X = []
        # the original 'valid' branch leaked its file handle entirely
        with open(file_path, 'r') as f:
            for line in f:
                h, r, t = line.strip().split('\t')
                X.append((E[h], R[r], E[t]))
        return np.array(X)


if __name__ == "__main__":
    main()
Taka-Coma/graphEmbedding_impls
TransH/train.py
Python
gpl-3.0
1,186
""" Tools to put to good use INE data """
JoaquimPatriarca/senpy-for-gis
gasp/ine/__init__.py
Python
gpl-3.0
41
#!/usr/bin/env python from pylab import * # create some data to use for the plot dt = 0.001 t = arange(0.0, 10.0, dt) r = exp(-t[:1000]/0.05) # impulse response x = randn(len(t)) s = convolve(x,r)[:len(x)]*dt # colored noise # the main axes is subplot(111) by default plot(t, s) #axis([0, 1, 1.1*amin(s), 2*amax(s) ]) #xlabel('time (s)') #ylabel('current (nA)') #title('The original signal for channel i', fontsize=28) plt.xticks( range(5), ('', '', '', '','', '', '', '','2', '', '', '','3', '', '', '','4', '', '', '',) ) plt.yticks( range(1), ('', '', '', '','', '', '', '','2', '', '', '','3', '', '', '','4', '', '', '',) ) ## this is an inset axes over the main axes #a = axes([.65, .6, .2, .2], axisbg='y') #n, bins, patches = hist(s, 400, normed=1) #title('Probability') #setp(a, xticks=[], yticks=[]) # ## this is another inset axes over the main axes #a = axes([0.2, 0.6, .2, .2], axisbg='y') #plot(t[:len(r)], r) #title('Impulse response') #setp(a, xlim=(0,.2), xticks=[], yticks=[]) show()
lol/BCI-BO-old
orig_sig.py
Python
gpl-3.0
1,025
import glob
import sys

import serial


def serial_ports():
    """ Lists serial port names

        :raises EnvironmentError:
            On unsupported or unknown platforms
        :returns:
            A list of the serial ports available on the system
    """
    # Build the platform-specific list of candidate device names.
    if sys.platform.startswith('win'):
        candidates = ['COM%s' % (i + 1) for i in range(256)]
    elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
        # this excludes your current terminal "/dev/tty"
        candidates = glob.glob('/dev/tty[A-Za-z]*')
    elif sys.platform.startswith('darwin'):
        candidates = glob.glob('/dev/tty.*')
    else:
        raise EnvironmentError('Unsupported platform')

    # Keep only the candidates that can actually be opened.
    available = []
    for candidate in candidates:
        try:
            serial.Serial(candidate).close()
        except (OSError, serial.SerialException):
            continue
        available.append(candidate)
    return available
pliniopereira/ccd3
src/utils/rodafiltros/Leitura_portas.py
Python
gpl-3.0
920
#!/usr/bin/env python # -*- coding: utf-8 -*- # # ecp.py part of cellery (ceRNAs linking inference) # # Copyright 2016 Oscar Bedoya Reina <obedoya@igmm-linux-005> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. # # """ Methods to calculate ECP values (Endogenous Competition Potential) """ ######################################################## #~ Import libraries. ######################################################## from cellery import exceptions from itertools import product from multiprocessing import Queue,Process from numpy import array,empty,float32,float64,nan,zeros from clcECP import rtrnECP,rtrnECPMskd,rtrnECPDnsty,rtrnECPDnstyMskd import os import sqlite3 ######################################################## #~ Compute ECP values for all combinations of two arrays of arrays with # values. ######################################################## def cmpECP(aMrnVlsDtA,aMrnVlsDtB,aANmsA,aANmsB,fldrOutECPPrws, \ aALenA=False,aALenB=False,aMskRef=False,nThrds=10,intrvlSz=700, \ sqlFl=False,pntrCnts=True): """ Input: aMrnVlsDtA is an array A of arrays with values for miRNAs. aMrnVlsDtB is an array B of arrays with values for miRNAs. aANmsA is the array of variable names in the same position as the numbers in vrblAPos. aANmsB is the array of variable names in the same order as vrblBPos. 
fldrOutECPPrws is a folder to store partial ECP results. Optionally, aALenA is an array of object lengths in the same order that aAVlsA. aALenB is an array of object lengths in the same order that aAVlsB. aMskRef is a mask array for the miRNAs (i.e. arrays within array A and B). nThrds is the number of threads to run in parallel. intrvlSz is the size of the interval to run in multithread. sqlFl is a sql database to save the ECP values. If pntrCnts is True aAVlsA and aAVlsB are counts so 0 values shall be considered (excluded in shared counts). Output: aECPVlsAVlsB is an array with the ECP values for all combinations of array A and B. NOTE: The subarrays in arrays A and B must have the same dimensions (i.e. all the miRNA arrays must have the same size.). NOTE: Null values shall be numpy.nan. NOTE: aECPVlsAVlsB has arrays in A as rows and in B as columns. NOTE: if aALenA and aALenB ECP density is going to be calculated. NOTE: if aMskRef miRNA is going to be masked. """ def mltECPclc(qInJobs,qOutRslts,mthdECPclc,aMrnVlsDtA,aMrnVlsDtB, \ fldrOutECPPrws,aALenA,aALenB,aMskRef,pntrCnts): """ Input: qInJobs is a queue with pairs of intervals. qOutRslts is the queue to store position in arrayA, position in arrayB, and ECP value. mthdECPclc is the method to calculate the ECP value. aMrnVlsDtA is an array A of arrays with values for miRNAs. aMrnVlsDtB is an array B of arrays with values for miRNAs. fldrOutECPPrws is a folder to store partial ECP results. aALenA is an array of object lengths in the same order that aAVlsA. aALenB is an array of object lengths in the same order that aAVlsB. aMskRef is a mask array for the miRNAs (i.e. arrays within array A and B). If pntrCnts is True aAVlsA and aAVlsB are counts so 0 values shall be considered (excluded in shared counts). Output: qOutRslts is the queue to store position in arrayA, position in arrayB, and ECP values. 
""" for intrvlA,intrvB in iter(qInJobs.get,'STOP'): lECPVlsAVlsB = mthdECPclc(aMrnVlsDtA,aMrnVlsDtB, \ fldrOutECPPrws,intrvlA,intrvB,pntrCnts,aMskRef,aALenA, \ aALenB) qOutRslts.put(lECPVlsAVlsB) #-------------------------- #~ Check if there is mask for miRNAs if dir(aMskRef)[0]=='T': assert len(aMskRef) == len(aALenB[0]) == len(aALenA[0]) if dir(aALenB)[0]=='T': assert dir(aALenB)[1]=='T' mthdECPclc = rtrnECPDnstyMskd else: assert not aALenA and not aALenB mthdECPclc = rtrnECPMskd else: if dir(aALenB)[0]=='T': assert dir(aALenB)[1]=='T' mthdECPclc = rtrnECPDnsty else: assert not aALenA and not aALenB mthdECPclc = rtrnECP #-------------------------- #~ Create list of intervals for multithreading lenaMrnVlsDtA = len(aMrnVlsDtA) lenaMrnVlsDtB = len(aMrnVlsDtB) intrvlsMrnVlsA = [] for strt in xrange(0,lenaMrnVlsDtA,intrvlSz): cEnd = strt+intrvlSz if cEnd<lenaMrnVlsDtA: end = cEnd else: end = lenaMrnVlsDtA intrvlsMrnVlsA.append([strt,end]) intrvlsMrnVlsB = [] for strt in xrange(0,lenaMrnVlsDtB,intrvlSz): cEnd = strt+intrvlSz if cEnd<lenaMrnVlsDtB: end = cEnd else: end = lenaMrnVlsDtB intrvlsMrnVlsB.append([strt,end]) #-------------------------- #~ Run in parallel. aECPVlsAVlsB = zeros((lenaMrnVlsDtA,lenaMrnVlsDtB),dtype=float32) aECPVlsAVlsB.fill(nan)#fill all ECP with nan to start qInJobs = Queue() qOutRslts = Queue() cntVlABPrs=0 for intrvlA,intrvB in product(intrvlsMrnVlsA,intrvlsMrnVlsB): qInJobs.put((intrvlA,intrvB)) cntVlABPrs += 1 for t in xrange(nThrds): Process(target = mltECPclc,args=(qInJobs,qOutRslts,mthdECPclc, \ aMrnVlsDtA,aMrnVlsDtB,fldrOutECPPrws,aALenA,aALenB, \ aMskRef,pntrCnts)).start() lECPVlsAVlsBGlbl = []#store global results for cnt in range(cntVlABPrs): if cnt%50==0: print 'Running calculations on pair %s out of %s'%(cnt, \ cntVlABPrs) lECPVlsAVlsB = qOutRslts.get() lECPVlsAVlsBGlbl.extend(lECPVlsAVlsB) for t in xrange(nThrds): qInJobs.put('STOP') #-------------------------- #~ create array: aMrnVlsDtA in rows, aMrnVlsDtB in columns. 
for vlsAPos,vlsBPos,ECP in lECPVlsAVlsBGlbl: aECPVlsAVlsB[vlsAPos,vlsBPos] = ECP if sqlFl: mkSqlFlECP(lECPVlsAVlsBGlbl,sqlFl,aANmsA,aANmsB) return aECPVlsAVlsB ######################################################## #~ Make a sqlite3 database for ECP values between genes/lncRNAs of # interest. ######################################################## def mkSqlFlECP(lECPVlsAVlsBGlbl,sqlFl,aANmsA,aANmsB): """ Input: lECPVlsAVlsBGlbl is a list of tuples (vrblAPos,vrblBPos,ECP). vrblAPos is the position of the first variables, vrblBPos is the position of the second variable, ECP is the ECP value between vrblAPos and vrblBPos. A sqlite3 database will be created for the input list. aANmsA is the array of variable names in the same position as the numbers in vrblAPos. aANmsB is the array of variable names in the same order as vrblBPos. Output: A sqlite3 database will be created for the input list in the file sqlFl. """ conn = sqlite3.connect(sqlFl) c = conn.cursor() c.execute \ ('''CREATE TABLE records (id TEXT, vrblANm TEXT, vrblBNm TEXT, ECP REAL)''') lCnt = 0 for vrblAPos,vrblBPos,ECP in lECPVlsAVlsBGlbl: vrblANm,vrblBNm = aANmsA[vrblAPos],aANmsB[vrblBPos] lCnt+=1 c.execute('insert into records VALUES (?,?,?,?)', (str(lCnt), \ vrblANm,vrblBNm,float64(ECP))) # create indexes. Decrease complexity of querying c.execute("CREATE INDEX index_records on records (id);") conn.commit() conn.close() return 0 ######################################################## #~ Read a sqlite3 database for correlations between genes/lncRNAs of # interest. ######################################################## def rtrnSqlFlECP(sqlFl,srtdVrblANms,srtdVrblBNms,rtrnECPSgnd=False): """ Input: sqlFl is a sqlite3 database with the fields id, vrblANm, vrblBNm, and ECP. srtdVrblANms is a sorted lists of names present in the field vrblANm. srtdVrblBNms is a sorted lists of names present in the field vrblBNm. 
Optionally, rtrnECPSgnd can have values 'negative' or 'positive', in those cases only 'negative' or 'positive' ECP values are going to be retrieved respectively. Output: aECPVlsAVlsB is an array of size len(srtdVrblANms) x len(srtdVrblBNms) with correlation values ECP. In case the value is not present nan is going to be incldued in the cell. NOTE: If a name is not present in a database, nan values are going to be returned. NOTE: srtdVrblANms are going to be in rows, and srtdVrblBNms in columns. """ if rtrnECPSgnd: try: if rtrnECPSgnd not in {'negative','positive'}: raise exceptions.CelleryWarningObjct \ ('"negative" or "positive" are values, not recognized', \ rtrnECPSgnd) except exceptions.CelleryWarningObjct as err: print err #-------------------------- #~ make a dictionary of names and positions lenaAVlsA = len(srtdVrblANms) lenaAVlsB = len(srtdVrblBNms) dVrblANmPos = dict([(vrblANm,pos) for pos,vrblANm in \ enumerate(srtdVrblANms)]) dVrblBNmPos = dict([(vrblBNm,pos) for pos,vrblBNm in \ enumerate(srtdVrblBNms)]) #-------------------------- #~ make a output array aECPVlsAVlsB = zeros((lenaAVlsA,lenaAVlsB),dtype=float32) aECPVlsAVlsB.fill(nan)#fill all correlations with nan to start #-------------------------- #~ retrieve variable names conn = sqlite3.connect(sqlFl) c = conn.cursor() sVrblANmsInSql = set([str(vrblANm[0]) for vrblANm in \ c.execute('SELECT vrblANm FROM records')]) sVrblBNmsInSql = set([str(vrblBNm[0]) for vrblBNm in \ c.execute('SELECT vrblBNm FROM records')]) lVrblANmInSql = list(set(srtdVrblANms).intersection(sVrblANmsInSql)) lVrblBNmInSql = list(set(srtdVrblBNms).intersection(sVrblBNmsInSql)) try: lenSrtdVrblANms = len(srtdVrblANms) lenlVrblANmInSql = len(lVrblANmInSql) if lenSrtdVrblANms!=lenlVrblANmInSql: raise exceptions.CelleryWarningObjct \ ('Expression for %s variable A names were retrieved out of'% \ lenlVrblANmInSql,lenSrtdVrblANms) except exceptions.CelleryWarningObjct as err: print err pass try: lenSrtdVrblBNms = len(srtdVrblBNms) 
lenlVrblBNmInSql = len(lVrblBNmInSql) if lenSrtdVrblBNms!=lenlVrblBNmInSql: raise exceptions.CelleryWarningObjct \ ('Expression for %s variable B names were retrieved out of'% \ lenlVrblBNmInSql,lenSrtdVrblBNms) except exceptions.CelleryWarningObjct as err: print err pass #-------------------------- #~ retrieve data if rtrnECPSgnd == 'negative': cmmnd = \ 'SELECT * FROM records WHERE ECP<0 AND vrblANm in (%s) AND vrblBNm in (%s)' \ %(','.join(['"%s"'%v for v in lVrblANmInSql]),','.join(['"%s"'%v \ for v in lVrblBNmInSql])) elif rtrnECPSgnd == 'positive': cmmnd = \ 'SELECT * FROM records WHERE ECP>0 AND vrblANm in (%s) AND vrblBNm in (%s)' \ %(','.join(['"%s"'%v for v in lVrblANmInSql]),','.join(['"%s"'%v \ for v in lVrblBNmInSql])) else: cmmnd = 'SELECT * FROM records WHERE vrblANm in (%s) AND vrblBNm in (%s)' \ %(','.join(['"%s"'%v for v in lVrblANmInSql]),','.join(['"%s"'%v \ for v in lVrblBNmInSql])) for idX,vrblANm,vrblBNm,ECP in c.execute(cmmnd): ECP = float32(ECP) aECPVlsAVlsB[dVrblANmPos[vrblANm],dVrblBNmPos[vrblBNm]] = ECP conn.close() return aECPVlsAVlsB
oscarcbr/cellery
cellery/ecp.py
Python
gpl-3.0
11,283
import logging import pandas as pd from catalog.core.search_indexes import PublicationDocSearch from ..data_access import data_cache, IncludedStatistics from query import Query logger = logging.getLogger(__name__) def _create_count_dataframe(df): return df.groupby(['year_published']) \ .agg(dict(title='count', is_archived='sum', has_formal_description='sum', has_odd='sum', has_visual_documentation='sum')) \ .rename(columns={ 'title': 'count', 'is_archived': IncludedStatistics.code_availability_count.name, 'has_formal_description': IncludedStatistics.formal_description_count.name, 'has_odd': IncludedStatistics.odd_count.name, 'has_visual_documentation': IncludedStatistics.visual_documentation_count.name }).reindex(pd.RangeIndex(start=1995, stop=2018, name='year_published'), fill_value=0.0) def create_publication_counts_dataset(query: Query): publication_ids = [p.id for p in PublicationDocSearch().find(q=query.search, facet_filters=query.filters).source(['id']).scan()] publication_matches = data_cache.publications.loc[publication_ids] publication_match_counts = _create_count_dataframe(publication_matches).assign(group='matched') all_publication_counts = _create_count_dataframe(data_cache.publications).assign(group='all') return pd.concat([publication_match_counts, all_publication_counts])
comses/catalog
catalog/core/visualization/data_sources/publication_counts.py
Python
gpl-3.0
1,439
# Contains functionality for responses import numpy as np import astropy.io.fits as fits __all__ = ["RMF", "ARF"] class RMF(object): def __init__(self, filename): self._load_rmf(filename) pass def _load_rmf(self, filename): """ Load an RMF from a FITS file. Parameters ---------- filename : str The file name with the RMF file Attributes ---------- n_grp : numpy.ndarray the Array with the number of channels in each channel set f_chan : numpy.ndarray The starting channel for each channel group; If an element i in n_grp > 1, then the resulting row entry in f_chan will be a list of length n_grp[i]; otherwise it will be a single number n_chan : numpy.ndarray The number of channels in each channel group. The same logic as for f_chan applies matrix : numpy.ndarray The redistribution matrix as a flattened 1D vector energ_lo : numpy.ndarray The lower edges of the energy bins energ_hi : numpy.ndarray The upper edges of the energy bins detchans : int The number of channels in the detector """ # open the FITS file and extract the MATRIX extension # which contains the redistribution matrix and # anxillary information hdulist = fits.open(filename) # get all the extension names extnames = np.array([h.name for h in hdulist]) # figure out the right extension to use if "MATRIX" in extnames: h = hdulist["MATRIX"] elif "SPECRESP MATRIX" in extnames: h = hdulist["SPECRESP MATRIX"] data = h.data hdr = h.header hdulist.close() # extract + store the attributes described in the docstring n_grp = np.array(data.field("N_GRP")) f_chan = np.array(data.field('F_CHAN')) n_chan = np.array(data.field("N_CHAN")) matrix = np.array(data.field("MATRIX")) self.energ_lo = np.array(data.field("ENERG_LO")) self.energ_hi = np.array(data.field("ENERG_HI")) self.energ_unit = data.columns["ENERG_LO"].unit self.detchans = hdr["DETCHANS"] self.offset = self.__get_tlmin(h) # flatten the variable-length arrays self.n_grp, self.f_chan, self.n_chan, self.matrix = \ self._flatten_arrays(n_grp, f_chan, n_chan, matrix) 
return def __get_tlmin(self, h): """ Get the tlmin keyword for `F_CHAN`. Parameters ---------- h : an astropy.io.fits.hdu.table.BinTableHDU object The extension containing the `F_CHAN` column Returns ------- tlmin : int The tlmin keyword """ # get the header hdr = h.header # get the keys of all keys = np.array(list(hdr.keys())) # find the place where the tlmin keyword is defined t = np.array(["TLMIN" in k for k in keys]) # get the index of the TLMIN keyword tlmin_idx = np.hstack(np.where(t))[0] # get the corresponding value tlmin = np.int(list(hdr.items())[tlmin_idx][1]) return tlmin def _flatten_arrays(self, n_grp, f_chan, n_chan, matrix): if not len(n_grp) == len(f_chan) == len(n_chan) == len(matrix): raise ValueError("Arrays must be of same length!") # find all non-zero groups nz_idx = (n_grp > 0) # stack all non-zero rows in the matrix matrix_flat = np.hstack(matrix[nz_idx]) # stack all nonzero rows in n_chan and f_chan #n_chan_flat = np.hstack(n_chan[nz_idx]) #f_chan_flat = np.hstack(f_chan[nz_idx]) # some matrices actually have more elements # than groups in `n_grp`, so we'll only pick out # those values that have a correspondence in # n_grp f_chan_new = [] n_chan_new = [] for i,t in enumerate(nz_idx): if t: n = n_grp[i] f = f_chan[i] nc = n_chan[i] if np.size(f) == 1: f_chan_new.append(f) n_chan_new.append(nc) else: f_chan_new.append(f[:n]) n_chan_new.append(nc[:n]) n_chan_flat = np.hstack(n_chan_new) f_chan_flat = np.hstack(f_chan_new) # if n_chan is zero, we'll remove those as well. nz_idx2 = (n_chan_flat > 0) n_chan_flat = n_chan_flat[nz_idx2] f_chan_flat = f_chan_flat[nz_idx2] return n_grp, f_chan_flat, n_chan_flat, matrix_flat def apply_rmf(self, spec): """ Fold the spectrum through the redistribution matrix. The redistribution matrix is saved as a flattened 1-dimensional vector to save space. In reality, for each entry in the flux vector, there exists one or more sets of channels that this flux is redistributed into. 
The additional arrays `n_grp`, `f_chan` and `n_chan` store this information: * `n_group` stores the number of channel groups for each energy bin * `f_chan` stores the *first channel* that each channel for each channel set * `n_chan` stores the number of channels in each channel set As a result, for a given energy bin i, we need to look up the number of channel sets in `n_grp` for that energy bin. We then need to loop over the number of channel sets. For each channel set, we look up the first channel into which flux will be distributed as well as the number of channels in the group. We then need to also loop over the these channels and actually use the corresponding elements in the redistribution matrix to redistribute the photon flux into channels. All of this is basically a big bookkeeping exercise in making sure to get the indices right. Parameters ---------- spec : numpy.ndarray The (model) spectrum to be folded Returns ------- counts : numpy.ndarray The (model) spectrum after folding, in counts/s/channel """ # get the number of channels in the data nchannels = spec.shape[0] # an empty array for the output counts counts = np.zeros(nchannels) # index for n_chan and f_chan incrementation k = 0 # index for the response matrix incrementation resp_idx = 0 # loop over all channels for i in range(nchannels): # this is the current bin in the flux spectrum to # be folded source_bin_i = spec[i] # get the current number of groups current_num_groups = self.n_grp[i] # loop over the current number of groups for j in range(current_num_groups): current_num_chans = int(self.n_chan[k]) if current_num_chans == 0: k += 1 resp_idx += current_num_chans continue else: # get the right index for the start of the counts array # to put the data into counts_idx = int(self.f_chan[k] - self.offset) # this is the current number of channels to use k += 1 # add the flux to the subarray of the counts array that starts with # counts_idx and runs over current_num_chans channels 
counts[counts_idx:counts_idx + current_num_chans] += self.matrix[resp_idx:resp_idx + current_num_chans] * \ np.float(source_bin_i) # iterate the response index for next round resp_idx += current_num_chans return counts[:self.detchans] class ARF(object): def __init__(self, filename): self._load_arf(filename) pass def _load_arf(self, filename): """ Load an ARF from a FITS file. Parameters ---------- filename : str The file name with the RMF file Attributes ---------- """ # open the FITS file and extract the MATRIX extension # which contains the redistribution matrix and # anxillary information hdulist = fits.open(filename) h = hdulist["SPECRESP"] data = h.data hdr = h.header hdulist.close() # extract + store the attributes described in the docstring self.e_low = np.array(data.field("ENERG_LO")) self.e_high = np.array(data.field("ENERG_HI")) self.e_unit = data.columns["ENERG_LO"].unit self.specresp = np.array(data.field("SPECRESP")) if "EXPOSURE" in list(hdr.keys()): self.exposure = hdr["EXPOSURE"] else: self.exposure = 1.0 if "FRACEXPO" in data.columns.names: self.fracexpo = data["FRACEXPO"] else: self.fracexpo = 1.0 return def apply_arf(self, spec, exposure=None): """ Fold the spectrum through the ARF. The ARF is a single vector encoding the effective area information about the detector. A such, applying the ARF is a simple multiplication with the input spectrum. Parameters ---------- spec : numpy.ndarray The (model) spectrum to be folded exposure : float, default None Value for the exposure time. By default, `apply_arf` will use the exposure keyword from the ARF file. If this exposure time is not correct (for example when simulated spectra use a different exposure time and the ARF from a real observation), one can override the default exposure by setting the `exposure` keyword to the correct value. 
Returns ------- s_arf : numpy.ndarray The (model) spectrum after folding, in counts/s/channel """ assert spec.shape[0] == self.specresp.shape[0], "The input spectrum must " \ "be of same size as the " \ "ARF array." if exposure is None: return np.array(spec) * self.specresp * self.exposure else: return np.array(spec) * self.specresp * exposure
dhuppenkothen/clarsach
clarsach/respond.py
Python
gpl-3.0
10,929
import numpy as np import unittest import logging class _BaseTest(unittest.TestCase): # def energy_test(self, x, e): # e = self.pot.getEnergy(x) # print e # self.assertAlmostEqual(e, self.target_E, 4) def grad_t(self, x): log= logging.getLogger( "BaseTest.grad_t" ) e, g = self.pot.getEnergyGradient(x) e1 = self.pot.getEnergy(x) numerical_g = self.pot.NumericalDerivative(x) log.debug( "g= %r", g ) log.debug( "numerical_g= %r", numerical_g ) self.assertLess(np.max(np.abs(g - numerical_g)), 1e-3) self.assertAlmostEqual(e, e1, 4) def test_e_min(self): log= logging.getLogger( "BaseTest.test_e_min" ) e = self.pot.getEnergy(self.xmin) log.debug( "e= %r", e ) self.assertAlmostEqual(e, self.Emin, 4) def test_grad_min(self): log= logging.getLogger( "BaseTest.test_gra_min" ) e, g = self.pot.getEnergyGradient(self.xmin) log.debug( "e= %r", e ) log.debug( "g= %r", g) self.assertAlmostEqual(e, self.Emin, 4) self.assertLess(np.max(np.abs(g)), 1e-3) self.grad_t(self.xmin) def test_hess_min(self): log= logging.getLogger( "BaseTest.test_hess_min" ) h = self.pot.getHessian(self.xmin) eigenvals = np.linalg.eigvals(h) log.debug( "e= %r", eigenvals ) self.assertGreater(np.min(eigenvals), -1e-4) def test_hess_analytical_against_numerical(self): log= logging.getLogger( "BaseTest.test_hess_analytical_against_numerical" ) h = self.pot.getHessian(self.xmin) h_num = self.pot.NumericalHessian(self.xmin) np.testing.assert_almost_equal(h,h_num,decimal=4) def test_random(self): self.grad_t(self.xmin+self.xrandom)
js850/pele
pele/potentials/tests/_base_test.py
Python
gpl-3.0
1,834
__author__ = "Wren J. R. (uberfastman)" __email__ = "wrenjr@yahoo.com" # code snippets taken from: http://stackoverflow.com/questions/24419188/automating-pydrive-verification-process import datetime import logging from pathlib import Path from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from report.logger import get_logger from utils.app_config_parser import AppConfigParser logger = get_logger(__name__, propagate=False) # Suppress verbose googleapiclient info/warning logging logging.getLogger("googleapiclient").setLevel(level=logging.ERROR) logging.getLogger("googleapiclient.discovery").setLevel(level=logging.ERROR) logging.getLogger("googleapiclient.discovery_cache").setLevel(level=logging.ERROR) logging.getLogger("googleapiclient.discovery_cache.file_cache").setLevel(level=logging.ERROR) class GoogleDriveUploader(object): def __init__(self, filename, config): logger.debug("Initializing Google Drive uploader.") project_dir = Path(__file__).parents[1] logger.debug("Authenticating with Google Drive.") self.filename = Path(project_dir) / filename self.config = config self.gauth = GoogleAuth() auth_token = Path(project_dir) / Path(self.config.get("Drive", "google_drive_auth_token")) # Try to load saved client credentials self.gauth.LoadCredentialsFile(auth_token) if self.gauth.credentials is None: # Authenticate if they're not there self.gauth.LocalWebserverAuth() elif self.gauth.access_token_expired: # Refresh them if expired self.gauth.Refresh() else: # Initialize the saved creds self.gauth.Authorize() # Save the current credentials to a file self.gauth.SaveCredentialsFile(auth_token) def upload_file(self, test=False): logger.debug("Uploading file to Google Drive.") # Create GoogleDrive instance with authenticated GoogleAuth instance. 
drive = GoogleDrive(self.gauth) # Get lists of folders root_folders = drive.ListFile( {"q": "'root' in parents and mimeType='application/vnd.google-apps.folder' and trashed=false"}).GetList() google_drive_folder_path_default = self.config.get("Drive", "google_drive_folder_path_default") google_drive_folder_path = Path(self.config.get( "Drive", "google_drive_folder_path", fallback=google_drive_folder_path_default)).parts google_drive_root_folder_id = self.make_root_folder( drive, self.check_file_existence(google_drive_folder_path[0], root_folders, "root"), google_drive_folder_path[0] ) if not test: parent_folder_id = google_drive_root_folder_id parent_folder_content_folders = drive.ListFile({ "q": ( f"'{parent_folder_id}' in parents and " f"mimeType='application/vnd.google-apps.folder' and " f"trashed=false" ) }).GetList() for folder in google_drive_folder_path[1:]: # create folder chain in Google Drive parent_folder_id = self.make_parent_folder( drive, self.check_file_existence(folder, parent_folder_content_folders, parent_folder_id), folder, parent_folder_id ) parent_folder_content_folders = drive.ListFile({ "q": ( f"'{parent_folder_id}' in parents and " f"mimeType='application/vnd.google-apps.folder' and " f"trashed=false" ) }).GetList() # Check for season folder and create it if it does not exist season_folder_name = Path(self.filename).parts[-3] season_folder_id = self.make_parent_folder( drive, self.check_file_existence(season_folder_name, parent_folder_content_folders, parent_folder_id), season_folder_name, parent_folder_id ) season_folder_content_folders = drive.ListFile({ "q": ( f"'{season_folder_id}' in parents and " f"mimeType='application/vnd.google-apps.folder' and " f"trashed=false" ) }).GetList() # Check for league folder and create it if it does not exist league_folder_name = Path(self.filename).parts[-2].replace("-", "_") league_folder_id = self.make_parent_folder( drive, self.check_file_existence(league_folder_name, season_folder_content_folders, 
season_folder_id), league_folder_name, season_folder_id ) league_folder_content_pdfs = drive.ListFile({ "q": ( f"'{league_folder_id}' in parents and " f"mimeType='application/pdf' and " f"trashed=false" ) }).GetList() # Check for league report and create if if it does not exist report_file_name = Path(self.filename).parts[-1] report_file = self.check_file_existence(report_file_name, league_folder_content_pdfs, league_folder_id) else: all_pdfs = drive.ListFile({"q": "mimeType='application/pdf' and trashed=false"}).GetList() report_file_name = self.filename report_file = self.check_file_existence(report_file_name, all_pdfs, "root") league_folder_id = "root" if report_file: report_file.Delete() upload_file = drive.CreateFile( { "title": report_file_name, "mimeType": "application/pdf", "parents": [ { "kind": "drive#fileLink", "id": league_folder_id } ] } ) upload_file.SetContentFile(self.filename) # Upload the file. upload_file.Upload() upload_file.InsertPermission( { "type": "anyone", "role": "reader", "withLink": True } ) return "\nFantasy Football Report\nGenerated %s\n*%s*\n\n_Google Drive Link:_\n%s" % ( "{:%Y-%b-%d %H:%M:%S}".format(datetime.datetime.now()), upload_file['title'], upload_file["alternateLink"]) @staticmethod def check_file_existence(file_name, file_list, parent_id): drive_file_name = file_name google_drive_file = None for drive_file in file_list: if drive_file["title"] == drive_file_name: for parent_folder in drive_file["parents"]: if parent_folder["id"] == parent_id or parent_folder["isRoot"]: google_drive_file = drive_file return google_drive_file @staticmethod def make_root_folder(drive, folder, folder_name): if not folder: new_root_folder = drive.CreateFile( { "title": folder_name, "parents": [ { "kind": "drive#fileLink", "isRoot": True, "id": "root" } ], "mimeType": "application/vnd.google-apps.folder" } ) new_root_folder.Upload() root_folder_id = new_root_folder["id"] else: root_folder_id = folder["id"] return root_folder_id @staticmethod def 
make_parent_folder(drive, folder, folder_name, parent_folder_id): if not folder: new_parent_folder = drive.CreateFile( { "title": folder_name, "parents": [ { "kind": "drive#fileLink", "id": parent_folder_id } ], "mimeType": "application/vnd.google-apps.folder" } ) new_parent_folder.Upload() parent_folder_id = new_parent_folder["id"] else: parent_folder_id = folder["id"] return parent_folder_id if __name__ == "__main__": local_config = AppConfigParser() local_config.read(Path(__file__).parents[1] / "config.ini") reupload_file = local_config.get("Drive", "google_drive_reupload_file") google_drive_uploader = GoogleDriveUploader(reupload_file, local_config) upload_message = google_drive_uploader.upload_file() print(upload_message)
uberfastman/yahoo-fantasy-football-metrics
integrations/drive_integration.py
Python
gpl-3.0
8,962
#!/usr/bin/env python """rds-create-cpu-alarms Script used to create CPUUtilization alarms in AWS CloudWatch for all RDS instances. A upper-limit threshold needs to be defined. Usage: rds-create-cpu-alarms [options] <threshold> <sns_topic_arn> <region> rds-create-cpu-alarms -h | --help Options: -h --help Show this screen. --debug Don't send data to AWS """ import boto.ec2 import boto.rds2 from docopt import docopt from boto.ec2.cloudwatch import MetricAlarm from .constants import VERSION DEBUG = False def get_rds_instances(region): """ Retrieves the list of all RDS instances Args: region (str) Returns: (list) List of valid state RDS instances """ assert isinstance(region, str) rds = boto.rds2.connect_to_region(region) response = rds.describe_db_instances() rds_instances = (response[u'DescribeDBInstancesResponse'] [u'DescribeDBInstancesResult'] [u'DBInstances'] ) return rds_instances def get_existing_cpuutilization_alarm_names(aws_cw_connect): """ Creates a CPUUtilization alarm for all RDS instances Args: aws_cw_connect (CloudWatchConnection) Returns: (set) Existing CPUUtilization alarm names """ assert isinstance(aws_cw_connect, boto.ec2.cloudwatch.CloudWatchConnection) existing_alarms = aws_cw_connect.describe_alarms() existing_alarm_names = set() for existing_alarm in existing_alarms: existing_alarm_names.add(existing_alarm.name) return existing_alarm_names def get_cpuutilization_alarms_to_create(rds_instances, threshold, aws_cw_connect, sns_topic_arn): """ Creates a CPUUtilization alarm for all RDS instances Args: rds_instances (list) List of all RDS instances threshold (int) The upper limit after which alarm activates aws_cw_connect (CloudWatchConnection) sns_topic_arn (str) Returns: (set) All CPUUtilization alarms that will be created """ assert isinstance(rds_instances, list) assert isinstance(aws_cw_connect, boto.ec2.cloudwatch.CloudWatchConnection) assert isinstance(threshold, int) assert isinstance(sns_topic_arn, str) alarms_to_create = set() 
existing_alarms = get_existing_cpuutilization_alarm_names(aws_cw_connect) for instance in rds_instances: # initiate a CPUUtilization MetricAlarm object for each RDS instance cpu_utilization_alarm = MetricAlarm( name=u'RDS-{}-High-CPUUtilization'.format( instance[u'DBInstanceIdentifier'] ), namespace=u'AWS/RDS', metric=u'CPUUtilization', statistic='Average', comparison=u'>', threshold=threshold, period=60, evaluation_periods=50, alarm_actions=[sns_topic_arn], dimensions={u'DBInstanceIdentifier': instance[u'DBInstanceIdentifier'] } ) if cpu_utilization_alarm.name not in existing_alarms: alarms_to_create.add(cpu_utilization_alarm) return alarms_to_create def main(): args = docopt(__doc__, version=VERSION) global DEBUG if args['--debug']: DEBUG = True region = args['<region>'] sns_topic_arn = args['<sns_topic_arn>'] rds_instances = get_rds_instances(region) aws_cw_connect = boto.ec2.cloudwatch.connect_to_region(region) alarms_to_create = get_cpuutilization_alarms_to_create( rds_instances, int(args['<threshold>']), aws_cw_connect, sns_topic_arn ) if alarms_to_create: if DEBUG: for alarm in alarms_to_create: print('DEBUG:', alarm) else: print('New RDS CPUUtilization Alarms created:') for alarm in alarms_to_create: print(alarm) aws_cw_connect.create_alarm(alarm) if __name__ == '__main__': main()
percolate/rds-create-cpu-alarms
rds_create_cpu_alarms/main.py
Python
gpl-3.0
4,212
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file '/home/yc/code/calibre/calibre/src/calibre/gui2/dialogs/choose_format_device.ui' # # Created: Thu Oct 25 16:54:55 2012 # by: PyQt4 UI code generator 4.8.5 # # WARNING! All changes made in this file will be lost! from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s class Ui_ChooseFormatDeviceDialog(object): def setupUi(self, ChooseFormatDeviceDialog): ChooseFormatDeviceDialog.setObjectName(_fromUtf8("ChooseFormatDeviceDialog")) ChooseFormatDeviceDialog.resize(507, 377) ChooseFormatDeviceDialog.setWindowTitle(_("Choose Format")) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(_fromUtf8(I("mimetypes/unknown.png"))), QtGui.QIcon.Normal, QtGui.QIcon.Off) ChooseFormatDeviceDialog.setWindowIcon(icon) self.vboxlayout = QtGui.QVBoxLayout(ChooseFormatDeviceDialog) self.vboxlayout.setObjectName(_fromUtf8("vboxlayout")) self.msg = QtGui.QLabel(ChooseFormatDeviceDialog) self.msg.setText(_fromUtf8("")) self.msg.setObjectName(_fromUtf8("msg")) self.vboxlayout.addWidget(self.msg) self.formats = QtGui.QTreeWidget(ChooseFormatDeviceDialog) self.formats.setAlternatingRowColors(True) self.formats.setIconSize(QtCore.QSize(64, 64)) self.formats.setAllColumnsShowFocus(True) self.formats.setObjectName(_fromUtf8("formats")) self.formats.headerItem().setText(0, _("Format")) self.formats.headerItem().setText(1, _("Existing")) self.formats.headerItem().setTextAlignment(1, QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.formats.headerItem().setText(2, _("Convertible")) self.vboxlayout.addWidget(self.formats) self.buttonBox = QtGui.QDialogButtonBox(ChooseFormatDeviceDialog) self.buttonBox.setOrientation(QtCore.Qt.Horizontal) self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok) self.buttonBox.setObjectName(_fromUtf8("buttonBox")) self.vboxlayout.addWidget(self.buttonBox) 
self.retranslateUi(ChooseFormatDeviceDialog) QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), ChooseFormatDeviceDialog.accept) QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), ChooseFormatDeviceDialog.reject) QtCore.QMetaObject.connectSlotsByName(ChooseFormatDeviceDialog) def retranslateUi(self, ChooseFormatDeviceDialog): pass
yeyanchao/calibre
src/calibre/gui2/dialogs/choose_format_device_ui.py
Python
gpl-3.0
2,630
""" PHCpy --- a package for Polynomial Homotopy Continuation ======================================================== PHCpy is a collection of Python modules to compute solutions of polynomial systems using PHCpack. A homotopy defines the deformation of a start system (system with known solutions) into the target system (system that has to be solved). Continuation or path tracking methods apply numerical predictor-corrector techniques to track the solution paths defined by the homotopy, starting at the known solutions of the start system and ending at the solutions of the target system. Available modules ----------------- solver exports the blackbox solver of PHCpack, a mixed volume calculator, a path tracker, functions to construct start systems, and deflation to recondition isolated singular solutions. solutions solutions of phcpy.solve are lists of PHCpack solution strings and this module exports operations to convert the solution strings into Python dictionaries, e.g. for evaluation. interface data transfer from string representations of polynomials and solutions as the interface between Python and the C interface of PHCpack. trackers offers functions to track solution paths defined by a homotopy between a given start system with known solutions and a target system. maps module to work with monomial maps, defined as solution of systems that have exactly two monomials in every equation (binomial systems). sets offers tools to work with positive dimensional solution sets. examples defines some interesting examples taken from the research literature, the test() solves all systems, performing a regression test. families polynomial system often occur in families and are defined for any number of equations and variables, e.g.: the cyclic n-roots system. schubert exports the hypersurface and quantum Pieri homotopies to compute isolated solutions to problems in enumerative geometry. 
polytopes functions to work with Newton polytopes, to compute mixed volumes of Newton polytopes, given by tuples of support sets. phcwulf defines a simple client/server interaction to solve random trinomials. Calling the blackbox solver --------------------------- Polynomials and solutions are represented as strings. Below is an illustration of a session with the blackbox solver on a system of two random trinomials, polynomials with three monomials with random coefficients. >>> from phcpy.solver import random_trinomials >>> f = random_trinomials() >>> print f[0] (0.583339727743+0.81222826966115*i)*x^0*y^0\ +(-0.730410130891-0.68300881450520*i)*x^5*y^5\ +(0.547878834338+0.83655769847920*i)*x^5*y^0; >>> print f[1] (0.830635910813+0.55681593338247*i)*x^0*y^4\ +(0.456430547798-0.88975904324518*i)*x^1*y^4\ +(0.034113254002-0.99941797357332*i)*x^2*y^1; >>> from phcpy.solver import solve >>> s = solve(f,silent=True) >>> len(s) 30 >>> print s[2] t : 1.00000000000000E+00 0.00000000000000E+00 m : 1 the solution for t : x : -9.99963006604849E-01 8.60147787997449E-03 y : 0.00000000000000E+00 0.00000000000000E+00 == err : 4.325E-17 = rco : 2.020E-01 = res : 1.665E-16 = >>> The solve command returned a list of 30 strings in s, each string represents a solution that makes the polynomials in f vanish. The module solutions offers function to evaluate the solutions in the polynomials given as strings. """ try: from phcpy.phcpy2c3 import py2c_PHCpack_version_string print(py2c_PHCpack_version_string() + ' works!') except: print('Is the phcpy2c3.so not suited for this platform?') # The version number is defined as a data attribute. __version__ = '0.4.1'
callmetaste/PHCpack
src/Python/PHCpy3/phcpy/__init__.py
Python
gpl-3.0
3,727
import time
import RPi.GPIO as GPIO
from flask import Flask, render_template

# GPIO and Sensors ============================================================

class WaterLevelSensor:
    """Ultrasonic distance sensor used to measure the fish-tank water level.

    The sensor is mounted above the tank; ``offset`` is the distance (cm)
    between the sensor and the top of the fish tank.
    """

    # How high the sensor is above the top of the fish tank (cm).
    offset = 0

    def __init__(self, echo, trig):
        # BCM pin numbers for the sensor's echo and trigger lines.
        self.echo_pin = echo
        self.trig_pin = trig
        GPIO.setup(self.trig_pin, GPIO.OUT, initial=0)
        GPIO.setup(self.echo_pin, GPIO.IN)

    def pulse_in(self):
        """Pulse the trigger pin and return the elapsed time until the echo
        line goes high.

        NOTE(review): the original comment claims microseconds, but
        ``time.clock()`` returns seconds — the conversion factor in
        ``read_water_level`` appears to assume microseconds; confirm on
        hardware.  ``time.clock()`` is also removed in Python 3.8+; consider
        ``time.perf_counter()``.
        """
        GPIO.output(self.trig_pin, 1)
        time.sleep(0.05)
        GPIO.output(self.trig_pin, 0)
        start = time.clock()
        GPIO.wait_for_edge(self.echo_pin, GPIO.RISING)
        return time.clock() - start

    def read_water_level(self):
        """Return how far the water is below the top of the tank, in cm."""
        # The speed of sound is ~343 m/s; the factor halves the round trip.
        val = self.pulse_in() * 0.000001715  # ((1 / 1,000,000) * 343) / 2,000
        # BUG FIX: was `val - ofset` — a NameError.  The class attribute is
        # spelled `offset` and must be read through `self`.
        return val - self.offset

# Webpage =====================================================================
app = Flask(__name__)

# BUG FIX: was a bare `@route('/')` (undefined name).  Flask routes are
# registered through the application object's decorator.
@app.route('/')
def display_info():
    """Render the info page with the latest water-level reading."""
    reading = 0
    # BUG FIX: the rendered page must be returned to the client, not
    # discarded (the original view implicitly returned None).
    return render_template('info.html', height=reading)

if __name__ == '__main__':
    GPIO.setmode(GPIO.BCM)
    app.run('127.0.0.1', 8000)
tvictor20/tvictor-advprog
aquaponics/app.py
Python
gpl-3.0
1,404
# ==========================================
# Author: Richard Wang
# Contact: rwthree@princeton.edu
#
# Performs simple parsing of an "Alpha" file
# Contents are simply (Server #, Alpha)
# Ex:
# 1, 3
# 2, 0
# 3, 1
# ==========================================

import random
import Globals


def updateAlphas(newAlphas, oldTargetList):
    """Merge freshly parsed alpha targets into ``Globals.ALPHAS``.

    Replicas already present get their ``alphaTarget`` refreshed; unknown
    replicas are appended with a zero assignment.  Every replica named in
    ``oldTargetList`` loses one assignment.
    """
    for new in newAlphas:
        found = False
        for cur in Globals.ALPHAS:
            if new['replica'] == cur['replica']:
                cur['alphaTarget'] = new['alphaTarget']
                found = True
        if not found:
            Globals.ALPHAS.append({'replica': new['replica'],
                                   'alphaTarget': new['alphaTarget'],
                                   'alphaAssign': 0})
    for old in oldTargetList:
        for cur in Globals.ALPHAS:
            if old['replica'] == cur['replica']:
                cur['alphaAssign'] -= 1


def getAlphas(filename):
    """Parse a "Server #, Alpha" file into a list of alpha dicts.

    A replica whose MAC address is unknown (``mac == ''``) gets a target of
    0 regardless of what the file requests.
    """
    alphalist = []
    # BUG FIX: the file handle previously shadowed the builtin `file` and
    # leaked if an exception occurred mid-parse; `with` closes it reliably.
    with open(filename, 'r+') as alphafile:
        for line in alphafile:
            fields = line.replace('\n', '').replace(' ', '').split(',')
            for replica in Globals.REPLICAS:
                if replica['no'] == int(fields[0]):
                    if replica['mac'] != '':
                        alphalist.append({'replica': int(fields[0]),
                                          'alphaTarget': int(fields[1]),
                                          'alphaAssign': 0})
                    else:
                        alphalist.append({'replica': int(fields[0]),
                                          'alphaTarget': 0,
                                          'alphaAssign': 0})
    return alphalist


def distributeAlphas(newAlphas, oldAlphas, totalAssigns):
    """Give each replica with a nonzero target its proportional share of the
    combined old + new assignments, minus what it already holds."""
    sumTarget = sumAllAlphaTarget(newAlphas)
    sumOldAssigns = sumAllAlphaAssign(oldAlphas)
    for new in newAlphas:
        for old in oldAlphas:
            if new['replica'] == old['replica'] and new['alphaTarget'] != 0:
                numAssigns = sumOldAssigns + totalAssigns
                # NOTE(review): `/` is integer division under Python 2 (this
                # codebase's vintage); kept as-is to preserve the original
                # rounding behaviour.
                newAssigns = numAssigns / sumTarget - old['alphaAssign']
                Globals.log.info('New Assigns: ' + str(newAssigns))
                if newAssigns > 0:
                    new['alphaAssign'] = newAssigns
    return newAlphas


def distributeEven(newAlphas, totalAssigns):
    """Distribute the still-unassigned slots proportionally to each target."""
    remainingAssigns = totalAssigns - sumAllAlphaAssign(newAlphas)
    sumTarget = sumAllAlphaTarget(newAlphas)
    for new in newAlphas:
        if sumTarget != 0:
            new['alphaAssign'] += int(new['alphaTarget'] * remainingAssigns / sumTarget)
    return newAlphas


def distributeRemaining(newAlphas, totalAssigns):
    """Hand out any leftover slots round-robin, starting at a random replica,
    skipping replicas whose target is zero."""
    remainingAssigns = totalAssigns - sumAllAlphaAssign(newAlphas)
    rand = random.randint(0, Globals.NUMREPLICAS - 1)
    while remainingAssigns > 0 and sumAllAlphaTarget(newAlphas) != 0:
        if newAlphas[rand]['alphaTarget'] != 0:
            newAlphas[rand]['alphaAssign'] += 1
            remainingAssigns -= 1
        rand += 1
        if rand > Globals.NUMREPLICAS - 1:
            rand = 0
    return newAlphas


def printAlphas(alphas):
    """Write the current assignment vs. target for each replica to the
    assignment log."""
    Globals.ASSIGNLOG.write('Current Alpha Assignments:\n')
    for entry in alphas:
        Globals.ASSIGNLOG.write('\tReplica ' + str(entry['replica']) +
                                ' is currently assigned ' + str(entry['alphaAssign']) +
                                ' wants to have ' + str(entry['alphaTarget']))
        Globals.ASSIGNLOG.write('\n')


# =======================================
# Helper Functions
# =======================================
def sumAllAlphaTarget(alphalist):
    """Total of all alphaTarget values."""
    return sum(entry['alphaTarget'] for entry in alphalist)


def sumAllAlphaAssign(alphalist):
    """Total of all alphaAssign values."""
    return sum(entry['alphaAssign'] for entry in alphalist)
XianliangJ/collections
CNUpdates/updates/examples/loadwild/loadbalancer/lbtest/Alphas.py
Python
gpl-3.0
4,597
#! /usr/bin/env python3
"""Plot the MgO PDOS from a VASP run at a range of Lorentzian broadenings."""
import numpy as np
import matplotlib.pyplot as plt
plt.style.use("seaborn-colorblind")

import galore
import galore.plot

vasprun = './test/MgO/vasprun.xml.gz'
xmin, xmax = (-10, 2)

fig = plt.figure()
# One subplot per broadening width, gamma = 0.05 .. 0.45 in 0.05 steps.
for i, l in enumerate(np.arange(0.05, 0.50, 0.05)):
    ax = fig.add_subplot(3, 3, i + 1)
    # BUG FIX: raw string — '\g' is an invalid escape sequence (a warning on
    # modern Python); here it is LaTeX markup, not a Python escape.
    ax.set_title(r"$\gamma = {0:4.2f}$".format(l))
    plotting_data = galore.process_pdos(input=[vasprun],
                                        lorentzian=l,
                                        xmin=xmin,
                                        xmax=xmax)
    galore.plot.plot_pdos(plotting_data, ax=ax)
    # Nine identical legends would clutter the grid.
    ax.legend().set_visible(False)

fig.tight_layout()
plt.show()
SMTG-UCL/galore
docs/source/api_demo_pdos_broadening.py
Python
gpl-3.0
614
# -*- coding: utf-8 -*- # # This file is part of the jabber.at homepage (https://github.com/jabber-at/hp). # # This project is free software: you can redistribute it and/or modify it under the terms of the GNU General # Public License as published by the Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This project is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # # You should have received a copy of the GNU General Public License along with this project. If not, see # <http://www.gnu.org/licenses/>. import os import re from contextlib import contextmanager from unittest import mock from celery import task from pyvirtualdisplay import Display from selenium.common.exceptions import NoSuchElementException from selenium.common.exceptions import TimeoutException from selenium.webdriver.firefox.webdriver import WebDriver from selenium.webdriver.support.wait import WebDriverWait from django.conf import settings from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.test import TestCase as DjangoTestCase VIRTUAL_DISPLAY = os.environ.get('VIRTUAL_DISPLAY', 'y').lower().strip() == 'y' class HomepageTestCaseMixin(object): def assertIsTask(self, t, expected): self.assertEqual(t, task(expected)) def assertTaskCount(self, mocked, count): """Assert that `count` Celery tasks have been called.""" self.assertEqual(mocked.call_count, count) def assertNoTasks(self, mocked): self.assertTaskCount(mocked, 0) def assertTaskCall(self, mocked, task, *args, **kwargs): self.assertTrue(mocked.called) a, k = mocked.call_args self.assertEqual(k, {}) # apply_async receives task args/kwargs as tuple/dict arg instance, called_args, called_kwargs = a self.assertIsTask(instance, task) self.assertEqual(args, called_args) self.assertEqual(kwargs, 
called_kwargs) @contextmanager def mock_celery(self): def run(self, args, kwargs): return self.run(*args, **kwargs) with mock.patch('celery.app.task.Task.apply_async', side_effect=run, autospec=True) as mocked: yield mocked class SeleniumMixin(object): @classmethod def setUpClass(cls): super().setUpClass() if VIRTUAL_DISPLAY: cls.vdisplay = Display(visible=0, size=(1024, 768)) cls.vdisplay.start() cls.selenium = WebDriver(executable_path=settings.GECKODRIVER_PATH) @classmethod def tearDownClass(cls): cls.selenium.quit() if VIRTUAL_DISPLAY: cls.vdisplay.stop() super().tearDownClass() class wait_for_css_property(object): def __init__(self, elem, prop, value): self.elem = elem self.prop = prop self.value = value def __call__(self, driver): if self.elem.value_of_css_property(self.prop) == self.value: return self.elem else: return False def wait_for_display(self, elem, wait=2): WebDriverWait(self.selenium, wait).until(lambda d: elem.is_displayed()) def wait_for_page_load(self, wait=2): WebDriverWait(self.selenium, wait).until(lambda driver: driver.find_element_by_tag_name('body')) def wait_for_valid_form(self, form=None, wait=2): """Wait until a form becomes valid according to HTML5 form validation. The registration form becomes valid only after a split second, for some reason. 
""" if form is None: form = self.find('form') WebDriverWait(self.selenium, wait).until( lambda driver: self.selenium.execute_script('return arguments[0].checkValidity() === true', form)) def wait_for_focus(self, elem): # when an element gets focus, it turns blue: wait = WebDriverWait(self.selenium, 10) wait.until(self.wait_for_css_property(elem, 'border-top-color', 'rgb(128, 189, 255)')) def wait_for_invalid(self, elem): wait = WebDriverWait(self.selenium, 10) wait.until(self.wait_for_css_property(elem, 'border-top-color', 'rgb(220, 53, 69)')) def wait_for_valid(self, elem): wait = WebDriverWait(self.selenium, 10) wait.until(self.wait_for_css_property(elem, 'border-top-color', 'rgb(40, 167, 69)')) def find(self, selector): """Find an element by CSS selector.""" return self.selenium.find_element_by_css_selector(selector) def get_classes(self, elem): """Get CSS classes from the passed Element.""" return re.split(r'\s+', elem.get_attribute('class').strip()) def get_validity(self, elem): """Get validity object from a HTML5 form field.""" return self.selenium.execute_script('return arguments[0].validity', elem) def get_valid(self, elem): val = self.get_validity(elem) return val['valid'] def assertNoElementExists(self, selector, wait=0): """Assert that no element with the passed selector is present on the page.""" if wait: with self.assertRaises(TimeoutException): WebDriverWait(self.selenium, wait).until(lambda d: self.find(selector)) else: with self.assertRaises(NoSuchElementException): self.find(selector) def assertDisplayed(self, elem): if isinstance(elem, str): elem = self.find(elem) self.assertTrue(elem.is_displayed()) def assertNotDisplayed(self, elem): if isinstance(elem, str): elem = self.find(elem) self.assertFalse(elem.is_displayed()) def assertClass(self, elem, cls): """Assert that an element has a CSS class.""" self.assertIn(cls, self.get_classes(elem)) def assertNotClass(self, elem, cls): """Assert that an element does **not** have a CSS class.""" 
self.assertNotIn(cls, self.get_classes(elem)) def assertCSSBorderColor(self, elem, color): """Assert that an element has a given border color.""" self.assertEqual(elem.value_of_css_property('border-right-color'), color) self.assertEqual(elem.value_of_css_property('border-left-color'), color) self.assertEqual(elem.value_of_css_property('border-top-color'), color) self.assertEqual(elem.value_of_css_property('border-bottom-color'), color) def assertNotValidated(self, fg, elem): """Assert that a Bootstrap input element is not validated.""" self.assertNotClass(fg, 'was-validated') for feedback in fg.find_elements_by_css_selector('.invalid-feedback'): self.assertFalse(feedback.is_displayed()) if self.selenium.switch_to.active_element != elem: # passed element is not currently active self.assertCSSBorderColor(elem, 'rgb(206, 212, 218)') else: self.assertCSSBorderColor(elem, 'rgb(128, 189, 255)') def assertInvalid(self, fg, elem, *errors): """Assert that a Bootstrap input element validates as invalid.""" self.assertClass(fg, 'was-validated') errors = set(['invalid-%s' % e for e in errors]) for feedback in fg.find_elements_by_css_selector('.invalid-feedback'): classes = set(self.get_classes(feedback)) if errors & classes: self.assertTrue(feedback.is_displayed(), '.%s is not displayed' % ('.'.join(classes))) else: self.assertFalse(feedback.is_displayed(), '.%s is displayed' % ('.'.join(classes))) self.wait_for_invalid(elem) self.assertFalse(self.get_valid(elem)) def assertValid(self, fg, elem): """Assert that a Bootstrap input element validates as valid.""" self.assertClass(fg, 'was-validated') for feedback in fg.find_elements_by_css_selector('.invalid-feedback'): self.assertFalse(feedback.is_displayed()) self.wait_for_valid(elem) self.assertTrue(self.get_valid(elem)) class TestCase(HomepageTestCaseMixin, DjangoTestCase): pass class SeleniumTestCase(SeleniumMixin, HomepageTestCaseMixin, StaticLiveServerTestCase): pass
jabber-at/hp
hp/core/tests/base.py
Python
gpl-3.0
8,383
from lokp.models import DBSession
from lokp.protocols.activity_protocol import ActivityProtocol
from lokp.review.review import BaseReview


class ActivityReview(BaseReview):
    """Review workflow for Activity items.

    Thin specialization of :class:`BaseReview` that wires in an
    :class:`ActivityProtocol` bound to the shared ``DBSession``.
    """

    def __init__(self, request):
        super(ActivityReview, self).__init__(request)
        # Protocol instance used for all Activity data access during review.
        self.protocol = ActivityProtocol(DBSession)
CDE-UNIBE/lokp
lokp/review/activities.py
Python
gpl-3.0
314
#!/usr/bin/python import sys import re import numpy as np import matplotlib.pyplot as plt import argparse remove_comments = re.compile("(?!\#)", re.VERBOSE) parser = argparse.ArgumentParser() parser.add_argument("-f", nargs=1, default=None) parser.add_argument("files", nargs='*') args = parser.parse_args() if len(args.files) < 1: parser.error("I need at least one tsl file") data = [] for fn in args.files: f = open(fn,"rb") tab = [line.strip() for line in f.readlines()] f.close() header = np.array(tab[0][1:].split()) if args.f == None: print ("There are following fields available in %s" % fn) print header else: field = args.f[0] fno = np.where(header == field)[0][0] tab = np.array([ map(np.float64, line.split()) for line in filter(remove_comments.match, tab) ]) data.append(tab) fig = plt.figure() ax = fig.add_subplot(111) for i, fn in enumerate(data): ax.plot(fn[:, 1], fn[:, fno], label=args.files[i]) ax.legend() plt.ylabel(field) plt.xlabel(header[1]) plt.draw() plt.show()
askalbania/piernik
python/plot_tsl.py
Python
gpl-3.0
1,065
# -*- coding: utf-8 -*- import re import datetime import logging from urlparse import parse_qsl from mamchecker.model import depth_1st, problemCtxObjs, keysOmit, table_entry, ctxkey from mamchecker.hlp import datefmt, last from mamchecker.util import PageBase from google.appengine.ext import ndb def prepare( qs # url query_string (after ?) , skey # start key, filter is filled up with it. # student key normally, but can be other, e.g. school, too. # if a parent belongs to user then all children can be queried , userkey ): '''prepares the perameters for depth_1st >>> #see depth_1st >>> skey = ctxkey(['Sc1', 'Pe1', 'Te1','Cl1','St1']) >>> #qs= "Sc0&*&*&*&*&*" >>> qs= "q~r.be" >>> prepare(qs,skey,None)[0] ['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', [('query_string', '=', 'r.be')]] >>> qs= ' ' >>> prepare(qs,skey,None)[0] ['Sc1', 'Pe1', 'Te1', 'Cl1', 'St1', []] >>> qs= "1DK&*&d>3" >>> p = prepare(qs,skey,None)[0] ''' @last def filters(x): '''convert to GAE filters from lst is ["<field><operator><value>",...] 
~ -> = q = query_string age fields: H = hours, S = seconds, M = minutes, d = days ''' AGES = {'d': 'days', 'H': 'hours', 'M': 'minutes', 'S': 'seconds'} ABBR = {'q': 'query_string'} filters = [] if not isinstance(x, str): return for le in x.split(','): #le = next(iter(x.split(','))) le = le.replace('~', '=') match = re.match(r'(\w+)([=!<>]+)([\w\d\.]+)', le) if match: grps = match.groups() name, op, value = grps if name in ABBR: name = ABBR[name] age = None # le='d<~3' if name in AGES: age = AGES[name] if name in AGES.values(): age = name if age: value = datetime.datetime.now( ) - datetime.timedelta(**{age: int(value)}) name = 'answered' filters.append((name, op, value)) return filters #qs = '' O = problemCtxObjs # q=query, qq=*->[], qqf=filter->gae filter (name,op,value) q = filter(None, [k.strip() for k, v in parse_qsl(qs, True)]) qq = [[] if x == '*' else x for x in q] qqf = [filters() if filters(x) else x for x in qq] # fill up to len(O) delta = len(O) - len(qqf) if delta > 0: ext = [str(v) for k, v in skey.pairs()] extpart = min(len(ext), delta) rest = delta - extpart qqf = ext[:extpart] + [[]] * rest + qqf keys = keysOmit(qqf) obj = keys and keys[-1].get() # parent to start from if obj and obj.userkey == userkey: return qqf, keys, O, True else: return qqf, [], O, False, userkey class Page(PageBase): def __init__(self, _request): super(self.__class__, self).__init__(_request) self.table = lambda: depth_1st( * prepare( self.request.query_string, self.request.student.key, self.user and self.user.key)) self.params = { 'table': self.table, 'table_entry': table_entry} def post_response(self): for urlsafe in self.request.get_all('deletee'): k = ndb.Key(urlsafe=urlsafe) k.delete() return self.get_response()
mamchecker/mamchecker
mamchecker/done/__init__.py
Python
gpl-3.0
3,545
# Copyright 2015-2020 The Wazo Authors (see the AUTHORS file) # SPDX-License-Identifier: GPL-3.0-or-later from wazo_auth.interfaces import ( BaseAuthenticationBackend, BaseMetadata, DEFAULT_XIVO_UUID, ) __all__ = [ 'BaseAuthenticationBackend', 'BaseMetadata', 'DEFAULT_XIVO_UUID', ]
wazo-pbx/xivo-auth
wazo_auth/__init__.py
Python
gpl-3.0
310
from __future__ import absolute_import, unicode_literals import logging from django import forms from django.contrib import messages from django.http import Http404 from django.utils.encoding import smart_str from easy_maps.models import Address from . import lib log = logging.getLogger(__name__) class AddressForm(forms.ModelForm): """ Address form validator Validate the address is unique and it's geocode. """ address = forms.CharField(max_length=255, required=True) class Meta: model = Address fields = ['address'] def _post_clean(self): super(AddressForm, self)._post_clean() if self.cleaned_data.get('address'): q = Address.objects.filter( address__icontains=self.cleaned_data['address'] ).exists() if q: message_ = ("The %s could not be %s because " "similar address already exists.") % ( self.instance.__class__.__name__, 'created' ) log.debug("%s : %s" % (message_, self.cleaned_data['address'])) self._update_errors(message_) def save(self, commit=True, request=None): log.info("Saving new address") try: instance = super(AddressForm, self).save(commit=commit) except ValueError as e: log.debug(smart_str(e)) messages.error(request, smart_str(e)) else: if instance and not self._valid_address(instance): message_ = ('Geocode error occurred saving %s: %s' % (instance.__class__.__name__, instance.address,)) messages.error(request, message_) instance.delete() return log.info("Adding address to fusion table.") if not request or not request.user: message_ = "Request or user not found." 
log.error(message_) raise Http404(message_) else: flow = lib.FlowClient(request) service, table_id = flow.get_service_and_table_id() fusion_table_address_exists = ( lib.FusionTableMixin.address_exists(instance, service, table_id)) added_to_fusion_table = False if fusion_table_address_exists is not None: log.debug("Address already exist in fusion table:" " %s" % (instance.address,)) else: log.info("Adding address to fusion table : %s" % instance.address) lib.FusionTableMixin.save(instance, service, table_id) added_to_fusion_table = True if instance: part = "Successfully added a new " message_ = "%s %s: %s" % ( part, instance.__class__.__name__, instance.address ) if added_to_fusion_table: f_part = part + "%s to fusion table: %s" f_message_ = f_part % ( instance.__class__.__name__, instance.address ) log.info(f_message_) messages.success(request, message_) log.info(message_) return instance def _valid_address(self, instance): if instance.geocode_error or not instance.computed_address: message_ = 'Geocode Error' log.debug("%s : %s" % (smart_str(str(message_)), self.cleaned_data['address'])) self._update_errors(message_) return False return True @staticmethod def get_addresses(): return Address.objects.only('address').order_by('-id').all()
jackton1/django_google_app
map_app/forms.py
Python
gpl-3.0
4,132
#!/usr/local/lib/python2.7.10/bin/python # -*- coding: utf-8 -*- """ Created on Sun Aug 09 13:15:13 2015 @author: Vedran Fetching a schedule from Google Docs, extracting information, converting to pdf and sending via e-mail. Copyright (C) 2015 Vedran Vukovic <wilheru@gmail.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import sys # for fetching command-line arguments for legal notices import os # for working with files and opening the temporary pdf file import pdfkit # for creating the temporary pdf file import urllib2 # for getting the original file from Google Drive from datetime import datetime # for printing today's date and time from textwrap import fill # for wrapping help, warranty & copyright text from getpass import getpass # for passwords import smtplib # for sending email from email.MIMEMultipart import MIMEMultipart # for multipart emails from email.MIMEText import MIMEText # for multipart emails from email.MIMEBase import MIMEBase # for multipart emails from email import Encoders # for multipart emails """ # fetchHTML(docsID) # This function fetches the google doc converted to the html format. # # PARAMETERS: # docsID - Google Docs ID of the file in question. It can be found in all # Google Docs URLs. Read the code for more details. 
# # RETURNS: # string html - html code of the Google Doc whose id is docsID # # NOTE: # Requires Internet connection and access to Google Docs servers """ def fetchHtml(docsID): address = "https://docs.google.com/document/d/" + docsID + \ "/export?format=html" req = urllib2.Request(address) # creates a html request try: response = urllib2.urlopen(req) # opens the url -> requires # Internet connection and access to Google Docs servers html = response.read() except urllib2.URLError: print "\n\nERROR! The URL with the schedule could not have been " \ "accessed. Check your internet connection." html = None return html """ # inputStr(query = "") # This function prints a query, reads an input string from console and # returns it. # # PARAMETERS: # query - The text to be printed before the prompt. If it doesn't end with # a soace, a space will be appended to it. # # RETURNS: # string response - user's console input converted to a string """ def inputStr(query = ""): theQuery = query[:] if query[-1] != " ": theQuery += " " response = str(raw_input(theQuery)) return response """ # inputInt(query = "") # This function prints a query, reads an input string from console, converts # it to an integer and returns it. # # PARAMETERS: # query - The text to be printed before the prompt. If it doesn't end with # a soace, a space will be appended to it. # # RETURNS: # string response - user's console input converted to an integer """ def inputInt(query = ""): try: response = int(raw_input(query)) except ValueError: print "\n\nERROR! The input should be an integer." response = None return response """ # cutHtml(html) # This function is responsible for going through the html string and # separating it into three parts. # # PARAMETERS: # html - The string containing the conent of a html file derived from the # schedule document. 
# # RETURNS: # string head - contains the text from the beginning of the html string # up to the table row containing the string "Day 1", # including the <tr ..> tag # string tail - the end part of the html string, containing the last </tr> in # the same table and everything after that. # list importantTDs - contains locations of all <td ..> tags located just # before the string "Day X", where X is a string # representation of any digit from 0 to 9 # NOTE: # All locations are relative to the beginning of the parameter string html, # using zero-based indexing. """ def cutHtml(html): allDays = [] # the list which will be populated with locations of all # strings that match the case-insensitive regex "day \d" for i in range(len(html)): if html[i:i+4].lower() == "day ": if html[i+4:i+5].lower() in ([str(x) for x in range(10)]): allDays.append(i) allTDs = [] # the list which will be populated with locations of all # strings that match "<td " for i in range(len(html)): if html[i:i+4].lower() == "<td ": allTDs.append(i) importantTDs = [] # the list which will be populated with locations of all # strings that match "<td " and are at most 150 positions before a location # of any "day \d" (which are contained in the list allDays) for i in range(len(allDays)): for j in range(len(allTDs)): if allTDs[j] - allDays[i] > -150 and allTDs[j] - allDays[i] < 0: importantTDs.append(allTDs[j]) head = html[:importantTDs[0]] tail = html[html.rfind("</tr></tbody></table>"):] return head, tail, importantTDs """ # getDataForDate(html, date, importantTDs) # This function selects the part of the html string containing the # schedule for the chosen date. # # PARAMETERS: # html - The string containing the conent of a html file derived from the # schedule document. # # date - The integer containing the date of the month chosen at the beginning # of the script. 
# # importantTDs - The list of integers containing all locations of <td ...> # tags that contain the case-insensitive regex "day \d"; # basically, the text between importantTDs[x] and # importantTDs[x+1] is the schedule for the day x since the # beginning of the course. # # RETURNS: # string middle - contains the part from the html string that renders as the # schedule for the given date """ def getDataForDate(html, date, importantTDs): day = date - 7 start = importantTDs[day] if day < len(importantTDs)-1: end = importantTDs[day+1] else: end = html.rfind("</tr></tbody></table>") middle = html[start:end] return middle """ # createMessage(date) # This function creates a multipart message containing the text from the # messages.txt file, the temporary pdf file with the schedule and other # components of an e-mail message. # # PARAMETERS: # fromAddr - The string containing the address of the sender. # # toAddrs - The list of strings containing the addresses of all receivers. # # fileName - The integer containing the date of the schedule. # # RETURNS: # MIMEMultipart msg - a MIME message object containing the message, # attachments and other necessary fields. """ def createMessage(fromAddr, toAddrs, ccAddrs, subject, messageText, fileName): msg = MIMEMultipart() msg.attach(MIMEText(messageText)) msg['From'] = fromAddr if toAddrs != None: msg['To'] = ', '.join(toAddrs) if ccAddrs != None: msg['Cc'] = ', '.join(ccAddrs) msg['Subject'] = subject part = MIMEBase('application', 'octet-stream') part.set_payload(open(fileName, 'rb').read()) Encoders.encode_base64(part) part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename \ (fileName)) msg.attach(part) return msg """ # sendEmail(username, password, server, port, fromAddr = None, toAddrs) # This function sends the email using the specified account and SMTP # server data. # # PARAMETERS: # username - The username used to log on to the server. 
# # password - The password used to log on to the server. # # server - The SMTP server that will be used to send the messages. # # port - The number of an open port on the SMTP server. This number, if not # a string, will be converted to a string automatically. # # fromAddr - The officiall e-mail address from which the server will send the # message. If None, the username will be used instead. # # toAddrs - The list of addresses to which the schedule will be delivered. # If the list is empty or None, the message will be sent to the # sender. # # RETURNS: # None """ def sendEmail(msg, username, password, server, port, fromAddr = None, \ toAddrs = None, ccAddrs = None, bccAddrs = None): if type(toAddrs) == "str": toAddrsN = [toAddrs] else: toAddrsN = toAddrs if type(ccAddrs) == "str": ccAddrsN = [ccAddrs] else: ccAddrsN = ccAddrs if type(bccAddrs) == "str": bccAddrsN = [bccAddrs] else: bccAddrsN = bccAddrs if fromAddr == "" or fromAddr == None: fromAddr = username if toAddrsN == None: toAddrsN = [] if ccAddrsN != None: toAddrsN += ccAddrsN if bccAddrsN != None: toAddrsN += bccAddrsN if toAddrsN == []: toAddrsN = [fromAddr] mailer = smtplib.SMTP(server + ":" + str(port)) try: mailer.starttls() # THIS HAS TO BE EXECUTED BEFORE LOGIN mailer.login(username,password) mailer.sendmail(fromAddr, toAddrs, msg.as_string()) except smtplib.SMTPRecipientsRefused: print "\n\nERROR! SMTPRecipientsRefused. All recipients refused to " \ "receive the message." except smtplib.SMTPDataError: print "\n\nERROR! SMTPDataError. The SMTP server refused to accept " \ "message data." except smtplib.SMTPSenderRefused: print "\n\nERROR! SMTPSenderRefused. The SMTP server didn't accept " \ "the sender address (fromAddr)." except smtplib.SMTPHeloError: print "\n\nERROR! SMTPHeloError. The server didn't properly reply " \ "to the HELO greeting." except smtplib.SMTPAuthenticationError: print "\n\nERROR! SMTPAuthenticationError. Authentication " \ "unsuccessful. 
Check your e-mail and password in the main " \ "part of the code." except smtplib.SMTPException: print "\n\nERROR! SMTPException. One potential cause is that TLS " \ "was not started before login. Check the script." finally: mailer.close() return """ # createPdfFromHtml(html, pdfFileName) # This function creates a pdf file from an html string using the WKHTMLTOPDF # tool via pdfkit library wrapper. # # PARAMETERS: # html - The html string on which the pdf file will be based. # # pdfFileName - The string containing the file name to which the pdf output # will be written. # # quietOption - Should the WKHTMLTOPDF print any output? # # RETURNS: # None """ def createPdfFromHtml(html, pdfFileName, quietOption = False): if quietOption == False: options = {} else: options = {'quiet': ''} pdfkit.from_string(html, fileName, options=options) if __name__=="__main__": commandLine = False # use the program from the command line argsSwitch = False # some arguments have been provided quietSwitch = False # no user prompt mailSwitch = False # send mail printSwitch = False # print if len(sys.argv) > 1: if sys.argv[1] == '-w' or sys.argv[1] == '--warranty': print fill('THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT ' \ 'PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN ' \ 'WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE ' \ 'PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED ' \ 'OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ' \ 'WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ' \ 'PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF ' \ 'THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, ' \ 'YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR ' \ 'CORRECTION. 
\n') commandLine = True elif sys.argv[1] == '-c' or sys.argv[1] == '--copyright': print fill('All rights granted under this License are granted ' \ 'for the term of copyright on the Program, and are ' \ 'irrevocable provided the stated conditions are met. ' \ 'This License explicitly affirms your unlimited ' \ 'permission to run the unmodified Program. The output ' \ 'from running a covered work is covered by this License ' \ 'only if the output, given its content, constitutes a ' \ 'covered work. This License acknowledges your rights of ' \ 'fair use or other equivalent, as provided by copyright ' \ 'law.\n' \ 'You may make, run and propagate covered works that you ' \ 'do not convey, without conditions so long as your ' \ 'license otherwise remains in force. You may convey ' \ 'covered works to others for the sole purpose of having ' \ 'them make modifications exclusively for you, or ' \ 'provide you with facilities for running those works, ' \ 'provided that you comply with the terms of this ' \ 'License in conveying all material for which you do not' \ 'control copyright. Those thus making or running the' \ 'covered works for you must do so exclusively on your ' \ 'behalf, under your direction and control, on terms ' \ 'that prohibit them from making any copies of your ' \ 'copyrighted material outside their relationship with ' \ 'you.\n' \ 'Conveying under any other circumstances is permitted ' \ 'solely under the conditions stated below. Sublicensing ' \ 'is not allowed; section 10 makes it unnecessary.\n' \ '\n For more information, visit ' \ '<http://www.gnu.org/licenses/>.') commandLine = True elif sys.argv[1] == '-h' or sys.argv[1] == '--help': print fill('This tool can be used as a command-line or an ' \ 'interactive one. 
Before using it, remember to install ' \ 'dependencies.\n\n' \ 'For warranty, copyright or help type' \ 'python and only one of the following:\n\n' \ 'rssso2015schedule.py [-w] [--warranty] [-c] ' \ '[--copyright] [-h] [--help]\n\n' \ 'To run the program from command line, type python ' \ ' and:\n\n' \ 'rssso2015.schedule.py date [-q] [--quiet] [-m]' \ '[--mail] [-p] [--print]\n\n' \ 'The arguments can be presented in any order. If -q or ' \ '--quiet are specified, the temporary pdf file will not ' \ 'be opened after creation and no prompts will be made. ' \ 'Errors will still be output to the standard output.'\ 'If -m or --message is specified, the mail will be sent. ' \ 'If -p or --print is specified, the file will be sent to ' \ 'the printer specified. If neither printing nor mailing ' \ 'is specified, the file will be mailed.\n\n' \ 'For more information on specifying e-mail destinations ' \ 'and printer names, please read the README.md file.\n\n') commandLine = True elif sys.argv[1].isdigit() == True: date = int(sys.argv[1]) argsSwitch = True if len(sys.argv) > 2: if "-q" in sys.argv[2:4] or "--quiet" in sys.argv[2:4]: quietSwitch = True if "-m" in sys.argv[2:4] or "--mail" in sys.argv[2:4]: mailSwitch = True if "-p" in sys.argv[2:4] or "--print" in sys.argv[2:4]: printSwitch = True if mailSwitch == False and printSwitch == False: mailSwitch = True if commandLine == False: if quietSwitch == False: print "RSSSO2015Schedule. 
Copyright (C) 2015 Vedran Vukovic\n" \ "This program comes with ABSOLUTELY NO WARRANTY; for " \ "details type `rssso2015schedule -w'.\n" \ "This is free software, and you are welcome to " \ "redistribute it\n" \ "under certain conditions; type `rssso2015schedule -c' " \ "for details.\n" \ "For details about using this as a command-line tool, " \ "type 'rssso2015schedule -h'\n\n" # ignore a single whitespace before and after the equal sign settingsTxt = open(".schedulerc").read().replace(" =", "=") \ .replace("= ", "=").split("\n") settings = {} for setting in settingsTxt: values = setting.split("=") if len(values) == 2: settings[values[0]] = values[1] docsId = settings["docsId"] fileName = settings["fileName"] fromAddr = settings["fromAddr"] if "fromAddrWithName" in settings: fromAddrWithName = settings["fromAddrWithName"] else: fromAddrWithName = fromAddr if "toAddrs" in settings: toAddrs = settings["toAddrs"].split(", ") else: toAddrs = None if "ccAddrs" in settings: ccAddrs = settings["ccAddrs"].split(", ") else: ccAddrs = None if "bccAddrs" in settings: bccAddrs = settings["bccAddrs"].split(", ") else: bccAddrs = None if "messageFile" in settings: messageFile = settings["messageFile"] else: messageFile = "" if "subject" in settings: subject = settings["subject"] else: subject = "" username = settings["username"] if "password" in settings: password = settings["password"] server = settings["server"] port = settings["port"] if "printerName" in settings: printerName = settings["printerName"] else: printerName = None del settings # no longer needed if quietSwitch == False: print 'Fetching the document from Google Docs...' html = fetchHtml(docsId) if html != None: if quietSwitch == False: print 'Document fetched.\n' # these two printouts are for convenience print "Today's date is: " + str(datetime.now().day) print "The time is: " + str(datetime.now().time()) if argsSwitch == False: # no arguments provided date = inputInt("Date? 
") if date != None: # fileName and subject can have %d, which should be changed to # the selected date fileName = fileName.replace("%d", str(date)) subject = subject.replace("%d", str(date)) # dividing html to head, tail and importantTDs head, tail, importantTDs = cutHtml(html) # using importantTDs to grab only the relevant schedule data middle = getDataForDate(html, date, importantTDs) createPdfFromHtml(head + middle + tail, fileName, quietSwitch) if quietSwitch == False: os.system("xdg-open " + fileName) y = "n" if argsSwitch == True and quietSwitch == False: # QC y = inputStr("[Yy] = proceed?") else: y = "y" if y.lower() == "y": yy = "n" if argsSwitch == False: # should the file be mailed? yy = inputStr("[Yy] = send the e-mail? ") else: if mailSwitch == True: yy = "y" if yy.lower() == "y": try: if messageFile == "": messageText = "" else: messageText = open(messageFile, "r").read() \ .replace("%d", str(date)) msg = createMessage(fromAddrWithName, toAddrs, \ ccAddrs, subject, \ messageText, fileName) if password == "": password = getpass() # server = "smtp.gmail.com" # your server here sendEmail(msg, username, password, server, port, \ fromAddr, toAddrs, ccAddrs, bccAddrs) if quietSwitch == False: print "\nIf there were no error messages " \ "before this one, the e-mail should " \ "have been sent." except IOError: print "\n\nERROR! Message file not found!" else: print "\n\nE-mail sending aborted by user." yyy = "n" if argsSwitch == False: # should the file be printed? yyy = inputStr("[Yy] = print the file? ") else: if printSwitch == True: yyy = "y" if yyy.lower() == "y": os.system("lpr -P" + printerName + " " + fileName) if quietSwitch == False: print "\nIf there were no error messages before " \ "this one, the file should have been " \ "sent for printing." try: os.unlink(fileName) if quietSwitch == False: print "Temporary PDF file with the schedule should " \ "have been deleted." except WindowsError: print "\n\nERROR! 
PDF file with the schedule was " \ "probably opened, and could not have been deleted." if quietSwitch == False: print "\n\nRSSSO2015Schedule script: End of execution."
wilthewolf/rssso2015schedule
rssso2015schedule.py
Python
gpl-3.0
24,594
""" Almost all test cases covers both tag calling and template using. """ from __future__ import print_function, unicode_literals from django.conf import settings as django_settings from django.contrib.contenttypes.models import ContentType from django.http import HttpRequest from django.utils.six import assertCountEqual from wiki.conf import settings from wiki.forms import CreateRootForm from wiki.models import Article, ArticleForObject, ArticleRevision from wiki.templatetags.wiki_tags import (article_for_object, login_url, wiki_form, wiki_render) from ..base import TemplateTestCase if not django_settings.configured: django_settings.configure() # XXX article_for_object accepts context, but not using it class ArticleForObjectTemplatetagTest(TemplateTestCase): template = """ {% load wiki_tags %} {% article_for_object obj as anything %} {{ anything }} """ def setUp(self): super(ArticleForObjectTemplatetagTest, self).setUp() from wiki.templatetags import wiki_tags wiki_tags._cache = {} def test_obj_arg_is_not_a_django_model(self): from wiki.templatetags import wiki_tags with self.assertRaises(TypeError): article_for_object({}, '') with self.assertRaises(TypeError): article_for_object({'request': 100500}, {}) with self.assertRaises(TypeError): self.render({'obj': 'tiger!'}) self.assertEqual(len(wiki_tags._cache), 0) def test_obj_is_not_in__cache_and_articleforobject_is_not_exist(self): from wiki.templatetags.wiki_tags import _cache as cache obj = Article.objects.create() article_for_object({}, obj) self.assertIn(obj, cache) self.assertIsNone(cache[obj]) self.assertEqual(len(cache), 1) self.render({'obj': obj}) self.assertIn(obj, cache) self.assertIsNone(cache[obj]) self.assertEqual(len(cache), 1) def test_obj_is_not_in__cache_and_articleforobjec_is_exist(self): from wiki.templatetags.wiki_tags import _cache as cache a = Article.objects.create() content_type = ContentType.objects.get_for_model(a) ArticleForObject.objects.create( article=a, content_type=content_type, 
object_id=1 ) output = article_for_object({}, a) self.assertEqual(output, a) self.assertIn(a, cache) self.assertEqual(cache[a], a) self.assertEqual(len(cache), 1) self.render({'obj': a}) self.assertIn(a, cache) self.assertEqual(cache[a], a) self.assertEqual(len(cache), 1) def test_obj_in__cache_and_articleforobject_is_not_exist(self): model = Article.objects.create() from wiki.templatetags import wiki_tags wiki_tags._cache = {model: 'spam'} article_for_object({}, model) self.assertIn(model, wiki_tags._cache) self.assertIsNone(wiki_tags._cache[model]) self.assertEqual(len(wiki_tags._cache), 1) self.render({'obj': model}) self.assertIn(model, wiki_tags._cache) self.assertIsNone(wiki_tags._cache[model]) self.assertEqual(len(wiki_tags._cache), 1) self.assertNotIn('spam', wiki_tags._cache.values()) def test_obj_in__cache_and_articleforobjec_is_exist(self): article = Article.objects.create() content_type = ContentType.objects.get_for_model(article) ArticleForObject.objects.create( article=article, content_type=content_type, object_id=1 ) from wiki.templatetags import wiki_tags wiki_tags._cache = {article: 'spam'} output = article_for_object({}, article) self.assertEqual(output, article) self.assertIn(article, wiki_tags._cache) self.assertEqual(wiki_tags._cache[article], article) output = self.render({'obj': article}) self.assertIn(article, wiki_tags._cache) self.assertEqual(wiki_tags._cache[article], article) expected = 'Article without content (1)' self.assertIn(expected, output) # TODO manage plugins in template class WikiRenderTest(TemplateTestCase): template = """ {% load wiki_tags %} {% wiki_render article pc %} """ def tearDown(self): from wiki.core.plugins import registry registry._cache = {} super(WikiRenderTest, self).tearDown() keys = ['article', 'content', 'preview', 'plugins', 'STATIC_URL', 'CACHE_TIMEOUT' ] def test_if_preview_content_is_none(self): # monkey patch from wiki.core.plugins import registry registry._cache = {'ham': 'spam'} article = 
Article.objects.create() output = wiki_render({}, article) assertCountEqual(self, self.keys, output) self.assertEqual(output['article'], article) self.assertIsNone(output['content']) self.assertIs(output['preview'], False) self.assertEqual(output['plugins'], {'ham': 'spam'}) self.assertEqual(output['STATIC_URL'], django_settings.STATIC_URL) self.assertEqual(output['CACHE_TIMEOUT'], settings.CACHE_TIMEOUT) # Additional check self.render({'article': article, 'pc': None}) def test_called_with_preview_content_and_article_have_current_revision(self): article = Article.objects.create() ArticleRevision.objects.create( article=article, title="Test title", content="Some beauty test text" ) content = ( """This is a normal paragraph\n""" """\n""" """Headline\n""" """========\n""" ) expected_markdown = ( """<p>This is a normal paragraph</p>\n""" """<h1 id="wiki-toc-headline">Headline</h1>""" ) # monkey patch from wiki.core.plugins import registry registry._cache = {'spam': 'eggs'} output = wiki_render({}, article, preview_content=content) assertCountEqual(self, self.keys, output) self.assertEqual(output['article'], article) self.assertMultiLineEqual(output['content'], expected_markdown) self.assertIs(output['preview'], True) self.assertEqual(output['plugins'], {'spam': 'eggs'}) self.assertEqual(output['STATIC_URL'], django_settings.STATIC_URL) self.assertEqual(output['CACHE_TIMEOUT'], settings.CACHE_TIMEOUT) output = self.render({'article': article, 'pc': content}) self.assertIn(expected_markdown, output) def test_called_with_preview_content_and_article_dont_have_current_revision( self): article = Article.objects.create() content = ( """This is a normal paragraph\n""" """\n""" """Headline\n""" """========\n""" ) # monkey patch from wiki.core.plugins import registry registry._cache = {'spam': 'eggs'} output = wiki_render({}, article, preview_content=content) assertCountEqual(self, self.keys, output) self.assertEqual(output['article'], article) 
self.assertMultiLineEqual(output['content'], '') self.assertIs(output['preview'], True) self.assertEqual(output['plugins'], {'spam': 'eggs'}) self.assertEqual(output['STATIC_URL'], django_settings.STATIC_URL) self.assertEqual(output['CACHE_TIMEOUT'], settings.CACHE_TIMEOUT) self.render({'article': article, 'pc': content}) class WikiFormTest(TemplateTestCase): template = """ {% load wiki_tags %} {% wiki_form form_obj %} """ def test_form_obj_is_not_baseform_instance(self): context = {'test_key': 'test_value'} form_obj = 'ham' with self.assertRaises(TypeError): wiki_form(context, form_obj) self.assertEqual(context, {'test_key': 'test_value'}) with self.assertRaises(TypeError): self.render({'test_key': 100500}) self.assertEqual(context, {'test_key': 'test_value'}) def test_form_obj_is_baseform_instance(self): context = {'test_key': 'test_value'} # not by any special reasons, just a form form_obj = CreateRootForm() wiki_form(context, form_obj) self.assertEqual(context, {'test_key': 'test_value', 'form': form_obj}) self.render({'form_obj': form_obj}) self.assertEqual(context, {'test_key': 'test_value', 'form': form_obj}) class LoginUrlTest(TemplateTestCase): template = """ {% load wiki_tags %} {% login_url as some_url %} {{ some_url }} """ def test_no_request_in_context(self): with self.assertRaises(KeyError): login_url({}) with self.assertRaises(KeyError): self.render({}) def test_login_url_if_no_query_string_in_request(self): r = HttpRequest() r.META = {} r.path = 'best/test/page/ever/' output = login_url({'request': r}) expected = '/_accounts/login/?next=best/test/page/ever/' self.assertEqual(output, expected) output = self.render({'request': r}) self.assertIn(expected, output) def test_login_url_if_query_string_is_empty(self): r = HttpRequest() r.META = {'QUERY_STRING': ''} r.path = 'best/test/page/ever/' output = login_url({'request': r}) expected = '/_accounts/login/?next=best/test/page/ever/' self.assertEqual(output, expected) output = self.render({'request': r}) 
self.assertIn(expected, output) def test_login_url_if_query_string_is_not_empty(self): r = HttpRequest() r.META = {'QUERY_STRING': 'title=Main_page&action=raw'} r.path = 'best/test/page/ever/' context = {'request': r} output = login_url(context) expected = ( '/_accounts/login/' '?next=best/test/page/ever/%3Ftitle%3DMain_page%26action%3Draw' ) self.assertEqual(output, expected) output = self.render({'request': r}) self.assertIn(expected, output)
cXhristian/django-wiki
tests/core/test_template_tags.py
Python
gpl-3.0
10,375
#!/usr/bin/python -tt

# An incredibly simple agent. All we do is find the closest enemy tank, drive
# towards it, and shoot. Note that if friendly fire is allowed, you will very
# often kill your own tanks with this code.

#################################################################
# NOTE TO STUDENTS
# This is a starting point for you. You will need to greatly
# modify this code if you want to do anything useful. But this
# should help you to know how to interact with BZRC in order to
# get the information you need.
#
# After starting the bzrflag server, this is one way to start
# this code:
# python agent0.py [hostname] [port]
#
# Often this translates to something like the following (with the
# port name being printed out by the bzrflag server):
# python agent0.py localhost 49857
#################################################################

import sys
import math
import time
import random

from bzrc import BZRC, Command


class dumb_agent(object):
    """Class handles all command and control logic for a teams tanks.

    NOTE: this is Python 2 code (print statements, bzrc protocol client).

    Strategy: each tank drives straight for a random 3-8 second interval,
    then turns by pi/3 and repeats; all tanks fire together roughly every
    1.5-2.5 seconds.
    """

    # Class-level defaults; new_angles / running_time / shooting_time are
    # shadowed by instance attributes assigned in __init__.
    new_angles = []
    running_time = []
    shooting_time = 2
    # Tuning constants for the (unfinished) potential-fields movement code
    # in get_desired_movement(); unused by tick() itself.
    ENEMY_TANK_MIN_DISTANCE = 1
    ENEMY_TANK_MAX_DISTANCE = 5
    OBSTACLE_MAX_DISTANCE = 10
    OBSTACLE_MIN_DISTANCE = 1
    BULLET_MAX_DISTANCE = 10
    BULLET_MIN_DISTANCE = 1
    FLAG_MIN_DISTANCE = 1
    FLAG_MAX_DISTANCE = 5
    FLAG_MAX_SPEED = 5

    def __init__(self, bzrc):
        """Seed per-tank turn targets and timers from the current game state.

        bzrc -- connected BZRC protocol client used for all server queries.
        """
        self.bzrc = bzrc
        self.constants = self.bzrc.get_constants()
        random.seed();
        mytanks, othertanks, flags, shots = self.bzrc.get_lots_o_stuff()
        self.new_angles = []
        self.running_time = []
        for tank in mytanks:
            # Each tank's first turn target is pi/3 clockwise of its heading.
            self.new_angles.append(self.normalize_angle(tank.angle - math.pi/3))
            # Drive straight for a random 3-8 s before turning.
            self.running_time.append(random.uniform(3,8))
        self.shooting_time = random.uniform(1.5,2.5)
        self.commands = []

    def tick(self, time_diff):
        """Some time has passed; decide what to do next."""
        mytanks, othertanks, flags, shots = self.bzrc.get_lots_o_stuff()
        obstacles = self.bzrc.get_obstacles()
        self.mytanks = mytanks
        self.othertanks = othertanks
        self.flags = flags
        self.shots = shots
        self.enemies = [tank for tank in othertanks if tank.color !=
                        self.constants['team']]
        self.friendlies = [tank for tank in othertanks if tank.color ==
                           self.constants['team']]
        self.obstacles = obstacles
        self.commands = []

        # Shared fire-control timer: all tanks shoot on the same tick, then
        # the timer is re-randomized to 1.5-2.5 s.
        shoot = False
        if self.shooting_time > 0 :
            self.shooting_time = self.shooting_time - time_diff
        else:
            shoot = True
            self.shooting_time = random.uniform(1.5,2.5)

        i = 0
        "we need a new speed, a new angle, and whether or not to shoot"
        for tank in mytanks:
            speed = 0
            angle = 0
            if self.running_time[i] > 0:
                # Still in the drive-straight phase: full speed, no turning.
                self.running_time[i] = self.running_time[i] - time_diff
                speed = self.FLAG_MAX_SPEED
                angle = 0
            else:
                # Turning phase: rotate toward the stored target angle.
                if self.new_angles[i]+.001 > tank.angle and self.new_angles[i]-.001 < tank.angle:
                    # Within +/-0.001 rad of the target: restart the drive
                    # timer and pick the next target (another pi/3 turn).
                    self.running_time[i] = random.uniform(3,8)
                    self.new_angles[i] = self.normalize_angle(tank.angle - math.pi/3)
                else:
                    # Angular velocity is clamped to [-1, 0]; proportional to
                    # the remaining error when close to the target.
                    if self.new_angles[i] > 0:
                        if tank.angle - self.new_angles[i] > 1:
                            angle = -1
                        else:
                            angle = -(tank.angle-self.new_angles[i])
                    elif self.new_angles[i] < 0:
                        if self.new_angles[i] - tank.angle < -1:
                            angle = -1
                        else:
                            angle = self.new_angles[i] - tank.angle
            command = Command(tank.index, speed, angle, shoot)
            self.commands.append(command)
            i = i + 1

        results = self.bzrc.do_commands(self.commands)

    def normalize_angle(self, angle):
        """Make any angle be between +/- pi."""
        angle -= 2 * math.pi * int (angle / (2 * math.pi))
        if angle <= -math.pi:
            angle += 2 * math.pi
        elif angle > math.pi:
            angle -= 2 * math.pi
        return angle

    def get_desired_movement(self, tank, flags, shots, obstacles):
        """Sum potential-field vectors into a (speed, angle) pair.

        NOTE(review): get_repulsive_vectors / get_attractive_vectors /
        get_tangential_vectors are not defined anywhere in this class, so
        calling this method would raise AttributeError; presumably they are
        meant to be implemented by students -- confirm before use.
        """
        final_angle = 0
        final_speed = 0
        vectors = []
        vectors.extend(self.get_repulsive_vectors(tank, shots))
        vectors.extend(self.get_attractive_vectors(tank, flags))
        vectors.extend(self.get_tangential_vectors(tank, obstacles))
        for speed, angle in vectors:
            final_speed += speed
            final_angle += angle
        return final_speed, final_angle


def main():
    """Connect to the bzrflag server and run the agent loop until Ctrl-C."""
    # Process CLI arguments.
    try:
        execname, host, port = sys.argv
    except ValueError:
        execname = sys.argv[0]
        print >>sys.stderr, '%s: incorrect number of arguments' % execname
        print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
        sys.exit(-1)

    # Connect.
    #bzrc = BZRC(host, int(port), debug=True)
    bzrc = BZRC(host, int(port))

    agent = dumb_agent(bzrc)

    prev_time = time.time()

    # Run the agent: tick as fast as possible, passing the wall-clock delta.
    try:
        while True:
            time_diff = time.time() - prev_time
            prev_time = prev_time + time_diff
            #print >> sys.stderr, 'time dif %f' % time_diff
            agent.tick(time_diff)
    except KeyboardInterrupt:
        print "Exiting due to keyboard interrupt."
        bzrc.close()


if __name__ == '__main__':
    main()

# vim: et sw=4 sts=4
bweaver2/bzrFlag
bzagents/dumb_agent.py
Python
gpl-3.0
5,712
# -*- coding: utf-8 -*-
from ..base import ComponentAPI


class CollectionsJOB(object):
    """Collections of JOB APIS"""

    def __init__(self, client):
        self.client = client

        # Each entry: (attribute name, HTTP method, component path,
        # human-readable description).  The attribute name doubles as the
        # final path segment of the component endpoint.
        api_specs = (
            ('execute_task', 'POST',
             '/api/c/compapi/job/execute_task/',
             u'根据作业模板ID启动作业'),
            ('fast_execute_script', 'POST',
             '/api/c/compapi/job/fast_execute_script/',
             u'快速执行脚本'),
            ('fast_push_file', 'POST',
             '/api/c/compapi/job/fast_push_file/',
             u'快速分发文件'),
            ('get_agent_status', 'POST',
             '/api/c/compapi/job/get_agent_status/',
             u'查询Agent状态'),
            ('get_task_detail', 'GET',
             '/api/c/compapi/job/get_task_detail/',
             u'查询作业模板详情'),
            ('get_task_ip_log', 'GET',
             '/api/c/compapi/job/get_task_ip_log/',
             u'根据作业实例ID查询作业执行日志'),
            ('get_task_result', 'GET',
             '/api/c/compapi/job/get_task_result/',
             u'根据作业实例 ID 查询作业执行状态'),
        )

        # Expose one ComponentAPI handle per endpoint as an instance
        # attribute (self.execute_task, self.fast_execute_script, ...).
        for attr_name, http_method, api_path, api_description in api_specs:
            setattr(self, attr_name, ComponentAPI(
                client=self.client, method=http_method,
                path=api_path,
                description=api_description,
            ))
tjm-1990/blueking
blueking/component/apis/job.py
Python
gpl-3.0
1,608
from datetime import datetime

from django.contrib.auth.models import User
from django.db import models

# NOTE: 'Users' is imported but unused in this module.
from uccaApp.models import Layers, Users, Constants


class Projects(models.Model):
    """Database model for a project record (table: ``projects``)."""
    # Explicit auto-incrementing primary key.
    id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=50)
    # Lengths are shared app-wide via the Constants module.
    description = models.CharField(max_length=Constants.DESCRIPTION_MAX_LENGTH)
    tooltip = models.CharField(max_length=Constants.TOOLTIPS_MAX_LENGTH)
    # Required link to a Layers row (column "layer_id"); PROTECT blocks
    # deleting a layer that still has projects.
    # NOTE(review): default='' is an odd default for a ForeignKey -- confirm
    # it is intentional.
    layer = models.ForeignKey(Layers,null=False,blank=False,db_column="layer_id", on_delete=models.PROTECT, default='')
    # Optional creator. NOTE(review): no on_delete is given; on Django < 2.0
    # this defaults to CASCADE -- confirm before upgrading Django.
    created_by = models.ForeignKey(User, null=True, blank=True)
    # Active flag; rows default to active.
    is_active = models.BooleanField(default=True)
    # NOTE(review): datetime.now produces a naive local timestamp; if
    # USE_TZ is enabled, timezone.now would be the aware equivalent -- verify
    # project settings.
    created_at = models.DateTimeField(default=datetime.now, blank=True)
    # Automatically refreshed on every save().
    updated_at = models.DateTimeField(auto_now=True, blank=True)

    class Meta:
        db_table = "projects"
omriabnd/UCCA-App
Server/uccaApp/models/Projects.py
Python
gpl-3.0
847
import os

import webapp2

from app import routes

# webapp2 reads per-module configuration from the TOP-LEVEL keys of the
# config dict.  Previously the 'webapp2_extras.jinja2' settings were nested
# inside the 'webapp2_extras.sessions' dict, where the sessions module
# ignored them and the custom template_path was never applied.  They now
# live under their own top-level key.
webapp2_config = {
    'webapp2_extras.sessions': {
        # Secret used to sign the session cookies.
        'secret_key': 'hfgskahjfgd736987qygukr3279rtigu',
    },
    'webapp2_extras.jinja2': {
        # Jinja2 templates live in <this package>/templates.
        'template_path': os.path.join(
            os.path.abspath(os.path.dirname(__file__)), 'templates'),
    },
}

# WSGI entry point; the URL routes are registered by app.routes.
application = webapp2.WSGIApplication(debug=True, config=webapp2_config)
routes.add_routes(application)
Terhands/saskdance
app/main.py
Python
gpl-3.0
373
# -*- coding: utf-8 -*- # Generated by Django 1.10.3 on 2016-11-29 10:44 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main', '0004_auto_20161129_0947'), ] operations = [ migrations.AlterField( model_name='pokemon', name='qr_code', field=models.TextField(default=''), ), migrations.AlterField( model_name='pokemon', name='qr_code_image', field=models.ImageField(blank=True, null=True, upload_to='qr'), ), ]
petersterling1/poke-qr-viewer
main/migrations/0005_auto_20161129_1044.py
Python
gpl-3.0
633
# -*- coding: utf-8 -*- """ HipparchiaBuilder: compile a database of Greek and Latin texts Copyright: E Gunderson 2016-21 License: GNU GENERAL PUBLIC LICENSE 3 (see LICENSE in the top level directory of the distribution) """ import re from string import punctuation try: from builder.dbinteraction.connection import setconnection except: setconnection = None from builder.parsers.betacodeandunicodeinterconversion import cleanaccentsandvj from builder.parsers.transliteration import stripaccents euripides = { "Al.": ("035", "Alcestis"), "Alc.": ("035", "Alcestis"), "Andr.": ("039", "Andromacha"), "Ba.": ("050", "Bacchae"), "Cyc.": ("034", "Cyclops"), "El.": ("042", "Electra"), "Epigr.": ("031", "Epigrammata"), "U1": ("022", "Epinicium in Alcibiadem (fragmenta)"), "Fr.": ("020", "Fragmenta"), "U3": ("033", "Fragmenta"), "U4": ("029", "Fragmenta"), "U5": ("025", "Fragmenta Alexandri"), "Antiop.": ("024", "Fragmenta Antiopes"), "Hyps.": ("026", "Fragmenta Hypsipyles"), "Oen.": ("030", "Fragmenta Oenei"), "Phaeth.": ("023", "Fragmenta Phaethontis"), "U6": ("032", "Fragmenta Phaethontis incertae sedis"), "U7": ("027", "Fragmenta Phrixei (P. Oxy. 
34.2685)"), "U8": ("028", "Fragmenta fabulae incertae"), "U9": ("021", "Fragmenta papyracea"), "Hec.": ("040", "Hecuba"), "Hel.": ("047", "Helena"), "Heracl.": ("037", "Heraclidae"), "HF": ("043", "Hercules"), "Hipp.": ("038", "Hippolytus"), "Ion": ("046", "Ion"), "IA": ("051", "Iphigenia Aulidensis"), "IT": ("045", "Iphigenia Taurica"), "Med.": ("036", "Medea"), "Or.": ("049", "Orestes"), "Ph.": ("048", "Phoenisae"), "Rh.": ("052", "Rhesus"), "Supp.": ("041", "Supplices"), "Tr.": ("044", "Troiades") } neposstrings = { 'Ages.': 'Ag', 'Alcib.': 'Alc', 'Arist.': 'Ar', # 'Att', 'Cat.': 'Ca', 'Cato,': 'Ca', 'Cato': 'Ca', 'Chab.': 'Cha', 'Chabr.': 'Cha', 'Chabr,': 'Cha', # 'Cim', # 'Con', 'Datam.': 'Dat', 'Dion.': 'Di', 'Dion,': 'Di', 'Epam.': 'Ep', 'Eun.': 'Eum', 'Eun,': 'Eum', 'Hamilc.': 'Ham', 'Hann.': 'Han', 'Hannib.': 'Han', 'Iphicr.': 'Iph', 'Iphic.': 'Iph', 'Lyt.': 'Lys', 'Mil.': 'Milt', # 'Paus', 'Pelop.': 'Pel', 'Ph.': 'Phoc', 'Regg.': 'Reg', # 'Them', 'Thras.': 'Thr', 'Tim.': 'Timol', # 'Timoth' } neposnumbers = { '002': 'Them', '004': 'Paus', '005': 'Cim', '006': 'Lys', '007': 'Alc', '009': 'Con', '011': 'Iph', '013': 'Timoth', '014': 'Dat', '015': 'Ep', '016': 'Pel', '017': 'Ag', '018': 'Eum', '019': 'Phoc', '020': 'Timol', '021': 'Reg', '022': 'Ham', '023': 'Han', '025': 'Att', } seneca = { 'ad Helv.': ('1017', '012:12:'), 'ad Marc.': ('1017', '012:6:'), 'ad Polyb.': ('1017', '012:11:'), 'Agm.': ('1017', '007:'), 'Agam.': ('1017', '007:'), 'Apoc.': ('1017', '011:'), 'Apocol.': ('1017', '011:'), 'Apocol.p.': ('1017', '011:'), 'Ben.': ('1017', '013:'), 'Benef.': ('1017', '013:'), 'Brev. Vit.': ('1017', '012:10:'), 'Clem.': ('1017', '014:'), 'de Clem.': ('1017', '014:'), 'de Prov.': ('1017', '012:1:'), 'Ep': ('1017', '015:'), 'Ep.': ('1017', '015:'), 'Consol. ad Marc.': ('1017', '012:6:'), 'Cons. ad Marc.': ('1017', '012:6:'), 'Cons. ad Helv.': ('1017', '012:12:'), 'Cons. ad Polyb.': ('1017', '012:11:'), 'Cons. Helv.': ('1017', '012:12:'), 'Cons. 
Marc.': ('1017', '012:6:'), 'Cons. Polyb.': ('1017', '012:11:'), 'Const.': ('1017', '012:2:'), 'Const. Sap.': ('1017', '012:2:'), 'Contr': ('1014', '001:'), # the father... 'Contr.': ('1014', '001:'), # the father... 'Controv.': ('1014', '001:'), # the father... 'Exc. Contr.': ('1014', '002:'), # the father... 'Exc. Controv.': ('1014', '002:'), # the father... 'Excerpt. Contr.': ('1014', '002:'), # the father... 'Excerpt. Controv.': ('1014', '002:'), # the father... 'Helv.': ('1017', '012:12:'), 'Herc. Fur.': ('1017', '001:'), 'Herc Oet.': ('1017', '009:'), 'Herc. Oet.': ('1017', '009:'), 'Hipp.': ('1017', '005:'), 'Hippol.': ('1017', '005:'), 'Ira,': ('1017', '012:3'), # note that we just sent I, II, and III to the same place... 'Lud. Mort. Claud.': ('1017', '011:'), 'Med.': ('1017', '004:'), 'Mort. Claud.': ('1017', '011:'), 'N. Q.': ('1017', '016:'), 'Oct.': ('1017', '010:'), 'Octav.': ('1017', '010:'), 'Ot. Sap.': ('1017', '012:8:'), 'Oed.': ('1017', '006:'), 'Oedip': ('1017', '006:'), 'Oedip.': ('1017', '006:'), 'Oet.': ('1017', '009:'), # 'Orest.': ('1017', '006:'), # ?! 'Phaedr.': ('1017', '005:'), 'Phoen.': ('1017', '003:'), 'Polyb.': ('1017', '012:11:'), 'Prov': ('1017', '012:1:'), 'Prov.': ('1017', '012:1:'), 'Q. N.': ('1017', '016:'), 'Suas.': ('1014', '003:'), # the father... 'Thyest.': ('1017', '008:'), 'Tranq.': ('1017', '012:9:'), 'Tranq. An.': ('1017', '012:9:'), 'Tranq. Vit.': ('1017', '012:9:'), 'Troad.': ('1017', '002'), 'Vit. B.': ('1017', '012:7:'), 'Vit. 
Beat.': ('1017', '012:7:'), } """ lingering issue: lt1017w012 | Dialogi de ira1: 3 de ira2: 4 de ira3: 5 """ sallust = { 'C.': '001', 'Cat.': '001', 'J': '002', 'J.': '002', 'Jug.': '002', 'H.': '003', 'Hist.': '003' } suetonius = { 'aug.': 'Aug', 'cal.': 'Cal', 'cl.': 'Cl', 'dom.': 'Dom', 'gal.': 'Gal', 'jul.': 'Jul', 'nero': 'Nero', 'otho': 'Otho', 'tib.': 'Tib', 'tit.': 'Tit', 'vesp.': 'Ves', # the one that breaks the pattern 'vit.': 'Vit', } """ GREEK """ def perseusworkmappingfixer(entrytext: str) -> str: """ some perseus references are broken; attempt to fix them turn something like Perseus:abo:tlg,0006,008:2 into Perseus:abo:tlg,0006,041:2 :param entrytext: :return: """ thumbprint = re.compile(r'<bibl n="Perseus:abo:tlg,(....),(...):.*?<title>(.*?)</title>.*?</bibl>') fixentry = re.sub(thumbprint, conditionalworkidswapper, entrytext) return fixentry def conditionalworkidswapper(match): """ sample match[0] <bibl n="Perseus:abo:tlg,0011,007:897" default="NO" valid="yes"><author>S.</author> <title>OC</title> 897</bibl> match[1] 0011 match[2] 007 match[3] OC :param match: :return: """ greekauthorstofix = {'0006': euripides} newtext = match[0] entriesfound = re.findall(r'<bibl.*?</bibl>', newtext) if len(entriesfound) > 1: # fix the last entry tailtext = entriesfound[-1] # funky entries like "0033,004:4(3).72" can recuse infinitely: need re.escape headtext = re.sub(re.escape(tailtext), str(), newtext) thumbprint = re.compile(r'<bibl n="Perseus:abo:tlg,(....),(...):.*?<title>(.*?)</title>.*?</bibl>') tailtext = re.sub(thumbprint, conditionalworkidswapper, tailtext) # ok, but what about fixing the other entries that remain... 
# look out for a RecursionError if you miscode: not currently present after a fix to 'thumbprint' headtext = re.sub(thumbprint, conditionalworkidswapper, headtext) newtext = headtext + tailtext return newtext if match[1] in greekauthorstofix: works = greekauthorstofix[match[1]] if match[3] in works: try: item = works[match[3]] newtext = re.sub(r'tlg,(....),(...)', r'tlg,\1,'+item[0], match[0]) except KeyError: # print('keyerror for' + match[3]) pass else: # print(match[3] + ' not in works of ' + match[1]) pass # print(' in:', match[0]) # print('out:', newtext) return newtext """ ROMAN DRAMA """ def latindramacitationformatconverter(entrytext: str, dbconnection=None) -> str: """ plautus is cited by act, etc vs by line attempt to fix this... :param entrytext: :return: """ needcleanup = False if not dbconnection: needcleanup = True dbconnection = setconnection() dbcursor = dbconnection.cursor() # dbconnection.setautocommit() authorstofix = {'phi,0119': 'Plautus', 'phi,0134': 'Terence'} citationfinder = re.compile(r'(<cit>.*?</cit>)') punct = re.compile('[{s}]'.format(s=re.escape(punctuation))) locusfinder = re.compile(r'</author>\s(.*?)\s(.*?)</bibl></cit>') quotefinder = re.compile(r'<quote lang="la">(.*?)</quote>') querytemplate = """ SELECT level_00_value FROM {t} WHERE wkuniversalid = %s and stripped_line ~* %s """ citations = re.findall(citationfinder, entrytext) for au in authorstofix: adb = re.sub(r'phi,', 'lt', au) targetcitation = re.compile(r'<bibl n="Perseus:abo:{a},(...):(.*?)"[^>]*?>'.format(a=au)) citationswap = re.compile(r'(<bibl n="Perseus:abo:{a},...:)(.*?)("[^>]*?>)'.format(a=au)) for c in citations: t = re.search(targetcitation, c) if t: q = re.search(quotefinder, c) if q: lineval = None hit = None quote = cleanaccentsandvj(stripaccents(q[1].lower())) quote = re.sub(punct, str(), quote) wkid = t[1] # loc = t[2] trialnumber = 0 for direction in ['reverse', 'forward', 'ends']: seeking = quote while seeking and not hit: trialnumber += 1 hit = 
lookforquote(adb, wkid, seeking, querytemplate, dbcursor) if not hit: seeking = shinkquote(seeking, direction) if hit: lineval = hit[0] # print('success on try #', trialnumber) # "success on try # 6" ! if lineval: newcitation = re.sub(locusfinder, r'</author> \1 {lv}</bibl></cit>'.format(lv=lineval), c) newcitation = re.sub(citationswap, r'\1ZZZ" class="rewritten\3', newcitation) newcitation = re.sub('ZZZ', lineval, newcitation) c = re.escape(c) try: entrytext = re.sub(c, newcitation, entrytext) except re.error: # re.error: bad escape \s at position 88 pass if needcleanup: dbconnection.connectioncleanup() return entrytext def lookforquote(adb: str, wkid: str, quote: str, querytemplate: str, dbcursor) -> tuple: """ the search proper :param adb: :param wkid: :param quote: :param querytemplate: :param dbcursor: :return: """ data = ('{a}w{w}'.format(a=adb, w=wkid), quote) # print(querytemplate.format(t=adb), data) dbcursor.execute(querytemplate.format(t=adb), data) hit = dbcursor.fetchone() return hit def shinkquote(quote: str, direction: str) -> str: """ sometimes quotes span lines; this hides them from us :param quote: :return: """ minimal = 2 newquote = str() qs = quote.split(' ') if len(qs) > minimal and direction == 'reverse': # newquote = ' '.join(qs[1:-1]) newquote = ' '.join(qs[:-1]) elif len(qs) > minimal and direction == 'forward': newquote = ' '.join(qs[1:]) elif len(qs) > minimal and direction == 'ends': newquote = ' '.join(qs[-1:1]) return newquote """ MISC LATIN FIXES """ def oneofflatinworkremapping(entrytext: str) -> str: """ hand off some oddballs :param entrytext: :return: """ fixers = [fixciceroverrinesections, fixciceromiscsections, fixfrontinus, fixmartial, fixnepos, fixpropertius, fixseneca, fixsallust, fixsuetonius, fixvarro] fixedentry = entrytext for f in fixers: fixedentry = f(fixedentry) return fixedentry def fixciceroverrinesections(entrytext: str) -> str: """ this sort of thing is not helpful <bibl 
n="Perseus:abo:phi,0474,005:5:21:section=53" default="NO" valid="yes"><author>id.</author> Verr. 2, 5, 21, § 53</bibl> "Perseus:abo:phi,0474,005:5:21:section=53" alternate version.... <bibl n="Perseus:abo:phi,0474,005:13:37" default="NO" valid="yes"><author>Cic.</author> Verr. 1, 13, 37</bibl> note that V 1.13.37 should be 1.1.13.37 insteat (and that is still wrong because of the '13') :param entrytext: :return: """ findsection = re.compile(r'<bibl n="Perseus:abo:phi,0474,005:(.*?:)section=(.*?)" (.*?)><author>(.*?)</author> Verr\. (.), (.*?)</bibl>') altfind = re.compile(r'<bibl n="Perseus:abo:phi,0474,005:(.*?:)(.*?)" (.*?)><author>(.*?)</author> Verr\. (.), (.*?)</bibl>') # x = re.findall(findsection, entrytext) # if x: # print(x[0]) newentry = re.sub(findsection, ciceroverrinehelper, entrytext) newentry = re.sub(altfind, ciceroverrinehelper, newentry) return newentry def ciceroverrinehelper(regexmatch) -> str: """ need to assign the right book to the citation the also contains the 'chapter' which we do not use in: <bibl n="Perseus:abo:phi,0474,005:5:21:section=53" default="NO" valid="yes"><author>id.</author> Verr. 2, 5, 21, § 53</bibl> you get re.findall(findsection, a) [('5:21:', '53', 'default="NO" valid="yes"', 'id.', '2', '5, 21, § 53')] :param regexmatch: :return: """ returntext = regexmatch.group(0) passage = regexmatch.group(1) section = regexmatch.group(2) tail = regexmatch.group(3) auth = regexmatch.group(4) vbook = regexmatch.group(5) vcit = regexmatch.group(6) bb = vbook if vbook == '1': vbook = '1:1' bb = '1, 1' if len(passage.split(':')) > 1: passage = passage.split(':')[0] passage = passage + ':' verrinetemplate = '<bibl n="Perseus:abo:phi,0474,005:{b}:{p}{s}" {t} class="rewritten"><author>{a}</author> Verr. 
{bb}, {c}</bibl>' newentry = verrinetemplate.format(p=passage, b=vbook, s=section, t=tail, a=auth, bb=bb, c=vcit) return newentry def fixciceromiscsections(entrytext: str) -> str: """ RUN THE VERRINES FIRST (because it needs 'section=') <quote lang="la">omnes de tuā virtute commemorant,</quote> <bibl n="Perseus:abo:phi,0474,058:1:1:13:section=37" default="NO" valid="yes"><author>Cic.</author> Q. Fr. 1, 1, 13, § 37</bibl> this should just be 1.1.37 the problem is confined almost exclusively to lt0474w058 a couple of items will remain in lt0474w005 (Verr.) [these got broken and will remain broken?] a couple in lt0474w047 (Parad. Stoic.) :param entrytext: :return: """ sectionfinder = re.compile(r'"Perseus:abo:phi,0474,(...):([^"]*?):section=(.*?)"') newentry = re.sub(sectionfinder, ciceromiscsectionhelper, entrytext) return newentry def ciceromiscsectionhelper(regexmatch) -> str: """ do the heavy lifting for fixciceromiscsections() :param regexmatch: :return: """ returntext = regexmatch.group(0) thework = regexmatch.group(1) passage = regexmatch.group(2) section = regexmatch.group(3) ps = passage.split(':') if len(ps) > 1: ps = ps[:-1] passage = ':'.join(ps) cicsectiontemplate = '"Perseus:abo:phi,0474,{w}:{p}:{s}"' newentry = cicsectiontemplate.format(w=thework, p=passage, s=section) #print('{a}\t{b}\t{c}'.format(a=newentry, b=passage, c=returntext)) return newentry def fixcicerochapters(entrytext: str, disabled=True) -> str: """ this sort of thing is not helpful <bibl "Perseus:abo:phi,0474,015:chapter=19" default="NO" valid="yes"><author>Cic.</author> Sull. 19 <hi rend="ital">fin.</hi></bibl> n="Perseus:abo:phi,0474,015:chapter=19" "chapter=NN" is only Cicero issue the example chosen sends you to Pro Sulla CHAPTER 19 to look for 'sententia', but that word appears in SECTIONS 55, 60, and 63... the code below will rewrite to give you a valid reference, but it will send you to the wrong place... CURRENTLY DISABLED RETAIN THE NOTES... 
:param entrytext: :return: """ if disabled: return entrytext findchapter = re.compile(r'"Perseus:abo:phi,0474,(...):chapter=(.*?)"') # x = re.findall(findchapter, entrytext) # if x: # print(x) newentry = re.sub(findchapter, r'"Perseus:abo:phi,0474,\1:\2" class="rewritten"', entrytext) return newentry def fixfrontinus(entrytext: str) -> str: """ n="Perseus:abo:phi,1245,001:Aquaed. 104" but Aq. is 002 :param entrytext: :return: """ findaquad = re.compile(r'"Perseus:abo:phi,1245,001:Aquaed\.(.*?)"') newentry = re.sub(findaquad, r'"Perseus:abo:phi,1245,002:\1" class="rewritten"', entrytext) return newentry def fixmartial(entrytext: str) -> str: """ all of martial has been assigned to work 001 :param entrytext: :return: """ findmartial = re.compile(r'"Perseus:abo:phi,1294,001:(.*?)"') findspectacles = re.compile(r'"Perseus:abo:phi,1294,002:(Spect. )(.*?)"') newentry = re.sub(findmartial, r'"Perseus:abo:phi,1294,002:\1" class="rewritten"', entrytext) newentry = re.sub(findspectacles, r'"Perseus:abo:phi,1294,001:\2"', newentry) return newentry def fixnepos(entrytext: str) -> str: """ like many others the work is a string and not a number <bibl n="Perseus:abo:phi,0588,001:Arist. 1:4" default="NO" valid="yes"><author>Nep.</author> Arist. 1, 4</bibl> but there is another problem: crazy worknumbers <bibl n="Perseus:abo:phi,0588,021:2:2" default="NO" valid="yes"><author>Nep.</author> Reg. 
2, 2</bibl> :param entrytext: :return: """ findnepos = re.compile(r'<bibl n="Perseus:abo:phi,0588,(...):(.*?)\s(.*?)"(.*?)><author>(.*?)</author>\s(.*?)\s(.*?)</bibl>') newentry = re.sub(findnepos, neposhelper, entrytext) return newentry def neposhelper(regexmatch) -> str: """ use dict to substitute :param regexmatch: :return: """ returntext = regexmatch.group(0) work = regexmatch.group(1) life = regexmatch.group(2) pasg = regexmatch.group(3) tail = regexmatch.group(4) auth = regexmatch.group(5) wnam = regexmatch.group(6) wloc = regexmatch.group(7) nepostemplate = '<bibl n="Perseus:abo:phi,0588,{work}:{life}:{loc}" class="rewritten" {tail}><author>{au}</author> {wn} {ll}</bibl>' if work == '001': # the work is a string and not a number try: knownsubstitute = neposstrings[life] except KeyError: # print('unk nepos: "{w}"'.format(w=work)) return returntext else: # crazy worknumbers # this also means you need to adjust the way '{life}:{loc}' looks too by adding the life name up front: 'Att:...' try: knownsubstitute = neposnumbers[work] except KeyError: # print('unk neposnumber: "{w}"\t{wn}'.format(w=work, wn=wnam)) return returntext knownsubstitute = '{ks}:{v}'.format(ks=knownsubstitute, v=life) newentry = nepostemplate.format(work='001', life=knownsubstitute, loc=pasg, tail=tail, au=auth, wn=wnam, ll=wloc) # if work != '001': # print(newentry) return newentry def fixpropertius(entrytext: str) -> str: """ <bibl n="Perseus:abo:phi,1224,001:1:8:29" default="NO"><author>Prop.</author> 1, 8, 29</bibl> but propertius is lt0620 :param entrytext: :return: """ findprop = re.compile(r'("Perseus:abo:phi),1224,(.*?" default="NO")(><author>Prop\.</author>)') newentry = re.sub(findprop, r'\1,0620,\2 class="rewritten"\3', entrytext) return newentry def fixsallust(entrytext: str) -> str: """ n="Perseus:abo:phi,0631,001:J. 
62:8" --> jugurtha :param entrytext: :return: """ findsallust = re.compile(r'"Perseus:abo:phi,0631,001:(.*?)\s(.*?)"') newentry = re.sub(findsallust, sallusthelper, entrytext) return newentry def sallusthelper(regexmatch) -> str: """ work some substitution magic on the sallust match the key work is done by the seneca dict() above :param regexmatch: :return: """ returntext = regexmatch.group(0) work = regexmatch.group(1).strip() pasg = regexmatch.group(2) sallusttemplate = '"Perseus:abo:phi,0631,{wk}:{loc}" class="rewritten"' try: knownsubstitute = sallust[work] except KeyError: # print('unk sallust: "{w}"'.format(w=work)) return returntext newentry = sallusttemplate.format(wk=knownsubstitute, loc=pasg) return newentry def fixseneca(entrytext: str) -> str: """ plenty of problems given that StE and StY are at times confused...: 1014 vs 1017 bad: "Perseus:abo:phi,1014,001:Ep. 29"; EM is 1017,015 "Perseus:abo:phi,1014,001:Tranq. 15"; TQ is 1017,012:9:15 "Perseus:abo:phi,1014,001:Contr. 1 praef"; de Ira 1 vs 2 vs 3 remains a problem 'ib.' 
remains a problem :param entrytext: :return: """ findem = re.compile(r'"Perseus:abo:phi,1014,001:(.*?)(\d.*?)"') newentry = re.sub(findem, senecahelper, entrytext) return newentry def senecahelper(regexmatch) -> str: """ work some substitution magic on the seneca match the key work is done by the seneca dict() above :param regexmatch: :return: """ returntext = regexmatch.group(0) work = regexmatch.group(1).strip() pasg = regexmatch.group(2) senecatemplate = '"Perseus:abo:phi,{au},{wk}{loc}" class="rewritten"' try: knownsubstitute = seneca[work] except KeyError: # print('unk seneca: "{w}"'.format(w=work)) return returntext newentry = senecatemplate.format(au=knownsubstitute[0], wk=knownsubstitute[1], loc=pasg) return newentry def fixsuetonius(entrytext: str) -> str: """ "Perseus:abo:phi,1348,001:life=aug.:82" --> "Perseus:abo:phi,1348,001:Aug.:82" :param entrytext: :return: """ findlife = re.compile(r'"Perseus:abo:phi,1348,001:life=(.*?)(:.*?)"') newentry = re.sub(findlife, suetoniushelper, entrytext) return newentry def suetoniushelper(regexmatch): """ substitution magic via the suetonius dict() :param regexmatch: :return: """ returntext = regexmatch.group(0) work = regexmatch.group(1).strip() pasg = regexmatch.group(2) suetoniustemplate = '"Perseus:abo:phi,1348,001:{wk}{loc}" class="rewritten"' try: knownsubstitute = suetonius[work] except KeyError: # print('unk suetonius: "{w}"'.format(w=work)) return returntext newentry = suetoniustemplate.format(wk=knownsubstitute, loc=pasg) return newentry def fixvarro(entrytext: str) -> str: """ the DLL citations are wrong: "Perseus:abo:phi,0684,001:L. L. 5:section=59" RR too: "Perseus:abo:phi,0684,001:R. R. 1:32:2" lingering issue: 'ibidem' "Perseus:abo:phi,0684,001:ib. 3:15:2" lingering issue: DNE "Perseus:abo:phi,0684,001:Sent. Mor. p. 28" "Perseus:abo:phi,0684,001:Fragm. p. 241" lingering issue: guaranteed mishit "Perseus:abo:phi,0684,001:Sat. Men. 
95:10" lingering issue: might-be-right-might-not "Perseus:abo:phi,0684,001:1:22" [is wrong since 'vernum' is in 002 but not in 001] vernum,</quote> <bibl n="Perseus:abo:phi,0684,001:33:3" :param entrytext: :return: """ finddll = re.compile(r'"(Perseus:abo:phi,0684,001:)(L. L. )(.*?):(section=)(.*?)"') findrr = re.compile(r'"(Perseus:abo:phi,0684,)(001):(R. R. )(.*?)"') findmenn = re.compile(r'"(Perseus:abo:phi,0684,001:Sat. Menip\. )(.*?)"') newentry = re.sub(finddll, r'"\1\3:\5" class="rewritten"', entrytext) newentry = re.sub(findrr, r'"Perseus:abo:phi,0684,002:\4" class="rewritten"', newentry) newentry = re.sub(findmenn, r'"Perseus:abo:phi,0684,011:\2" class="rewritten"', newentry) return newentry
e-gun/HipparchiaBuilder
builder/lexica/repairperseuscitations.py
Python
gpl-3.0
22,053
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011, De Verkeersonderneming <rits@verkeersonderneming.nl> # # This file is part of PyRITS - A tool for processing and analyzing transport # management system data. # # PyRITS is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PyRITS is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Calculate driving times and driving delays for database Erniesoft. The following type codes are used in the driving times algorithm. The description of each type code is described in the table below:: typecode | description ----------+--------------------------------------------------- 0 | Basic Sign Of Life 1 | User Login 2 | User Logout 3 | Driver Switch 10 | Activity Started 11 | Activity Cancelled 12 | Activity Report 13 | Activity End 14 | Activity Join 15 | Activity Leave 16 | Activity Switch 17 | PTO alarm 18 | PTO start 19 | PTO end 20 | Task Received 21 | Task Accepted 22 | Task Refused 23 | Task Busy 24 | Task Cancelled 25 | Task Finished 30 | Outgoing Phone Started 31 | Incoming Phone Started 32 | Outgoing Phone Finished 33 | Incoming Phone Finished 40 | Navigation Started 41 | Navigation Cancelled 42 | Navigation ETA update 43 | Navigation Destination Reached 50 | Start of Speed Limit Violation 51 | Start of Peak RPM violation 52 | Start of Average RPM violation 53 | Acceleration Limit Violation 54 | Acceleration Limit Violation End 55 | Engine Idle Violation 56 | Enter Geofence 57 | Exit Geofence 58 | Panic Alert 59 | 
Maximum Activity Duration Exceeded 60 | End of Speed Limit Violation 61 | End of Peak RPM Lmit Violation 62 | End of Average RPM Limit Violation 65 | Update of Speed Limit Violation 66 | Update of Peak RPM Limit Violation 67 | Update of Average RPM Limit Violation 71 | Contact ON 72 | Contact OFF 73 | System Shutdown 75 | Violation of Engine Lock 76 | Motion Alert (Vehicle Moves while contact is off) 77 | Vehicle Immobilization 78 | Power Disconnect (Run on Battery) 79 | Battery Low 82 | Driving Times State Event 83 | Driving Times Driving Warning 84 | Driving Times Driving Violation 85 | Driving Times Total Driving Warning 90 | Temperature Sensor Violation 91 | Temperature Sensor Violation Reminder 92 | Coolant Temperature Violation 93 | Coolant Temperature Violation Reminder 97 | Wrong Trailer Tethered 98 | Trailer Tethered 99 | Trailer Untethered 200 | GPRS Status Info """ import sys import os import math import datetime import logging import csv import psycopg2.extras import pyrits.config import pyrits.std import pyrits.HTML import pyrits.erniesoft.query __author__ = "Serrano Pereira" __copyright__ = "Copyright 2011, De Verkeersonderneming" __credits__ = ["Serrano Pereira <serrano.pereira@gmail.com>"] __license__ = "GPL3" __version__ = "0.1.2" __maintainer__ = "Serrano Pereira" __email__ = "serrano.pereira@gmail.com" __status__ = "Production" __date__ = "2011/11/24" DEBUG = 0 class Standard(object): """Super class for :class:`DrivingDelays` and :class:`DrivingTimes`.""" def get_vehicles_from_date_range(self, date_start=None, date_end=None): """Return all vehicle codes that occur in a date range.""" cursor = self.connection.cursor() if not date_start: cursor.execute("""SELECT DISTINCT terminalcode FROM hist_bcacties WHERE terminalcode IS NOT NULL;""") else: cursor.execute("""SELECT DISTINCT terminalcode FROM hist_bcacties WHERE tijd BETWEEN '%s' AND '%s' AND terminalcode IS NOT NULL;""" % (date_start, date_end)) vehicle_codes = [] for t in cursor: 
vehicle_codes.append(t[0]) cursor.close() return vehicle_codes class DrivingDelays(Standard): """Driving delays calculator. It calculates the clean driving times, the planned driving delays, and the realized driving delays. """ def __init__(self, connection): self.connection = connection self.location_combinations = {} self.reset() def reset(self): """Set variables with default values.""" self.task_routes = {} self.task_delays = {} self.clean_driving_times = {} self.route_distances = {} self.static_tasks = [] def __set_static_tasks(self): """Set a variable with all static tasks id's. Static tasks are tasks for which the vehicle has not moved. The task id's for static tasks are saved to variable `self.static_tasks`. The static tasks are required for method :meth:`__set_clean_driving_times`. """ cursor = self.connection.cursor() cursor.execute("SELECT task_id FROM driving_times WHERE static = 't';") for task_id in cursor: self.static_tasks.append(task_id[0]) cursor.close() def __set_clean_driving_times(self, vehicle_code): """Calculate the clean driving times. In order to calculate the clean driving times, first the driving distance for each route is retrieved from the database. Than this distances and the average driving time for a given distance is used to calculate the clean driving time. If the clean driving time is already present in the database, it will not calculate the clean driving time. It will set the clean driving time for static tasks to 0. .. note:: Usage of method :meth:`__set_clean_driving_times_from_modelit` is favored over this method. This method was removed from the default routines. """ # First set the static tasks. self.__set_static_tasks() # This query lists the visited locations in the realized order. This # is used to determine the route for each task. 
cursor = self.connection.cursor() cursor.execute("""SELECT r.ritregelnr, r.locatiecode FROM tbl_ritregels AS r INNER JOIN hist_bcacties AS a ON a.elipsref = r.ritregelnr INNER JOIN task_to_vehicle as v ON v.task_id = r.ritregelnr WHERE v.vehicle_code='%s' GROUP BY r.ritregelnr,r.locatiecode ORDER BY MIN(a.tijd);""" % (vehicle_code)) # Define location A and location B for each task. location_a = None for task_id,location_b in cursor: # Convert location names consisting of spaces to an empty string. if location_b: location_b = location_b.strip() # Add a location tuple to the tuples list if both locations are set. if bool(location_a) and bool(location_b): tuple = [location_a,location_b] # By sorting the tuple, retrieval from the database will be # faster if to be retrieved tuples are sorted as well. tuple.sort() # Save the tuple. self.task_routes[task_id] = tuple # For the next loop location A is the current location. if location_b: location_a = location_b else: location_a = None # Set the route distance for each task from the database. for task_id, route in self.task_routes.iteritems(): cursor.execute(pyrits.erniesoft.query.sget('get_route_info', {':location_a': route[0], ':location_b': route[1]} )) tuple = cursor.fetchone() if tuple: driving_time, distance = tuple # Save clean driving time in seconds for each task. if driving_time != None: self.clean_driving_times[task_id] = driving_time # Save route distance in meters for each task. if distance != None: self.route_distances[task_id] = distance # Close database cursor. cursor.close() # Calculate the clean driving time for each task based on the route # distances. for task_id, distance in self.route_distances.iteritems(): # Set the clean driving time to 0 if it's a static task. if task_id in self.static_tasks: self.clean_driving_times[task_id] = 0 continue # Skip if the clean driving time is already known. if task_id in self.clean_driving_times: continue # Calculate the clean driving time from the distance. 
# Convert distance to kilimeters (from meters). distance = distance / 1000.0 # Calculate clean driving time in seconds (from hours). driving_time = pyrits.std.driving_time_from_distance(distance) * 3600.0 # Save the clean driving time for each task to a variable. self.clean_driving_times[task_id] = driving_time def __set_clean_driving_times_from_modelit(self): """Sets clean driving times and planned delays using Modelit data. This method replaces methods :ref:`__set_clean_driving_times` and :ref:`calculate_planned_delays`. In the Erniesoft database, two values are calculated by Modelit: * freeflowmin * delaymin Sometimes freeflowmin is set to 0, while delaymin has a positive value. This means that delaymin could not be calculated, and in this case the value for freeflowmin is set in field delaymin. Sometimes the value for delaymin is negative. The cause of this is unknown and such values should be ignored. This methods uses these two values to set `clean_driving_time` and `planned_delay` in table driving_times. """ cursor = self.connection.cursor() cursor2 = self.connection.cursor() # Set the static tasks. self.__set_static_tasks() # Get Modelit data from the database. cursor.execute("""SELECT r.ritregelnr, r.freeflowmin, r.delaymin FROM tbl_ritregels r WHERE r.freeflowmin IS NOT NULL -- It's unclear why some delays are negative, so skip these records. AND r.delaymin > 0;""") # Before saving clean driving times (minutes) and planned delays (minutes) # from Modelit to the driving_times table, some preprocessing needs # to be done. for task_id,freeflowmin,delaymin in cursor: # Sometimes freeflowmin is set to 0, while delaymin has a positive # value. This means that delaymin could not be calculated, and in # this case the value for freeflowmin is set in field delaymin. if freeflowmin == 0 and delaymin > 0: freeflowmin = delaymin delaymin = 'NULL' # Set the clean driving time to 0 if the vehicle hasn't moved. 
if task_id in self.static_tasks: freeflowmin = 0 # Convert minutes to seconds. if isinstance(freeflowmin, float): freeflowmin *= 60 if isinstance(delaymin, float): delaymin *= 60 # Save clean driving times (seconds) and planned delays (seconds) # to table driving_times. cursor2.execute("""UPDATE driving_times SET (clean_driving_time, planned_delay) = (%s, %s) WHERE task_id = %d;""" % (freeflowmin, delaymin, task_id)) self.connection.commit() def calculate_realized_delays(self, vehicle_code): """Calculate the realized driving delays. In order to calculate the realized delays, the clean driving times are defined as well and both are saved to the ``driving_times`` table in the database. .. note:: Usage of method :meth:`calculate_delays_using_modelit` is favored over this method. This method was removed from the default routines. """ # Reset all values. self.reset() # Only vehicles with a code that start with a "W" have a board computer # on board. Other vehicles lack the required information and are skipped. if not vehicle_code.startswith("W"): logging.info("Vehicle with code %s is skipped; codes must start with 'W'" % vehicle_code) return 1 # First define clean driving times which are required for calculating # the delays. logging.info("Calculating clean driving times for vehicle %s..." % vehicle_code) self.__set_clean_driving_times(vehicle_code) logging.info("Calculating realized delays for vehicle %s..." % vehicle_code) # Get database cursor. cursor = self.connection.cursor() cursor2 = self.connection.cursor() # Calculate the delay for each task and save the delay to the database. cursor.execute(pyrits.erniesoft.query.sget('driving_times_from_tasks', {':task_ids': str(self.task_routes.keys()).strip('[]')})) for task_id,real_time in cursor: # Get the clean driving time. if task_id not in self.clean_driving_times: continue clean_time = self.clean_driving_times[task_id] # Get the route distance. 
distance = self.route_distances[task_id] # Calculate the delay in seconds. delay = real_time - clean_time # If the realized delay is less than 3 minutes, set it to 0. if -180 < delay < 180: delay = 0 # Save delay to database. cursor2.execute("""UPDATE driving_times SET (clean_driving_time,route_distance,delay) = (%d,%d,%d) WHERE task_id=%d;""" % (clean_time,distance,delay,task_id)) # Commit the database transaction. self.connection.commit() # Close database cursor. cursor.close() cursor2.close() return 0 def calculate_delays_using_modelit(self): """Calculate the realized driving delays. In order to calculate the realized delays, the clean driving times (and the planned delays) are first defined with method :meth:`__set_clean_driving_times_from_modelit`. Then the realized delays are calculated and saved to the ``driving_times`` table in the database. """ # Reset all values. self.reset() # First define clean driving times which are required for calculating # the delays. logging.info("Setting clean driving times and planned delays...") self.__set_clean_driving_times_from_modelit() logging.info("Calculating realized delays...") # Get database cursor. cursor = self.connection.cursor() cursor2 = self.connection.cursor() # Calculate the delay for each task and save the delay to the database. cursor.execute("""SELECT task_id, clean_driving_time, driving_time FROM driving_times WHERE clean_driving_time IS NOT NULL AND driving_time IS NOT NULL;""") for task_id, clean_driving_time, driving_time in cursor: # Calculate the realized delay in seconds. delay = driving_time - clean_driving_time # If the realized delay is less than 3 minutes, set it to 0. if -180 < delay < 180: delay = 0 # Save delay to database. cursor2.execute("""UPDATE driving_times SET (delay) = (%d) WHERE task_id = %d;""" % (delay,task_id)) # Commit the database transaction. self.connection.commit() # Close database cursor. cursor.close() cursor2.close() # Finally, populate table driving_times_rides. 
logging.info("Setting driving times and delays per ride...") self.set_driving_times_rides() return 0 def set_driving_times_rides(self): """Populate table :ref:`driving_times_rides <db-erniesoft-driving_times_rides>`.""" cursor = self.connection.cursor() # Purge the table. cursor.execute("DELETE FROM driving_times_rides;") self.connection.commit() # Populate the table. query = """INSERT INTO driving_times_rides SELECT ride_id, MIN(task_start), MAX(task_end), SUM(clean_driving_time), SUM(driving_time), SUM(planned_delay), SUM(delay), BOOL_OR(overnight), BOOL_OR(static) FROM driving_times GROUP BY ride_id;""" cursor.execute(query) self.connection.commit() cursor.close() def calculate_planned_delays(self): """Calculate the planned driving delays. A task can be visualized as follows:: DR DL TT |-------------------------|------|----------| ETA ETA Previous task Current task DR = Driving time without delay* DL = Planned delay TT = Terminal time; estimated time for loading/unloading/etc.* ETA = Estimated Time of Arrival; these mark the estimated beginning and end of a task.* * Value is obtained from the database. The formula used for calculating the planned driving delay is:: DL = ETA(curr. task) - ETA(prev. task) - TT - DR The clean driving times that are required for this methods are obtained from the database. Hence it is required to run :meth:`calculate_realized_delays` before running this methods as that method saves the clean driving times to the database. .. note:: This method is no longer part of the default routine because method :meth:`__set_clean_driving_times_from_modelit` saves both the clean driving times and the planned delays to the database. """ # Reset all values. self.reset() logging.info("Calculating planned delays...") # Get database cursor. cursor = self.connection.cursor() # Get ride and order information, sorted by ride so that we can # perform calculations per ride. We need to list each task, hence the # "LEFT OUTER" joins. 
# Marco said that we could just sort by [ritlink,ETA], but this is not # reliable as the ETA's sometimes contain errors which would result in # wrongly ordered tasks and thus wrongly calculated planned delays. # 'r.volgorde' denotes the planned order, and that is what the ETA's # are based on. cursor.execute("""SELECT r.ritlink,r.ritregelnr,r.actiecode,r.locatiecode,r.eta,o.opdrachttype,c.defaultlengteuurexport,c.defaultlengteuurimport,t.clean_driving_time FROM tbl_ritregels AS r LEFT OUTER JOIN tbl_orders AS o ON r.orderlink = o.ordernr LEFT OUTER JOIN stm_actiecodes AS c ON c.actiecode = r.actiecode LEFT OUTER JOIN driving_times AS t ON t.task_id = r.ritregelnr ORDER BY r.ritlink, r.volgorde;""") # Calculate the planned delay for each ride. eta_prev = None previous_ride = None previous_task_str = None previous_location = None for ride_id,task_id,task_str,location,eta_current,task_type,term_time_export, \ term_time_import,clean_driving_time in cursor: # Check if we entered a new ride. If so, do not use the last ETA. if previous_ride != ride_id: eta_prev = None # Check for required values and calculate the delay. if eta_prev and eta_current and clean_driving_time and task_type in ("EXPORT","IMPORT"): # Sometimes, the current date precedes the previous date # because of incorrectly defined ETA's. Handle this by either # skipping the task or raising an error. if eta_prev > eta_current: #raise ValueError("The previous ETA (%s) does not precede the current ETA (%s) for task #%d." % (eta_prev,eta_current,task_id)) logging.info("Task #%d is skipped because the previous ETA does not precede the current ETA." % task_id) # Continue with the next task. eta_prev = eta_current previous_ride = ride_id previous_task_str = task_str previous_location = location continue # Calculate the time difference in seconds between the two ETA's. eta_delta = pyrits.std.total_seconds(eta_current - eta_prev) # There are some situation that we need to correct for if both # ETA's are the same. 
if eta_delta == 0: # If the ETA's are the same, this will for sure result in # a negative delay. So just set the delay to 0. self.task_delays[task_id] = 0 # Continue with the next task. eta_prev = eta_current previous_ride = ride_id previous_task_str = task_str previous_location = location continue # Calculate the planned terminal time in seconds. if task_type == "EXPORT": # Convert hours to seconds terminal_time = term_time_export * 3600.0 elif task_type == "IMPORT": # Convert hours to seconds terminal_time = term_time_import * 3600.0 # Calculate planned delay. delay = eta_delta - terminal_time - clean_driving_time # Debugging if DEBUG and task_id == 125229: print "%s - %s - %s - %s = %s" % (eta_current, eta_prev, terminal_time, clean_driving_time, delay) print "%s - %s - %s = %s" % (eta_delta/3600.0, terminal_time/3600.0, clean_driving_time/3600.0, delay/3600.0) # Save the delay. self.task_delays[task_id] = delay # Set variables for the next loop. eta_prev = eta_current previous_ride = ride_id previous_task_str = task_str previous_location = location # Calculate the planned delay for each task and save the delay to the # database. for task_id,delay in self.task_delays.iteritems(): # Save delay to database. cursor.execute("UPDATE driving_times SET (planned_delay) = (%d) WHERE task_id=%d;" % (delay,task_id)) # Commit the database transaction. self.connection.commit() # Close database cursor. 
cursor.close() return 0 class DrivingTimes(Standard): """realized driving times calculator.""" def __init__(self, connection): self.connection = connection self.datetime_format = "%Y-%m-%d %H:%M:%S" self.field_names = ('ritregelnr','terminalcode','type','tijd','speed', 'kmstand','startactie','actie','actiecode','sessionseq') self.reset() def reset(self): """Set variables with default values.""" self.in_queue = False self.data = None self.tasks = [] self.task_tuples = [] self.driving_tuples = [] self.driving_tuples_queue = [] self.driving_tuple = [None,None] self.driving_times = {} self.overnight_tasks = [] self.arrivals = [] self.static_tasks = [] def mark_value(self, value): """Returns a marked version of a variable. Marked variables are colored yellow in the HTML output. """ return "::"+str(value) def unmark_value(self, value): """Returns the unmarked version of a marked variable.""" if not isinstance(value, str) or not value.startswith('::'): return value value = value[2:] if len(value) == 19: try: value = datetime.datetime.strptime(value, self.datetime_format) except: pass return value def purge(self): """Purge the driving times table.""" cursor = self.connection.cursor() cursor.execute("DELETE FROM driving_times;") self.connection.commit() cursor.close() def calculate_vehicle(self, vehicle_code, date_start=None, date_end=None): """Run all methods necessary for calculating the driving times. Calculates the realized driving times for a specific vehicle. If the date range is also set, it will only calculate the driving times for that date range. """ # Reset all values. self.reset() # Create database cursor. cursor = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor) # Only vehicles with a code that start with a "W" have a board computer # on board. Other vehicles lack the required information and should be # skipped. 
if not vehicle_code.startswith("W"): logging.info("Vehicle with code %s is skipped; codes must start with 'W'" % vehicle_code) return 1 if not (date_start and date_end): logging.info("Vehicle: %s" % (vehicle_code)) else: logging.info("Date range: %s to %s; Vehicle: %s" % (date_start, date_end, vehicle_code)) logging.info("Obtaining data...") # Select the right query and get all ride actions from the database. if not (date_start and date_end): query = pyrits.erniesoft.query.sget('actions_from_vehicle', {':vehicle_code': vehicle_code}) else: query = pyrits.erniesoft.query.sget('actions_from_date_range', {':date_start': date_start, ':date_end': date_end, ':vehicle_code': vehicle_code}) cursor.execute(query) logging.info("Processing data...") # Load all data into memory. self.data = pyrits.std.DataDict(cursor, self.field_names) cursor.close() # Check if any traces were found. If not, abort. if len(self.data) == 0: logging.info("No ride actions found. Nothing to do.") return 1 logging.info("Calculating driving times...") # Define tasks. self.set_tasks(self.data) # Check if any tasks were found. Sometimes we do find actions, but # they are not linked to any task. if len(self.tasks) == 0: logging.info("No tasks found. Nothing to do.") return 1 # Set the driving tuples. self.set_driving_tuples(self.data) # Set task tuples. self.set_task_tuples(self.data, self.tasks) # Calculate driving time for each task. self.set_driving_times(self.data, self.driving_tuples, self.task_tuples) # Set tasks for which the vehicle has not moved. self.set_static_tasks(self.arrivals, self.task_tuples, self.data) # Succesful termination. return 0 def calculate_ride(self, ride_number): """Run all methods necessary for calculating the driving times. Calculates the realized driving times for a specific ride ID. """ # Reset all values. 
self.reset() logging.info("Calculating driving times for ride #%s" % ride_number) logging.info("Accessing database...") # Connect to the database and execute the query. cursor = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor) cursor.execute(pyrits.erniesoft.query.sget('actions_from_ride', {':ridenum': ride_number})) logging.info("Processing data...") # Load all drive data into memory. self.data = pyrits.std.DataDict(cursor, self.field_names) cursor.close() # Check if any actions were found. If not, abort. if len(self.data) == 0: logging.info("No actions for this ride number. Nothing to do.") return 1 logging.info("Calculating driving times...") # Define tasks. self.set_tasks(self.data) # Check if any tasks were found. Sometimes we do find actions, but # they are not linked to any task. if len(self.tasks) == 0: logging.info("No tasks found. Nothing to do.") return 1 # Set the driving tuples. self.set_driving_tuples(self.data) # Set task tuples. self.set_task_tuples(self.data, self.tasks) # Calculate driving times. self.set_driving_times(self.data, self.driving_tuples, self.task_tuples) # Set tasks for which the vehicle has not moved. self.set_static_tasks(self.arrivals, self.task_tuples, self.data) # Succesful termination. return 0 def save_to_database(self): """Save the driving times to the database. Previously calculated driving times will be saved to a table ``driving_times`` in the database. """ cursor = self.connection.cursor() cursor2 = self.connection.cursor() logging.info("Saving driving times to database...") for task_id, task_name in self.get_tasks(unique=1): # Check if this task has been overnight. overnight = 'f' if task_id in self.overnight_tasks: overnight = 't' # Check if this is a static task. static = 'f' if task_id in self.static_tasks: static = 't' # Get the ride number for this task. 
query = "SELECT ritlink FROM tbl_ritregels WHERE ritregelnr = %d;" % (task_id) cursor2.execute(query) ride_id = cursor2.fetchone() # Construct insert query. columns = { 'task_start': self.task_times[task_id][0], 'task_end': self.task_times[task_id][1], 'driving_time': self.driving_times[task_id], 'overnight': overnight, 'static': static, } query = "INSERT INTO driving_times (task_id,ride_id,%%s) VALUES (%d,%d,%%s);" % (task_id, ride_id[0]) query = pyrits.erniesoft.query.construct_insert_query(query, columns) # Insert the driving time into the database. Note that each task # should have a corresponding driving time in self.driving_times. # If this is not the case, the tasks are computed wrongly. #try: cursor.execute(query) #except: # raise KeyError(task_id) # Commit the transaction. self.connection.commit() cursor.close() cursor2.close() def output_html(self, filename, open_file=0): """Save the realized driving times to a HTML file `filename`. If `open_file` is set to 1, the HTML file is automatically opened in the default web browser. """ logging.info("Writing results to %s" % filename) # Process data for output. marked_data = self.mark_driving_times(self.data, self.driving_tuples, self.task_tuples, self.tasks) formatted_driving_times = pyrits.std.driving_times_formatted(self.tasks, self.driving_times) # Open output file for writing. f = open(filename, 'w') # Write HTML header. f_header = open(os.path.join(pyrits.std.module_path(), 'include/header.html'), 'r') f.writelines(f_header) f_header.close() # Write driving times to output file. html = pyrits.HTML.Table(formatted_driving_times, header_row = ('Taak ID','Taak','Reistijd (s)','Reistijd (h:m:s)')) f.writelines(html) # Write marked data to output file. field_names = list(self.field_names) field_names.append('taak') html = pyrits.HTML.Table(marked_data, header_row = field_names) f.writelines(html) # Write HTML footer. 
f_footer = open(os.path.join(pyrits.std.module_path(), 'include/footer.html'), 'r') f.writelines(f_footer) f_footer.close() # Close output file. f.close() # Open the HTML file in a browser. if open_file: logging.info("Opening results in web browser.") pyrits.std.open_in_browser(filename) # ----------------------------------- # Setters def set_driving_tuples(self, data): """Define the start and end positions of actual driving. Driving tuples will be calculated and saved to a list `self.driving_tuples`. A driving tuple is a list of two integers. The two integers represent the start and end position respectively. The integers indicate row numbers of the actions list `data`. """ contact_off = 0 parking = 0 washing = 0 resting = 0 departed = None arrived = None moved = 0 driving = 0 driving_state = None last_km_position = None last_driving_time = None drive_state = None finished_tasks = [] # Trace types to be ignored (as indicated by T. Verduijn). ignored_types = (82,56,42,57) for i,row in enumerate(data): # Reset values. skip_current = 0 # First we set some variables which can tell us whether the # vehicle is driving or not. # Are we driving right now? If were are not driving, this is set # to None. is_driving = self.driving_tuple[0] # Find out if the vehicle has moved. if last_km_position: if row['kmstand'] > last_km_position: moved = 1 else: moved = 0 # Sometimes, a started "PA" action is never ended. To account for # this, if parking, and we come across an action which is neither # "UN" or "PA", then unset parking. if parking: if row['actie'] not in ("", "UN", "PA"): parking = 0 # Find out if the vehicle has arrived. if row['actie'] == "AankAdres": if row['type'] == 10: # From this point, put driving time tuples in the queue # because we don't know if the driver is going to cancel # this activity. self.in_queue = True # Arrived at destination. elif row['type'] == 13: # Arrived at destination. arrived = 1 # Keep track of arrival actions. 
This is needed later by # method set_static_tasks(). self.arrivals.append(i) # The driver didn't cancel the activity, so the driving # times in the queue can be cleared. self.driving_tuples_queue = [] # Reset values. departed = 0 self.in_queue = False if is_driving: # If driving, reset driving tuple. Because # self.in_queue is set to False now, current driving # times will be saved anyway. This is to prevent that. self.driving_tuple = [None,None] is_driving = False # Arrival cancelled. elif row['type'] == 11: # The driver didn't arrive after all. So put the driving # times from the queue in the actual list. self.driving_tuples.extend(self.driving_tuples_queue) # Reset values. self.driving_tuples_queue = [] self.in_queue = False # Find out if the vehicle has departed. elif row['actie'] == "VertrAdres": # Departure has started. if row['type'] == 13: if row['ritregelnr'] not in finished_tasks: # We've offcially departed. departed = 1 # We need to keep track of finished tasks because # sometimes the driver tells the board computer that # he departed more than once. finished_tasks.append(row['ritregelnr']) # Reset values. arrived = 0 driving_state = None # Find out if the driver is using the driving button. elif row['actie'] == "DR": # The driver seems to use the drive button. Whenever # driving_state is not set to None, the driving_state has more # control of deciding when the vehicle is driving. if row['type'] == 10: driving_state = 1 elif row['type'] in (13,11): driving_state = 0 # Find out if the vehicle is parking. elif row['actie'] == "PA": if row['type'] == 10: parking = 1 elif row['type'] in (13,11): parking = 0 # Washing the vehicle? elif row['actie'] == "Wassen": if row['type'] == 10: washing = 1 elif row['type'] in (13,11): washing = 0 # Find out if the driver has decided to take a nap. elif row['actie'] == "NachtRust": if row['type'] == 10: resting = 1 elif row['type'] in (13,11): resting = 0 # "Action" is set to something else. 
else: # Find out if the contact is off. if row['type'] == 72: contact_off = 1 elif row['type'] == 71: contact_off = 0 # The contact was just turned on, so do not log any # movements for this round. skip_current = 1 # Decide when the vehicle is driving based on the set variables. if skip_current: pass elif is_driving and row['type'] in ignored_types: # Ignore these types when already driving. pass elif not contact_off and not parking and not resting and not washing: # The vehicle may be driving. if departed or departed == None: # The vehicle has departed. It's now clear to decide if the # vehicle is driving. if driving_state: # If driving_state is not set to None (the driver has # used the driving button), give this variable more # control over the driving boolean. self.set_driving_tuple(1, i) elif driving_state == 0: # Check if the driver marked the end of driving. self.set_driving_tuple(0, i) driving_state = None # The driver is not using the driving button. So decide # when the vehicle is moving based on other variables. elif moved: # The vehicle has moved. It must be driving. self.set_driving_tuple(1, i, offset=-1) # Set the last driving time. last_driving_time = row['tijd'] else: # The vehicle hasn't moved. It's probably not driving. self.set_driving_tuple(0, i, offset=-1) elif not departed: # Don't ever set to driving when the vehicle has not even # departed. self.set_driving_tuple(0, i, offset=-1) else: # Either the contact is off, it's parking, or the driver is # resting. So it can't be driving. self.set_driving_tuple(0, i, offset=-1) # What's the current position of the kilometer counter? 
last_km_position = row['kmstand'] def set_driving_tuple(self, driving, index, offset=0): """Construct and save a single driving tuple.""" if driving: if self.driving_tuple[0] == None: self.driving_tuple[0] = index+offset else: if self.driving_tuple[0] == None: return if self.driving_tuple[1] == None: self.driving_tuple[1] = index+offset if self.in_queue: self.driving_tuples_queue.append(self.driving_tuple) else: self.driving_tuples.append(self.driving_tuple) self.driving_tuple = [None,None] else: raise ValueError def set_tasks(self, data): """Returns the tasks list (actual order). This method uses the order as defined in the "actions" table. The order defined in this table is how the tasks were actually started. Returns list of tuples (task_no,task_name). """ self.tasks = [] last_task = None for row in data: if row['ritregelnr'] and row['ritregelnr'] != last_task: last_task = row['ritregelnr'] tuple = (row['ritregelnr'],row['actiecode']) self.tasks.append(tuple) def set_task_tuples(self, data, tasks): """Set the task tuples which define start and end of a task. Each tuple is a list [task_id, task_start_id, task_end_id]. The start and end id's are indexes of the main data object `data`. The end of the current task and the start of the next task is defined as the departure from an address. This method also keeps a list `self.overnight_tasks` to which all task numbers marked with an overnight are saved. By default, a task is marked 'overnight' if the time between contact on and contact off has been longer than five hours (setting 'time-diff-for-overnight'). """ # List which will hold all the task tuples. self.task_tuples = [] # Dictionary which will hold the start and end time for each task. self.task_times = {} # List which will hold all the task for which an overnight occurred. self.overnight_tasks = [] # Keep track of the last handled task. last_finished_task = None # The index of the current task in tasks list. 
current_task_index = 0 # Set the first task to the first task ID (ritregelnr). Task ID's # are always linked to a single task and are unique within a ride. current_task = tasks[current_task_index][0] # Assume that every drive starts with the first task. task_tuple = [current_task,0,None] # Set the start time for the first task. self.task_times[current_task] = [data[0]['tijd'], None] # Get total number of tasks. n_tasks = len(tasks) # Last time that the contact was turned off. Required for defining # overnight tasks. last_contact_off = None i = None for i, row in enumerate(data): # Find out if the vehicle has departed. if self.has_departed(row, tasks, n_tasks, current_task_index, last_finished_task): # We've officially ended a task. # We need to keep track of the last finished task because # sometimes the driver tells the board computer that # he left the address twice or more in a row. last_finished_task = current_task # Set the end time for this task. if current_task in self.task_times: self.task_times[current_task][1] = row['tijd'] else: # In the rare situation where a tasks is ended, but was # never started, only set the end time. self.task_times[current_task] = [None, row['tijd']] # Set the current task to the next task, if any. if current_task_index+1 < n_tasks: # Set to next task. current_task_index += 1 current_task = tasks[current_task_index][0] # Set the start time for the next task. if current_task not in self.task_times: self.task_times[current_task] = [data[i+1]['tijd'], None] else: # There is no follow up task in tasks, so set to # undefined. current_task = None # The previous task has ended, so set the end row for # the task tuple and add the tuple to the task tuples. task_tuple[2] = i self.task_tuples.append(task_tuple) # As the last task has ended, a new task is started. task_tuple = [current_task,i+1,None] # We are inside a task. if current_task: # Keep track of tasks for which there has been an overnight. 
if row['type'] == 72: last_contact_off = row['tijd'] elif row['type'] == 71: if last_contact_off: # Decide whether there has been an overnight (the # time difference between contact on and contact # off is > 5 hours). time_delta = row['tijd'] - last_contact_off if pyrits.std.total_seconds(time_delta) > pyrits.config.cfg.get('time-diff-for-overnight'): self.overnight_tasks.append(current_task) # Reset values. last_contact_off = None if i: # The very last task has ended, so set the end row for the task tuple. task_tuple[2] = i self.task_tuples.append(task_tuple) def has_departed(self, row, tasks, len_tasks, current_task_index, last_finished_task): """Returns True if the vehicle has departed from a location. Departure marks the end of the current task and the beginning of the next task. """ # Check if this trace is set as departure. if row['actie'] == "VertrAdres" and row['type'] == 13 and row['ritregelnr'] != last_finished_task: return True # If multiple rides are analyzed, it is possible that a new tasks # is entered, without leaving an address. In this case, end the current # task. elif row['ritregelnr'] and current_task_index+1 < len_tasks and row['ritregelnr'] == tasks[current_task_index+1][0]: return True return False def set_static_tasks(self, arrivals, task_tuples, data): """Set tasks for which the vehicle has not moved between departure and arrival. Sometimes the vehicle has not even moved before arrival at desitnation. For these tasks we want and expected driving time of 0. If the kilometer counter position at departure is the same as for arrival, then we assume the vehicle has not moved. Departure is defined as the start of a task obtained from `task_tuples` set by :meth:`set_task_tuples`. Arrival of each task `arrivals` was set by :meth:`set_driving_tuples` in variable `self.arrivals`. """ # Check each arrival id. for arrival_id in arrivals: # First get the departure and task id to which this arrival id # belongs. 
departure_id = None for task,start,end in task_tuples: if start <= arrival_id <= end: task_id = task departure_id = start break # Continue with the next arrival id if no task id was found. if departure_id == None: continue # If we have a departure id, we can get the kilometer counter # position of both departure and arrival. km_departure = data[departure_id]['kmstand'] km_arrival = data[arrival_id]['kmstand'] # Now determine if the vehicle has moved. If not, add it to our # list of tasks for which the vehicle has not moved. if km_departure == km_arrival: self.static_tasks.append(task_id) def set_driving_times(self, data, driving_tuples, task_tuples): """Set the driving time for each task.""" self.driving_times = {} # Set driving time for all tasks (that are present in task_tuples) to 0. # The first item in each tuple is the task number/ritregelnr. # Note that we do not use self.tasks for this, but this shouldn't # matter because if the tasks tuples were calculated correctly, all # tasks should be present in task_tuples as well. for t in task_tuples: self.driving_times[t[0]] = 0 # Calculate total driving time (in seconds) for all tasks. for t in driving_tuples: # Get the current task. task = self.get_taskno_from_driving_tuple(t, task_tuples) # If there is no task for this driving tuple, skip it. if not task: continue # Get the duration for the current driving time. time_delta = data[t[1]]['tijd'] - data[t[0]]['tijd'] # Set driving time. self.driving_times[task] += pyrits.std.total_seconds(time_delta) # ----------------------------------- # Getters def get_data(self): """Returns the main data object.""" return self.data def get_driving_times(self): """Returns a list of all driving times.""" return self.driving_times def get_driving_tuples(self): """Returns a list of all driving tuples.""" return self.driving_tuples def get_tasks(self, unique=0): """Returns a list of all task id's. If `unique` is set to True, a task id's is only returned once. 
""" if not unique: return self.tasks else: seen = [] result = [] for tuple in self.tasks: if tuple[0] in seen: continue seen.append(tuple[0]) result.append(tuple) return result def get_task_tuples(self): """Returns a list of all task tuples.""" return self.task_tuples def get_driving_indexes(self, driving_tuples): """Return a list of indexes where the vehicle was driving. Indexes are indexes of the `self.data` object. """ indexes = [] for start,end in driving_tuples: for x in range(start, end+1): indexes.append(x) return indexes def get_taskno_from_driving_tuple(self, driving_tuple, task_tuples): """Return the task id that belongs to a driving tuple. Note that all id's used here are indexes of the main data object. """ for task,start,end in task_tuples: if driving_tuple[0] >= start and driving_tuple[1] <= end: return task return None def get_taskno_from_arrival_id(self, arrival_id, task_tuples): """Return the task id that belongs to an arrival id. Note that all id's used here are indexes of the main data object. """ for task,start,end in task_tuples: if start <= arrival_id <= end: return task return None def get_taskstr_from_taskno(self, tasks, n): """Return task name from a task number. The task number must be a ride rule number. """ for task_id,name in tasks: if task_id == n: return name return None # ----------------------------------- # Generators def mark_driving_times(self, data, driving_tuples, task_tuples, tasks): """Marks certain items in the main data object `data`. Some items need to be colored in the HTML object and are thus marked for coloring by this method. """ driving_indexes = self.get_driving_indexes(driving_tuples) for i,row in enumerate(data): # Get the current task. for task_id, start, end in task_tuples: if start <= i <= end: current_task = task_id break # Decide which cells to color. 
for key, value in row.iteritems(): if key == 'tijd' and i in driving_indexes: row[key] = self.mark_value(value) if key == 'actie' and value in ('AankAdres','VertrAdres'): row[key] = self.mark_value(value) # Add the current task to the row. row['taak'] = "%s_%s" % (self.get_taskstr_from_taskno(tasks, current_task), current_task) # Color the task if it's an overnight task. if current_task in self.overnight_tasks: row['taak'] = self.mark_value(row['taak']) yield row class Preprocess(object): """Perform some precalculations on the database. These precalculations must be performed before calculating driving times, delays, etc. """ def __init__(self, connection): self.connection = connection def start(self): self.task_to_vehicle() self.task_to_route() logging.info("Done") def task_to_vehicle(self): """Fill the table `task_to_vehicle`. Table `task_to_vehicle` is required for calculating the driving times. Sometimes more vehicles are found for a single task in the activities table. Table `task_to_vehicle` is created to define the vehicle actually used for each task. Warning: After execution of this function there may still be tasks missing from the `task_to_vehicle` table because some `hist_bcacties` records lack a vehicle code (this is an inconsistency in the database). """ logging.info("Defining vehicle for each task...") cursor = self.connection.cursor() cursor2 = self.connection.cursor() # First purge the table. cursor.execute("DELETE FROM task_to_vehicle;") self.connection.commit() # First set one vehicle code per task (doesn't matter which one, but # this is so tasks that are excluded from the next query have a # vehicle code set as well). This also means that tasks for # which no task was finished, the vehicle code is set anyway. This # is probably not desired as this might lead to incorrect driving times # for such tasks. Is there a better way to do this? 
cursor.execute("""INSERT INTO task_to_vehicle (task_id,vehicle_code) SELECT elipsref, MAX(terminalcode) FROM hist_bcacties WHERE elipsref IS NOT NULL AND terminalcode IS NOT NULL GROUP BY elipsref;""") self.connection.commit() # Finally update the vehicle code for each task returned by the # following query. This query only shows vehicles for which a task # was actually finished (typecode=13). # NOTE: Notice the MAX() in that query. This means that if we still # find more than one vehicle code for a task (this is not supposed to # happen), we pick the last one just so the program doesn't crash. cursor.execute("""SELECT r.ritregelnr, MAX(a.terminalcode) FROM hist_bcacties a INNER JOIN tbl_ritregels r ON (a.elipsref = r.ritregelnr) WHERE a.typecode=13 AND a.actie = r.actiecode AND a.terminalcode IS NOT NULL GROUP BY r.ritregelnr;""") for task, vehicle in cursor: cursor2.execute("UPDATE task_to_vehicle SET (vehicle_code) = ('%s') WHERE task_id=%d;" % (vehicle,task)) self.connection.commit() # Warning: At this point, there may still be tasks missing from the # task_to_vehicle table because some `hist_bcacties` records lack # a vehicle code (this is an inconsistency in the database). cursor2.close() cursor.close() def task_to_route(self): """Fill the table `task_to_route`. Table `task_to_route` is used to easily obtain the from and to location for a task. """ logging.info("Defining route for each task...") cursor = self.connection.cursor() cursor2 = self.connection.cursor() # First purge the table. cursor.execute("DELETE FROM task_to_route;") self.connection.commit() # Define routes. 
cursor.execute("""SELECT r.ritlink, r.ritregelnr, r.locatiecode, r.locatiepostcode, r.locatieplaats, r.locatieland, v.vehicle_code FROM tbl_ritregels r INNER JOIN hist_bcacties a ON a.elipsref = r.ritregelnr INNER JOIN task_to_vehicle v ON v.task_id = r.ritregelnr GROUP BY r.ritlink,r.ritregelnr,r.locatiepostcode,v.vehicle_code ORDER BY v.vehicle_code, MIN(a.tijd);""") previous_vehicle = None from_task = None from_code = None from_postcode = None from_city = None from_country = None for ride_id,task_id,to_code,to_postcode,to_city,to_country,vehicle in cursor: # Skip unsupported vehicles. if not vehicle.startswith("W"): previous_vehicle = None from_task = None from_code = None from_postcode = None from_city = None from_country = None continue # If a new vehicle is encountered, do not count this as a route. # Reset "from" location so that the "to" location is saved for the # next loop. if previous_vehicle != vehicle: from_postcode = None # Convert post codes consisting of spaces to an empty string. if to_postcode: to_postcode = to_postcode.strip() # Save route to the database. if from_postcode and to_postcode: columns = {'from_task': from_task, 'from_code': from_code, 'from_postcode': from_postcode, 'from_city': from_city, 'from_country': from_country, 'from_country': from_country, 'to_code': to_code, 'to_postcode': to_postcode, 'to_city': to_city, 'to_country': to_country } query = "INSERT INTO task_to_route (task_id,ride_id,%%s) VALUES (%d,%d,%%s);" % (task_id,ride_id) query = pyrits.erniesoft.query.construct_insert_query(query, columns) cursor2.execute(query) # Set values for the next loop. from_task = task_id from_code = to_code from_postcode = to_postcode from_city = to_city from_country = to_country previous_vehicle = vehicle # Commit changes. self.connection.commit() cursor2.close() cursor.close()
figure002/pyrits
pyrits/erniesoft/std.py
Python
gpl-3.0
61,599
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in

import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

# Input data files are available in the "data/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory

# Any results you write to the current directory are saved as output.

import numpy as np
import pandas as pd
import xgboost as xgb
import gc
import sklearn  # NOTE(review): imported but never used in this script.

print('Loading data ...')

# Zillow data: transaction targets and per-parcel property features.
train = pd.read_csv('data/train_2016.csv')
prop = pd.read_csv('data/properties_2016.csv')

# Downcast float64 feature columns to float32 to halve memory usage.
for c, dtype in zip(prop.columns, prop.dtypes):
    if dtype == np.float64:
        prop[c] = prop[c].astype(np.float32)

# Attach property features to each transaction row.
df_train = train.merge(prop, how='left', on='parcelid')

# Drop identifiers, the target, and free-text/high-cardinality columns.
x_train = df_train.drop(['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc', 'propertycountylandusecode'], axis=1)
y_train = df_train['logerror'].values
print(x_train.shape, y_train.shape)

# Remember the training column order for building the test matrix later.
train_columns = x_train.columns

# Object-dtype columns are reduced to a boolean flag (value == True).
for c in x_train.dtypes[x_train.dtypes == object].index.values:
    x_train[c] = (x_train[c] == True)

del df_train; gc.collect()

# Simple holdout split: first 90000 rows train, the rest validation.
# (The whole right-hand side is evaluated before assignment, so all four
# slices refer to the original x_train/y_train.)
split = 90000
x_train, y_train, x_valid, y_valid = x_train[:split], y_train[:split], x_train[split:], y_train[split:]

x_train = x_train.values.astype(np.float32, copy=False)
x_valid = x_valid.values.astype(np.float32, copy=False)

d_train = xgb.DMatrix(x_train, label=y_train)
d_valid = xgb.DMatrix(x_valid, label=y_valid)

del x_train, x_valid; gc.collect()

# XGBoost hyperparameters: small learning rate, MAE as the eval metric.
params = {}
params['eta'] = 0.02
params['objective'] = 'reg:linear'
params['eval_metric'] = 'mae'
params['max_depth'] = 10
params['silent'] = 0

# Train for up to 10000 rounds, stopping when the validation MAE has not
# improved for 100 consecutive rounds.
watchlist = [(d_train, 'train'), (d_valid, 'valid')]
clf = xgb.train(params, d_train, 10000, watchlist, early_stopping_rounds=100, verbose_eval=10)

del d_train, d_valid; gc.collect()

print("Prepare for the prediction ...")

# Build the test feature matrix with the same columns as training.
sample = pd.read_csv('data/sample_submission.csv')
sample['parcelid'] = sample['ParcelId']
df_test = sample.merge(prop, on='parcelid', how='left')

del sample, prop; gc.collect()

x_test = df_test[train_columns]

del df_test; gc.collect()

# Apply the same object-column transformation as for training.
for c in x_test.dtypes[x_test.dtypes == object].index.values:
    x_test[c] = (x_test[c] == True)

x_test = x_test.values.astype(np.float32, copy=False)

print("Start prediction ...")

# NOTE(review): predict() here uses all trained trees; after early
# stopping one would usually predict with the best iteration only —
# confirm whether that is intended.
d_test = xgb.DMatrix(x_test)
p_test = clf.predict(d_test)

del x_test; gc.collect()

print("Start write result ...")

# Write the same prediction into every submission month column.
sub = pd.read_csv('data/sample_submission.csv')
for c in sub.columns[sub.columns != 'ParcelId']:
    sub[c] = p_test

sub.to_csv('out/xgb.csv', index=False, float_format='%.4f')
SXBK/kaggle
zillow/xgb.py
Python
gpl-3.0
2,782
# annotation parser that processes our hashtags
import re


def get_abstracts(filename):
    """Parse an annotated-abstracts file into a list of record dicts.

    The file format (as produced by the annotators) is a sequence of:

        Abstract <n> of <total>
        <abstract text lines>
        BiviewID <id>; PMID <id>
        <annotation/note lines>

    Anything before the first "Abstract 1 of N" header is skipped.
    Returns a list of dicts with keys "abstract" (raw abstract text),
    "notes" (list of raw note lines), "pmid", "biviewid" and "annotid".
    """
    output = []
    abstract_buffer = []
    notes_buffer = []
    last_abstract_no = 0
    last_pmid = 0
    last_biviewid = 0
    # Whether the current lines belong to the abstract text (True) or to
    # the notes that follow the "BiviewID ...; PMID ..." marker (False).
    record_abstract = False
    # Bug fix: opened in text mode ('r') instead of 'rb' so the str
    # regexes below work under Python 3 as well.
    with open(filename, 'r') as f:
        # fast forward to abstract 1
        for line in f:
            if re.match(r"Abstract 1 of [1-9][0-9]*", line):
                record_abstract = True
                last_abstract_no = 1
                break
        for line in f:
            m = re.match(r"Abstract ([1-9][0-9]*) of [1-9][0-9]*", line.strip())
            if m:
                # A new abstract header: flush the record collected so far.
                record_abstract = True
                output.append({"abstract": "\n".join(abstract_buffer),
                               "notes": notes_buffer,
                               "pmid": last_pmid,
                               "biviewid": last_biviewid,
                               "annotid": last_abstract_no})
                abstract_buffer, notes_buffer = [], []
                last_abstract_no = int(m.group(1))
                continue
            m = re.match(r"BiviewID ([0-9]+); PMID ([0-9]+)", line)
            if m:
                # The ID line separates the abstract text from the notes.
                record_abstract = False
                last_biviewid = int(m.group(1))
                last_pmid = int(m.group(2))
                continue
            if line.strip():
                if record_abstract:
                    abstract_buffer.append(line)
                else:
                    notes_buffer.append(line)
    # Flush the final record. Bug fix: the original omitted the "pmid",
    # "biviewid" and "annotid" keys for the last abstract in the file,
    # making its record inconsistent with all the others.
    output.append({"abstract": "\n".join(abstract_buffer),
                   "notes": notes_buffer,
                   "pmid": last_pmid,
                   "biviewid": last_biviewid,
                   "annotid": last_abstract_no})
    return output


def main():
    """Quick manual check: compare record 128 from the two annotators."""
    a = get_abstracts("data/drug_trials_in_cochrane_BCW.txt")
    b = get_abstracts("data/drug_trials_in_cochrane_IJM.txt")
    i = 128
    print()
    print(a[i])
    print()
    print(b[i])


if __name__ == '__main__':
    main()
ijmarshall/cochrane-nlp
parse_annotations.py
Python
gpl-3.0
1,910
#!/usr/bin/env python
# vim: set expandtab shiftwidth=4:
# http://www.voip-info.org/wiki/view/asterisk+manager+events
#
# Bridge between the Asterisk Manager Interface (AMI) and a STOMP message
# queue: AMI events are dispatched to a protocol-specific command handler,
# which forwards them to the STOMP broker.
#
# NOTE(review): this is Python 2 code (print statements, ConfigParser).

import asterisk.manager
import sys,os,time
import simplejson as json
from stompy.simple import Client
import ConfigParser
from sqlobject import *
from handlers.command_handler_factory import CommandHandlerFactory
from handlers.command_constants import Protocol

#sys.stdout = open("/var/log/requests/connector2.log","a")
#sys.stderr = open("/var/log/requests/connector-err2.log","a")

import logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)-8s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    filename='/tmp/myapp.log',
                    filemode='a+')

import fcntl

# Exclusive, non-blocking file lock so only one connector instance runs
# at a time.
lockfile = os.path.normpath('/tmp/' + os.path.basename(__file__) + '.lock')
exclusive_lock = open(lockfile, 'w')
try:
    fcntl.lockf(exclusive_lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
    # The lock is already held by another process.
    print "Another instance is already running, quitting."
    time.sleep(1)
    sys.exit(-1)

# Read the STOMP, AMI and SQL connection settings from the config file.
config = ConfigParser.ConfigParser()
config.read('/opt/ucall/etc/config.ini')

stomp_host = config.get('STOMP', 'host')
stomp_username = config.get('STOMP', 'username')
stomp_password = config.get('STOMP', 'password')

print '='*80
print 'Stomp host:', stomp_host
print 'Stomp username:', stomp_username
print 'Stomp password:', stomp_password
print '='*80

ami_host = config.get('AMI', 'host')
ami_username = config.get('AMI', 'username')
ami_password = config.get('AMI', 'password')

print 'AMI host:', ami_host
print 'AMI username:', ami_username
print 'AMI password:', ami_password
print '='*80

sql_dsn = config.get('SQL', 'dsn')
print 'SQL:', sql_dsn
print '='*80

# Connect to the STOMP broker; per-agent messages are published on
# queues under this prefix.
stomp = Client(stomp_host)
stomp.connect(stomp_username, stomp_password)
stomp.agent_channel = 'jms.queue.msg.'

# Make the SQL connection the process-wide default for sqlobject.
connection = connectionForURI(sql_dsn)
sqlhub.processConnection = connection

manager = asterisk.manager.Manager()
#try:
#try:
manager.connect(ami_host)
manager.login(ami_username, ami_password)
# The command handlers publish their output to this STOMP client.
manager.destination = stomp

# Pick the protocol constant matching the reported AMI version; any
# other version is unsupported and the connector exits.
asteriskProtocolVersion = None
if manager.version == '1.0':
    asteriskProtocolVersion = Protocol.ASTERISK_1_0
elif manager.version == '1.1':
    asteriskProtocolVersion = Protocol.ASTERISK_1_1
else:
    sys.exit()

command_handler = CommandHandlerFactory(asteriskProtocolVersion).create_command_handler()

# Subscribe the handler to the AMI events of interest.
manager.register_event('Shutdown', command_handler.handle_Shutdown)
manager.register_event('Hangup', command_handler.handle_Hangup)
manager.register_event('Link', command_handler.handle_Link)
manager.register_event('Bridge', command_handler.handle_Bridge)
manager.register_event('Dial', command_handler.handle_Dial)
manager.register_event('Newstate', command_handler.handle_Newstate)
manager.register_event('QueueMemberAdded', command_handler.handle_QueueMemberAdded)
manager.register_event('QueueMemberRemoved', command_handler.handle_QueueMemberRemoved)
manager.register_event('QueueMemberPaused', command_handler.handle_QueueMemberPaused)
manager.register_event('QueueMember', command_handler.handle_QueueMember)

# Block processing AMI events until shutdown.
manager.message_loop()
manager.logoff()

#except asterisk.manager.ManagerSocketException, (errno, reason):
#   print "Error connecting to the manager: %s" % reason
#except asterisk.manager.ManagerAuthException, reason:
#   print "Error logging in to the manager: %s" % reason
#except asterisk.manager.ManagerException, reason:
#   print "Error: %s" % reason
#except:
#   sys.exit()
#finally:
manager.close()
gryzz/uCall
utils/asterisk-connector/ami2stomp.py
Python
gpl-3.0
3,524
# Doxyfile 1.7.4 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" "). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer # a quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify an logo or icon that is # included in the documentation. The maximum height of the logo should not # exceed 55 pixels and the maximum width should not exceed 200 pixels. # Doxygen will copy the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. 
# If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. 
REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = YES # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. 
# If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful if your file system # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. 
# Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this # tag. The format is ext=language, where ext is a file extension, and language # is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, # C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. 
For instance to make # doxygen treat .inc files as Fortran files (default is PHP), and .f files as C # (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions # you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. 
DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and # unions are shown inside the group in which they are included (e.g. using # @ingroup) instead of on a separate page (for HTML and Man pages) or # section (for LaTeX and RTF). INLINE_GROUPED_CLASSES = NO # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penalty. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will roughly double the # memory usage. 
The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespaces are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. 
# If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. 
HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = NO # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen # will sort the (brief and detailed) documentation of class members so that # constructors and destructors are listed first. If set to NO (the default) # the constructors will appear in the respective orders defined by # SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. # This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO # and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. 
If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to # do proper type resolution of all parameters of a function it will reject a # match between the prototype and the implementation of a member function even # if there is only one candidate or it is obvious which candidate to choose # by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen # will still accept a match between prototype and implementation in such cases. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or macro consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. 
# The appearance of the initializer of individual variables and macros in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command <command> <input-file>, where <command> is the value of # the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. 
The create the layout file # that represents doxygen's defaults, run doxygen with the -l option. # You can optionally specify a file name after the option, if omitted # DoxygenLayout.xml will be used as the name of the layout file. #LAYOUT_FILE = DoxygenLayout.xml #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = YES # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # The WARN_NO_PARAMDOC option can be enabled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = YES # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. 
Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh # *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py # *.f90 *.f *.for *.vhd *.vhdl FILE_PATTERNS = # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. 
This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain images that are included in the documentation (see # the \image command). 
IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command <filter> <input-file>, where <filter> # is the value of the INPUT_FILTER tag, and <input-file> is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty or if # non of the patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) # and it is also possible to disable source filtering for a specific pattern # using *.ext= (so without naming a filter). This option only has effect when # FILTER_SOURCE_FILES is enabled. FILTER_SOURCE_PATTERNS = #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. 
# Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. 
VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. 
Note that when using a custom header you are responsible # for the proper inclusion of any scripts and style sheets that doxygen # needs, which is dependent on the configuration options used. # It is adviced to generate a default header using "doxygen -w html # header.html footer.html stylesheet.css YourConfigFile" and then modify # that header. Note that the header is subject to change so you typically # have to redo this when upgrading to a newer version of doxygen or when changing the value of configuration settings such as GENERATE_TREEVIEW! HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that # the files will be copied as-is; there are no commands or markers available. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. # Doxygen will adjust the colors in the stylesheet and background images # according to this color. Hue is specified as an angle on a colorwheel, # see http://en.wikipedia.org/wiki/Hue for more information. 
# For instance the value 0 represents red, 60 is yellow, 120 is green, # 180 is cyan, 240 is blue, 300 purple, and 360 is red again. # The allowed range is 0 to 359. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of # the colors in the HTML output. For a value of 0 the output will use # grayscales only. A value of 255 will produce the most vivid colors. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to # the luminance component of the colors in the HTML output. Values below # 100 gradually make the output lighter, whereas values above 100 make # the output darker. The value divided by 100 is the actual gamma applied, # so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2, # and 100 does not change the gamma. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. 
Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). 
If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated # that can be used as input for Qt's qhelpgenerator to generate a # Qt Compressed Help (.qch) of the generated HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to # add. 
For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see # <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters"> # Qt Help Project / Custom Filters</a>. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's # filter section matches. # <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes"> # Qt Help Project / Filter Attributes</a>. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before # the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. 
DISABLE_INDEX = YES # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values # (range [0,1..20]) that doxygen will group on one line in the generated HTML # documentation. Note that a value of 0 will completely suppress the enum # values from appearing in the overview section. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list. USE_INLINE_TREES = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open # links to external symbols imported via tag files in a separate window. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are # not supported properly for IE 6.0, but are supported on all modern browsers. 
# Note that when changing this option you need to delete any form_*.png files # in the HTML output before the changes have effect. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax # (see http://www.mathjax.org) which uses client side Javascript for the # rendering instead of using prerendered bitmaps. Use this if you do not # have LaTeX installed or if you want to formulas look prettier in the HTML # output. When enabled you also need to install MathJax separately and # configure the path to it using the MATHJAX_RELPATH option. USE_MATHJAX = NO # When MathJax is enabled you need to specify the location relative to the # HTML output directory using the MATHJAX_RELPATH option. The destination # directory should contain the MathJax.js script. For instance, if the mathjax # directory is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to the # mathjax.org site, so you can quickly see the result without installing # MathJax, but it is strongly recommended to install a local copy of MathJax # before deployment. MATHJAX_RELPATH = http://www.mathjax.org/mathjax # When the SEARCHENGINE tag is enabled doxygen will generate a search box # for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using # HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets # (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = NO # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a PHP enabled web server instead of at the web client # using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. 
The advantage of the server # based approach is that it scales better to large projects and allows # full text search. The disadvantages are that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. 
EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for # the generated latex document. The footer should contain everything after # the last chapter. If it is left blank doxygen will generate a # standard footer. Notice: only use this tag if you know what you are doing! LATEX_FOOTER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include # source code with syntax highlighting in the LaTeX output. # Note that which sources are shown also depends on other settings # such as SOURCE_BROWSER. 
LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. 
# If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. 
XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. 
PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # pointed to by INCLUDE_PATH will be searched when a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. 
PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition that # overrules the definition found in the source code. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all references to function-like macros # that are alone on a line, have an all uppercase name, and do not end with a # semicolon, because these will confuse the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. 
ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option also works with HAVE_DOT disabled, but it is recommended to # install and use dot, since it yields more powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is # allowed to run in parallel. 
When set to 0 (the default) doxygen will # base this on the number of processors available in the system. You can set it # explicitly to a value larger than 0 to get control over the balance # between CPU load and processing speed. DOT_NUM_THREADS = 0 # By default doxygen will write a font called Helvetica to the output # directory and reference it in all dot files that doxygen generates. # When you want a differently looking font you can specify the font name # using DOT_FONTNAME. You need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. 
COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will generate a graphical hierarchy of all classes instead of a textual one. 
GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are svg, png, jpg, or gif. # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the # \mscfile command). MSCFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. 
Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES
makestuff/scripts
gendox/Doxyfile.py
Python
gpl-3.0
72,294
import os import rospy import rospkg from python_qt_binding import QT_BINDING from rqt_gui_py.plugin import Plugin from python_qt_binding import loadUi from python_qt_binding.QtCore import QTimer # Attempt to load QWidget from pyqt4 try: from python_qt_binding.QtGui import QWidget # if not load from pyqt5 except ImportError: from python_qt_binding.QtWidgets import QWidget from robosub_msgs.msg import control, control_status state_types = { 0: "NONE", 1: "ABSOLUTE", 2: "RELATIVE", 3: "ERROR" } class Control(Plugin): def __init__(self, context): super(Control, self).__init__(context) # Give QObjects reasonable names self.setObjectName('Control') # Create QWidget self._widget = QWidget() # Get path to UI file which should be in the "resource" folder of # this package ui_file = os.path.join(rospkg.RosPack().get_path('robosub'), 'src/rqt/rqt_control/resource', 'Control.ui') # Extend the widget with all attributes and children from UI file loadUi(ui_file, self._widget) self.control_timer = QTimer(self) self.control_timer.timeout.connect(self.control_missed) self.control_timer.start(1000) self.control_status_timer = QTimer(self) self.control_status_timer.timeout.connect(self.control_status_missed) self.control_status_timer.start(1000) # Give QObjects reasonable names self._widget.setObjectName('Control') if context.serial_number() > 1: self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number())) # Add widget to the user interface context.add_widget(self._widget) self._widget.statusActive.hide() self._widget.controlActive.hide() self.con_sub = rospy.Subscriber('control', control, self.control_callback, queue_size=1) self.cs_sub = rospy.Subscriber('control_status', control_status, self.control_status_callback, queue_size=1) img_file = os.path.join(rospkg.RosPack().get_path('robosub'), 'src/rqt/resource/robosub_logo.png') self._widget.setStyleSheet(".QWidget {background-image: url(" + img_file + "); background-repeat: no-repeat;" + 
"background-position:bottom right}") def control_missed(self): if not self._widget.controlStale.isVisible(): self._widget.controlStale.show() self._widget.controlActive.hide() def control_status_missed(self): if not self._widget.statusStale.isVisible(): self._widget.statusStale.show() self._widget.statusActive.hide() def control_status_callback(self, m): try: self.control_status_timer.stop() except RuntimeError: pass if self._widget.statusStale.isVisible(): self._widget.statusStale.setVisible(False) self._widget.statusActive.setVisible(True) # Set the states self._widget.forwardStatusState.setText(m.forward_state) self._widget.strafeStatusState.setText(m.strafe_left_state) self._widget.diveStatusState.setText(m.dive_state) self._widget.rollStatusState.setText(m.roll_right_state) self._widget.pitchStatusState.setText(m.pitch_down_state) self._widget.yawStatusState.setText(m.yaw_left_state) self._widget.forwardGoal.setText("{:.4f}".format(m.forward_goal)) self._widget.strafeGoal.setText("{:.4f}".format(m.strafe_left_goal)) self._widget.diveGoal.setText("{:.4f}".format(m.dive_goal)) self._widget.rollGoal.setText("{:.4f}".format(m.roll_right_goal)) self._widget.pitchGoal.setText("{:.4f}".format(m.pitch_down_goal)) self._widget.yawGoal.setText("{:.4f}".format(m.yaw_left_goal)) self.control_status_timer.start(1000) def control_callback(self, m): try: self.control_timer.stop() except RuntimeError: pass if self._widget.controlStale.isVisible(): self._widget.controlStale.hide() self._widget.controlActive.show() # Set the states self._widget.forwardState.setText(state_types[m.forward_state]) self._widget.strafeState.setText(state_types[m.strafe_state]) self._widget.diveState.setText(state_types[m.dive_state]) self._widget.rollState.setText(state_types[m.roll_state]) self._widget.pitchState.setText(state_types[m.pitch_state]) self._widget.yawState.setText(state_types[m.yaw_state]) self._widget.forwardValue.setText("{:.4f}".format(m.forward)) 
self._widget.strafeValue.setText("{:.4f}".format(m.strafe_left)) self._widget.diveValue.setText("{:.4f}".format(m.dive)) self._widget.rollValue.setText("{:.4f}".format(m.roll_right)) self._widget.pitchValue.setText("{:.4f}".format(m.pitch_down)) self._widget.yawValue.setText("{:.4f}".format(m.yaw_left)) self.control_timer.start(1000) def shutdown_plugin(self): self.cs_sub.unregister() self.con_sub.unregister() self.control_timer.stop() self.control_status_timer.stop() def save_settings(self, plugin_settings, instance_settings): # TODO save intrinsic configuration, usually using: # instance_settings.set_value(k, v) pass def restore_settings(self, plugin_settings, instance_settings): # TODO restore intrinsic configuration, usually using: # v = instance_settings.value(k) pass
PalouseRobosub/robosub
src/rqt/rqt_control/src/rqt_control/control.py
Python
gpl-3.0
5,890
#!/usr/bin/env python # -*- coding: UTF-8 -*- # # Solver object # Copyright (C) 2011-2012, Tomi Leppänen (aka Tomin) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """Solver object""" class Solver(): """Solver object This object solves sudokus. It can be used with tools to create sudoku solver application or combined with Runner object to make life easier. See Runner object in sudoku.runner for more information about it. """ def __init__(self, sudoku): """Constructor sudoku parameter is an list created by parse_sudoku in sudoko.tools. 
""" self.sudoku = sudoku self.done = False # if Solver should be stopped self.good = False # if sudoku is completed self.split_mode = False # if split mode is on or not :) self.split_numbers = 10 self.split_request = False # if split is requested or not def __str__(self): s = None for row in self.sudoku: for col in row: if s == None: s = str(col) else: s = s+","+str(col) return s def get_grid(self,row,col): """checks which grid is being procecced""" return [int((row+3)/3),int((col+3)/3)] def isgood_final(self): """Checks if sudoku is completed correctly Use only for completed sudokus """ for a in range(0,9): suma = 0 sumb = 0 for b in range(0,9): suma = suma+self.sudoku[a][b] sumb = sumb+self.sudoku[b][a] if suma != 45 or sumb != 45: return False for r in range(1,4): for c in range(1,4): sumc = 0 for r_n in range(r*3-3,r*3): for c_n in range(c*3-3,c*3): sumc = sumc+self.sudoku[r_n][c_n] if sumc != 45: return False return True def isgood(self): """Checks if a partial (or complete) sudoku is correct This is slower than isgood_final """ for a in range(0,9): numbersa = [] numbersb = [] for b in range(0,9): if self.sudoku[a][b] != "": try: numbersa.index(self.sudoku[a][b]) except ValueError: numbersa.append(self.sudoku[a][b]) else: return False if self.sudoku[b][a] != "": try: numbersb.index(self.sudoku[b][a]) except ValueError: numbersb.append(self.sudoku[b][a]) else: return False for r in range(1,4): for c in range(1,4): numbersc = [] for r_n in range(r*3-3,r*3): for c_n in range(c*3-3,c*3): if self.sudoku[r_n][c_n] != "": try: numbersc.index(self.sudoku[r_n][c_n]) except ValueError: numbersc.append(self.sudoku[r_n][c_n]) else: return False return True def isready(self): """Checks if all cells are filled""" for row in self.sudoku: try: row.index("") except ValueError: pass else: return False return True def get_numbers(self,row,col): """Returns numbers that can be filled into a cell""" numbers = [] numbers.append(self.sudoku[row][col]) numbers = list(range(1,10)) for 
i in range(0,9): try: numbers.remove(self.sudoku[row][i]) except ValueError: pass try: numbers.remove(self.sudoku[i][col]) except ValueError: pass x,y = self.get_grid(row,col) for r in range(int(x*3-3),int(x*3)): for c in range(int(y*3-3),int(y*3)): if self.sudoku[r][c] != "": try: numbers.remove(self.sudoku[r][c]) except ValueError: pass return numbers def run(self): """Solves the sudoku This solves some of the sudoku and should be called until the sudoku is ready. The status can be monitored using Sudoku objects good, done and split_request attributes. Also returns False if something is wrong otherwise returns True. """ changed = False if self.isready(): if self.isgood_final(): self.done = True self.good = True return True else: self.done = True self.good = False return False for row in range(0,9): for col in range(0,9): if self.sudoku[row][col] == "": numbers = self.get_numbers(row,col) if len(numbers) == 1: changed = True # changed! self.sudoku[row][col] = numbers[0] elif len(numbers) == 0: # got into deadlock self.done = True self.good = False return False elif self.split_mode != False and len(numbers) >= 2: changed = True # changed! if self.split_mode == 1 and \ len(numbers) < self.split_numbers: self.split_numbers = len(numbers) elif self.split_mode == 2 and \ len(numbers) == self.split_numbers: # prepare for splitting self.numbers = numbers self.row = row self.col = col self.done = True self.good = False self.split_request = True return True if self.split_mode == 1: self.split_mode = 2 if changed == False: # if nothing has been solved in this round if self.isgood(): self.split_mode = 1 # turns split mode on else: # give up if sudoku is faulty self.done = True self.good = False return False return True
Tomin1/sudoku-solver
sudoku/solver.py
Python
gpl-3.0
7,465
#!/usr/bin/env python # # Eric S. Raymond # # Greatly modified by Nigel W. Moriarty # April 2003 # from pexpect import * import os, sys import getpass import time class ssh_session: "Session with extra state including the password to be used." def __init__(self, user, host, password=None, verbose=0): self.user = user self.host = host self.verbose = verbose self.password = password self.keys = [ 'authenticity', 'assword:', '@@@@@@@@@@@@', 'Command not found.', EOF, ] self.f = open('ssh.out','w') def __repr__(self): outl = 'class :'+self.__class__.__name__ for attr in self.__dict__: if attr == 'password': outl += '\n\t'+attr+' : '+'*'*len(self.password) else: outl += '\n\t'+attr+' : '+str(getattr(self, attr)) return outl def __exec(self, command): "Execute a command on the remote host. Return the output." child = spawn(command, #timeout=10, ) if self.verbose: sys.stderr.write("-> " + command + "\n") seen = child.expect(self.keys) self.f.write(str(child.before) + str(child.after)+'\n') if seen == 0: child.sendline('yes') seen = child.expect(self.keys) if seen == 1: if not self.password: self.password = getpass.getpass('Remote password: ') child.sendline(self.password) child.readline() time.sleep(5) # Added to allow the background running of remote process if not child.isalive(): seen = child.expect(self.keys) if seen == 2: lines = child.readlines() self.f.write(lines) if self.verbose: sys.stderr.write("<- " + child.before + "|\n") try: self.f.write(str(child.before) + str(child.after)+'\n') except: pass self.f.close() return child.before def ssh(self, command): return self.__exec("ssh -l %s %s \"%s\"" \ % (self.user,self.host,command)) def scp(self, src, dst): return self.__exec("scp %s %s@%s:%s" \ % (src, session.user, session.host, dst)) def exists(self, file): "Retrieve file permissions of specified remote file." 
seen = self.ssh("/bin/ls -ld %s" % file) if string.find(seen, "No such file") > -1: return None # File doesn't exist else: return seen.split()[0] # Return permission field of listing.
Alberto-Beralix/Beralix
i386-squashfs-root/usr/share/doc/python-pexpect/examples/ssh_session.py
Python
gpl-3.0
2,839
from .models import * from django.db.models import Count, F, Max, Sum # Game info services def get_median_points(game_id): return median_value(Team.objects.filter(play__game__id=game_id) \ .exclude(play__team__points__isnull=True), 'play__team__points') def get_play_count(game_id): return Play.objects.filter(game__id=game_id).count() def get_game_list(): games = Game.objects.all() \ .annotate(plays=Count('play__id')) \ .annotate(last_played=Max('play__date')) \ .order_by('-plays') return games def get_game_players(game_id): play_players = Play.objects.filter(game__id=game_id) \ .annotate(name=F('team__players__name')) \ .annotate(winner=F('team__winner')) \ .annotate(pid=F('team__players__id')) \ .values('id', 'name', 'pid', 'winner').distinct() return Player.objects.raw( '''SELECT pid, name, COUNT(DISTINCT id) AS 'count', SUM(winner) AS 'wins' FROM ( %s ) GROUP BY pid ORDER BY COUNT(id) DESC, SUM(winner) DESC''' % str(play_players.query), translations={'pid': 'id'}) def get_faction_plays(game_id): return Faction.objects.filter(game__id=game_id) \ .values('name') \ .annotate(wins=Sum('team__winner')) \ .annotate(count=Count('name')) \ .order_by('-count', '-wins') # Player info services def get_player_games(player_id): plays_ids = get_play_ids(player_id) return Game.objects.filter(play__id__in=plays_ids) \ .extra(select={ 'wins': 'SELECT COUNT(DISTINCT bgplays_team.play_id) FROM bgplays_team ' 'INNER JOIN bgplays_team_players ON bgplays_team.id = bgplays_team_players.team_id ' 'INNER JOIN bgplays_play ON bgplays_play.id = bgplays_team.play_id ' 'WHERE winner = 1 ' 'AND bgplays_play.game_id = bgplays_game.id ' 'AND bgplays_team_players.player_id = % s' % player_id}, ) \ .values('name', 'id', 'wins') \ .annotate(count=Count('name')) \ .order_by('-count') def get_player_list(): # The plays calculation takes a lot. # TODO: We should find another way to fetch them. 
players = Player.objects.all() \ .annotate(last_played=Max('team__play__date')) \ .extra(select={ 'plays': 'SELECT COUNT(DISTINCT bgplays_team.play_id) FROM bgplays_team ' 'INNER JOIN bgplays_team_players ON bgplays_team.id = bgplays_team_players.team_id ' 'WHERE bgplays_team_players.player_id = bgplays_player.id'}, ) \ .order_by('-plays', '-last_played') return players def get_player_mates(player_id): # XXX: That distinct() is probably not working as expected # But there are not counterexamples in the current data set plays_ids = get_play_ids(player_id) return Player.objects.filter(team__play__id__in=plays_ids) \ .exclude(id=player_id) \ .values('name', 'team__play__id') \ .distinct() \ .values('name') \ .annotate(count=Count('name')) \ .order_by('-count') # Helper methods def get_play_ids(player_id): return Play.objects.filter(team__players__id=player_id).values('id').distinct() def median_value(queryset, term): count = queryset.count() values = queryset.values_list(term, flat=True).order_by(term) if count % 2 == 1: return values[int(round(count / 2))] elif count > 0: return sum(values[count / 2 - 1:count / 2 + 1]) / 2.0 else: return 0
Alberdi/marcestats
bgplays/services.py
Python
gpl-3.0
3,592
import sys import smtplib def output((code, msg)): sys.stdout.write('%s %s\n' % (code, msg)) sys.stdout.flush() smtp = smtplib.SMTP('localhost', 2500) output(smtp.ehlo('moon.localdomain')) print smtp.esmtp_features output(smtp.mail('Damien Churchill <damoxc@gmail.com>')) output(smtp.rcpt('Damien Churchill <damoxc@damoxc.net>')) output(smtp.data('Subject: Testing\n\nTest')) output(smtp.quit())
damoxc/vsmtpd
test_smtp.py
Python
gpl-3.0
406
# -*- coding: utf-8 -*- ## ## ## This file is part of Indico. ## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN). ## ## Indico is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 3 of the ## License, or (at your option) any later version. ## ## Indico is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Indico;if not, see <http://www.gnu.org/licenses/>. from indico.core.db import DBMgr from MaKaC import user from MaKaC.common.indexes import IndexesHolder """ Generates a file with all the avatars that are not well indexed by name. """ DBMgr.getInstance().startRequest() error = False ah = user.AvatarHolder() ni=IndexesHolder()._getIdx()["name"] log = file('names_ids.txt','w') lines = [] for uid, user in ah._getIdx().iteritems(): for word in ni._words: if uid in ni._words[word] and word != user.getName(): lines.append(uid + "-" + user.getName() + "-" + word) log.writelines("\n".join(lines)) log.close() if not error: DBMgr.getInstance().endRequest() print "No error. The change are saved" else: print "There were errors. The changes was not saved"
Ictp/indico
bin/utils/getNameBadIndexed.py
Python
gpl-3.0
1,515
# Author: Nic Wolfe <nic@wolfeden.ca> # URL: https://sickrage.tv # Git: https://github.com/SickRage/SickRage.git # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. import os.path import sickbeard from sickbeard import helpers, logger from sickbeard.metadata.generic import GenericMetadata from sickrage.helper.encoding import ek from sickrage.helper.exceptions import ShowDirectoryNotFoundException from hachoir_parser import createParser from hachoir_metadata import extractMetadata from hachoir_core.log import log log.use_print = False class ImageCache: def __init__(self): pass def __del__(self): pass def _cache_dir(self): """ Builds up the full path to the image cache directory """ return ek(os.path.abspath, ek(os.path.join, sickbeard.CACHE_DIR, 'images')) def _thumbnails_dir(self): """ Builds up the full path to the thumbnails image cache directory """ return ek(os.path.abspath, ek(os.path.join, self._cache_dir(), 'thumbnails')) def poster_path(self, indexer_id): """ Builds up the path to a poster cache for a given Indexer ID :param indexer_id: ID of the show to use in the file name :return: a full path to the cached poster file for the given Indexer ID """ poster_file_name = str(indexer_id) + '.poster.jpg' return ek(os.path.join, self._cache_dir(), poster_file_name) def banner_path(self, indexer_id): """ Builds up the path to a banner cache for a given Indexer ID :param 
indexer_id: ID of the show to use in the file name :return: a full path to the cached banner file for the given Indexer ID """ banner_file_name = str(indexer_id) + '.banner.jpg' return ek(os.path.join, self._cache_dir(), banner_file_name) def fanart_path(self, indexer_id): """ Builds up the path to a fanart cache for a given Indexer ID :param indexer_id: ID of the show to use in the file name :return: a full path to the cached fanart file for the given Indexer ID """ fanart_file_name = str(indexer_id) + '.fanart.jpg' return ek(os.path.join, self._cache_dir(), fanart_file_name) def poster_thumb_path(self, indexer_id): """ Builds up the path to a poster thumb cache for a given Indexer ID :param indexer_id: ID of the show to use in the file name :return: a full path to the cached poster thumb file for the given Indexer ID """ posterthumb_file_name = str(indexer_id) + '.poster.jpg' return ek(os.path.join, self._thumbnails_dir(), posterthumb_file_name) def banner_thumb_path(self, indexer_id): """ Builds up the path to a banner thumb cache for a given Indexer ID :param indexer_id: ID of the show to use in the file name :return: a full path to the cached banner thumb file for the given Indexer ID """ bannerthumb_file_name = str(indexer_id) + '.banner.jpg' return ek(os.path.join, self._thumbnails_dir(), bannerthumb_file_name) def has_poster(self, indexer_id): """ Returns true if a cached poster exists for the given Indexer ID """ poster_path = self.poster_path(indexer_id) logger.log(u"Checking if file " + str(poster_path) + " exists", logger.DEBUG) return ek(os.path.isfile, poster_path) def has_banner(self, indexer_id): """ Returns true if a cached banner exists for the given Indexer ID """ banner_path = self.banner_path(indexer_id) logger.log(u"Checking if file " + str(banner_path) + " exists", logger.DEBUG) return ek(os.path.isfile, banner_path) def has_fanart(self, indexer_id): """ Returns true if a cached fanart exists for the given Indexer ID """ fanart_path = 
self.fanart_path(indexer_id) logger.log(u"Checking if file " + str(fanart_path) + " exists", logger.DEBUG) return ek(os.path.isfile, fanart_path) def has_poster_thumbnail(self, indexer_id): """ Returns true if a cached poster thumbnail exists for the given Indexer ID """ poster_thumb_path = self.poster_thumb_path(indexer_id) logger.log(u"Checking if file " + str(poster_thumb_path) + " exists", logger.DEBUG) return ek(os.path.isfile, poster_thumb_path) def has_banner_thumbnail(self, indexer_id): """ Returns true if a cached banner exists for the given Indexer ID """ banner_thumb_path = self.banner_thumb_path(indexer_id) logger.log(u"Checking if file " + str(banner_thumb_path) + " exists", logger.DEBUG) return ek(os.path.isfile, banner_thumb_path) BANNER = 1 POSTER = 2 BANNER_THUMB = 3 POSTER_THUMB = 4 FANART = 5 def which_type(self, path): """ Analyzes the image provided and attempts to determine whether it is a poster or banner. :param path: full path to the image :return: BANNER, POSTER if it concluded one or the other, or None if the image was neither (or didn't exist) """ if not ek(os.path.isfile, path): logger.log(u"Couldn't check the type of " + str(path) + " cause it doesn't exist", logger.WARNING) return None # use hachoir to parse the image for us img_parser = createParser(path) img_metadata = extractMetadata(img_parser) if not img_metadata: logger.log(u"Unable to get metadata from " + str(path) + ", not using your existing image", logger.DEBUG) return None img_ratio = float(img_metadata.get('width')) / float(img_metadata.get('height')) img_parser.stream._input.close() # most posters are around 0.68 width/height ratio (eg. 680/1000) if 0.55 < img_ratio < 0.8: return self.POSTER # most banners are around 5.4 width/height ratio (eg. 758/140) elif 5 < img_ratio < 6: return self.BANNER # most fanart are around 1.77777 width/height ratio (eg. 
1280/720 and 1920/1080) elif 1.7 < img_ratio < 1.8: return self.FANART else: logger.log(u"Image has size ratio of " + str(img_ratio) + ", unknown type", logger.WARNING) return None def _cache_image_from_file(self, image_path, img_type, indexer_id): """ Takes the image provided and copies it to the cache folder :param image_path: path to the image we're caching :param img_type: BANNER or POSTER or FANART :param indexer_id: id of the show this image belongs to :return: bool representing success """ # generate the path based on the type & indexer_id if img_type == self.POSTER: dest_path = self.poster_path(indexer_id) elif img_type == self.BANNER: dest_path = self.banner_path(indexer_id) elif img_type == self.FANART: dest_path = self.fanart_path(indexer_id) else: logger.log(u"Invalid cache image type: " + str(img_type), logger.ERROR) return False # make sure the cache folder exists before we try copying to it if not ek(os.path.isdir, self._cache_dir()): logger.log(u"Image cache dir didn't exist, creating it at " + str(self._cache_dir())) ek(os.makedirs, self._cache_dir()) if not ek(os.path.isdir, self._thumbnails_dir()): logger.log(u"Thumbnails cache dir didn't exist, creating it at " + str(self._thumbnails_dir())) ek(os.makedirs, self._thumbnails_dir()) logger.log(u"Copying from " + image_path + " to " + dest_path) helpers.copyFile(image_path, dest_path) return True def _cache_image_from_indexer(self, show_obj, img_type): """ Retrieves an image of the type specified from indexer and saves it to the cache folder :param show_obj: TVShow object that we want to cache an image for :param img_type: BANNER or POSTER or FANART :return: bool representing success """ # generate the path based on the type & indexer_id if img_type == self.POSTER: img_type_name = 'poster' dest_path = self.poster_path(show_obj.indexerid) elif img_type == self.BANNER: img_type_name = 'banner' dest_path = self.banner_path(show_obj.indexerid) elif img_type == self.POSTER_THUMB: img_type_name = 
'poster_thumb' dest_path = self.poster_thumb_path(show_obj.indexerid) elif img_type == self.BANNER_THUMB: img_type_name = 'banner_thumb' dest_path = self.banner_thumb_path(show_obj.indexerid) elif img_type == self.FANART: img_type_name = 'fanart' dest_path = self.fanart_path(show_obj.indexerid) else: logger.log(u"Invalid cache image type: " + str(img_type), logger.ERROR) return False # retrieve the image from indexer using the generic metadata class # TODO: refactor metadata_generator = GenericMetadata() img_data = metadata_generator._retrieve_show_image(img_type_name, show_obj) result = metadata_generator._write_image(img_data, dest_path) return result def fill_cache(self, show_obj): """ Caches all images for the given show. Copies them from the show dir if possible, or downloads them from indexer if they aren't in the show dir. :param show_obj: TVShow object to cache images for """ logger.log(u"Checking if we need any cache images for show " + str(show_obj.indexerid), logger.DEBUG) # check if the images are already cached or not need_images = {self.POSTER: not self.has_poster(show_obj.indexerid), self.BANNER: not self.has_banner(show_obj.indexerid), self.POSTER_THUMB: not self.has_poster_thumbnail(show_obj.indexerid), self.BANNER_THUMB: not self.has_banner_thumbnail(show_obj.indexerid), self.FANART: not self.has_fanart(show_obj.indexerid)} if not need_images[self.POSTER] and not need_images[self.BANNER] and not need_images[self.POSTER_THUMB] and not \ need_images[self.BANNER_THUMB] and not need_images[self.FANART]: logger.log(u"No new cache images needed, not retrieving new ones", logger.DEBUG) return # check the show dir for poster or banner images and use them if need_images[self.POSTER] or need_images[self.BANNER] or need_images[self.FANART]: try: for cur_provider in sickbeard.metadata_provider_dict.values(): logger.log(u"Checking if we can use the show image from the " + cur_provider.name + " metadata", logger.DEBUG) if ek(os.path.isfile, 
cur_provider.get_poster_path(show_obj)): cur_file_name = os.path.abspath(cur_provider.get_poster_path(show_obj)) cur_file_type = self.which_type(cur_file_name) if cur_file_type == None: logger.log(u"Unable to retrieve image type, not using the image from " + str(cur_file_name), logger.WARNING) continue logger.log(u"Checking if image " + cur_file_name + " (type " + str( cur_file_type) + " needs metadata: " + str(need_images[cur_file_type]), logger.DEBUG) if cur_file_type in need_images and need_images[cur_file_type]: logger.log( u"Found an image in the show dir that doesn't exist in the cache, caching it: " + cur_file_name + ", type " + str( cur_file_type), logger.DEBUG) self._cache_image_from_file(cur_file_name, cur_file_type, show_obj.indexerid) need_images[cur_file_type] = False except ShowDirectoryNotFoundException: logger.log(u"Unable to search for images in show dir because it doesn't exist", logger.WARNING) # download from indexer for missing ones for cur_image_type in [self.POSTER, self.BANNER, self.POSTER_THUMB, self.BANNER_THUMB,self.FANART]: logger.log(u"Seeing if we still need an image of type " + str(cur_image_type) + ": " + str( need_images[cur_image_type]), logger.DEBUG) if cur_image_type in need_images and need_images[cur_image_type]: self._cache_image_from_indexer(show_obj, cur_image_type) logger.log(u"Done cache check")
badloop/SickRage
sickbeard/image_cache.py
Python
gpl-3.0
13,572
#------------------------------------------------------------
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# Download Tools
# Based on the code from VideoMonkey XBMC Plugin
#------------------------------------------------------------
# pelisalacarta
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
# Creado por:
#     Jesús (tvalacarta@gmail.com)
#     jurrabi (jurrabi@gmail.com)
#     bandavi (xbandavix@gmail.com)
# Licencia: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------
# Historial de cambios:
#------------------------------------------------------------

import urlparse,urllib2,urllib
import time
import os
import config
import logger
import re
import downloadtools
import socket

logger.info("[scrapertools.py] init")

# True - Muestra las cabeceras HTTP en el log
# False - No las muestra
DEBUG_LEVEL = True

# Cache modes (stored as strings because they come from plugin settings).
CACHE_ACTIVA = "0"   # Automatica
CACHE_SIEMPRE = "1"  # Cachear todo
CACHE_NUNCA = "2"    # No cachear nada

CACHE_PATH = config.get_setting("cache.dir")
logger.info("[scrapertools.py] CACHE_PATH="+CACHE_PATH)

DEBUG = False


def cache_page(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],modo_cache=CACHE_ACTIVA, timeout=socket.getdefaulttimeout()):
    """snake_case alias kept for newer callers; delegates to cachePage unchanged."""
    return cachePage(url,post,headers,modo_cache,timeout=timeout)


# TODO: (3.1) Quitar el parámetro modoCache (ahora se hace por configuración)
# TODO: (3.2) Usar notación minusculas_con_underscores para funciones y variables como recomienda Python http://www.python.org/dev/peps/pep-0008/
def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],modoCache=CACHE_ACTIVA, timeout=socket.getdefaulttimeout()):
    """Download a page honouring the plugin cache mode.

    NOTE: the modoCache parameter is immediately overridden by the
    "cache.mode" setting, so the effective mode always comes from config.
    Returns the page body as a string ("" on download failure in NUNCA mode).
    """
    logger.info("[scrapertools.py] cachePage url="+url)
    modoCache = config.get_setting("cache.mode")
    # NOTE(review): dead commented-out Plex branch (PMS HTTP.Request) removed here.

    # CACHE_NUNCA: always hits the network.  POST requests are never cached
    # either, because the cache key is the URL alone.
    if modoCache == CACHE_NUNCA or post is not None:
        logger.info("[scrapertools.py] MODO_CACHE=2 (no cachear)")
        try:
            data = downloadpage(url,post,headers, timeout=timeout)
        except:
            data = ""

    # CACHE_SIEMPRE: serve from cache without any freshness check,
    # downloading only when there is no cached copy at all.
    elif modoCache == CACHE_SIEMPRE:
        logger.info("[scrapertools.py] MODO_CACHE=1 (cachear todo)")
        cachedFile, newFile = getCacheFileNames(url)
        if cachedFile == "":
            logger.debug("[scrapertools.py] No está en cache")
            data = downloadpage(url,post,headers)
            outfile = open(newFile,"w")
            outfile.write(data)
            outfile.flush()
            outfile.close()
            logger.info("[scrapertools.py] Grabado a " + newFile)
        else:
            logger.info("[scrapertools.py] Leyendo de cache " + cachedFile)
            infile = open( cachedFile )
            data = infile.read()
            infile.close()

    # CACHE_ACTIVA: serve from cache unless the server reports a newer
    # version (if-modified-since against the timestamp in the file name).
    else:
        logger.info("[scrapertools.py] MODO_CACHE=0 (automática)")
        data = ""
        cachedFile, newFile = getCacheFileNames(url)
        if cachedFile == "":
            logger.debug("[scrapertools.py] No está en cache")
            data = downloadpage(url,post,headers)
            outfile = open(newFile,"w")
            outfile.write(data)
            outfile.flush()
            outfile.close()
            logger.info("[scrapertools.py] Grabado a " + newFile)
        else:
            # The old timestamp is embedded in the cached file name
            # as YYYYMMDDHHMMSS just before the ".cache" suffix.
            oldtimestamp = time.mktime( time.strptime(cachedFile[-20:-6], "%Y%m%d%H%M%S") )
            logger.info("[scrapertools.py] oldtimestamp="+cachedFile[-20:-6])
            logger.info("[scrapertools.py] oldtimestamp="+time.ctime(oldtimestamp))
            updated,data = downloadtools.downloadIfNotModifiedSince(url,oldtimestamp)
            if updated:
                # Replace the stale copy with the fresh one.
                logger.debug("[scrapertools.py] Borrando "+cachedFile)
                os.remove(cachedFile)
                outfile = open(newFile,"w")
                outfile.write(data)
                outfile.flush()
                outfile.close()
                logger.info("[scrapertools.py] Grabado a " + newFile)
            else:
                logger.info("[scrapertools.py] Leyendo de cache " + cachedFile)
                infile = open( cachedFile )
                data = infile.read()
                infile.close()
    return data


def getCacheFileNames(url):
    """Return (cachedFile, newFile) for *url*.

    cachedFile is "" when the URL is not cached yet; newFile is the path a
    fresh download should be written to (timestamped, under CACHE/<domain>/).
    """
    siteCachePath = getSiteCachePath(url)
    # The cache id is the md5 of the URL, split as aa/bbbbbb... directories.
    cacheId = get_md5(url)
    logger.debug("[scrapertools.py] cacheId="+cacheId)
    nowtimestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    logger.debug("[scrapertools.py] nowtimestamp="+nowtimestamp)
    ruta = os.path.join( siteCachePath , cacheId[:2] , cacheId[2:] )
    newFile = os.path.join( ruta , nowtimestamp + ".cache" )
    logger.debug("[scrapertools.py] newFile="+newFile)
    if not os.path.exists(ruta):
        os.makedirs( ruta )
    cachedFile = getCachedFile(siteCachePath,cacheId)
    return cachedFile, newFile


def getCachedFile(siteCachePath,cacheId):
    """Locate the single cached file for *cacheId*, or "" if none/ambiguous.

    More than one match is treated as an invalid cache entry and all
    candidates are deleted.
    """
    mascara = os.path.join(siteCachePath,cacheId[:2],cacheId[2:],"*.cache")
    logger.debug("[scrapertools.py] mascara="+mascara)
    import glob
    ficheros = glob.glob( mascara )
    logger.debug("[scrapertools.py] Hay %d ficheros con ese id" % len(ficheros))
    cachedFile = ""
    if len(ficheros)>1:
        logger.debug("[scrapertools.py] Cache inválida")
        for fichero in ficheros:
            logger.debug("[scrapertools.py] Borrando "+fichero)
            os.remove(fichero)
        cachedFile = ""
    elif len(ficheros)==1:
        cachedFile = ficheros[0]
    return cachedFile


def getSiteCachePath(url):
    """Return (creating if needed) the cache directory for *url*'s domain.

    Only the last two labels of the host are used ("www.foo.com" -> "foo.com").
    """
    dominio = urlparse.urlparse(url)[1]
    logger.debug("[scrapertools.py] dominio="+dominio)
    nombres = dominio.split(".")
    if len(nombres)>1:
        dominio = nombres[len(nombres)-2]+"."+nombres[len(nombres)-1]
    else:
        dominio = nombres[0]
    logger.debug("[scrapertools.py] dominio="+dominio)
    siteCachePath = os.path.join( CACHE_PATH , dominio )
    if not os.path.exists(CACHE_PATH):
        try:
            os.mkdir( CACHE_PATH )
        except:
            logger.error("[scrapertools.py] Error al crear directorio "+CACHE_PATH)
    if not os.path.exists(siteCachePath):
        try:
            os.mkdir( siteCachePath )
        except:
            logger.error("[scrapertools.py] Error al crear directorio "+siteCachePath)
    logger.debug("[scrapertools.py] siteCachePath="+siteCachePath)
    return siteCachePath


def cachePage2(url,headers):
    """Plain GET with explicit headers; retries once with spaces escaped."""
    logger.info("Descargando " + url)
    inicio = time.clock()
    req = urllib2.Request(url)
    for header in headers:
        logger.info(header[0]+":"+header[1])
        req.add_header(header[0], header[1])
    try:
        response = urllib2.urlopen(req)
    except:
        # Retry with spaces percent-encoded; some scraped URLs are unescaped.
        req = urllib2.Request(url.replace(" ","%20"))
        for header in headers:
            logger.info(header[0]+":"+header[1])
            req.add_header(header[0], header[1])
        response = urllib2.urlopen(req)
    data=response.read()
    response.close()
    fin = time.clock()
    logger.info("Descargado en %d segundos " % (fin-inicio+1))
    return data


def cachePagePost(url,post):
    """Plain POST with a fixed Firefox User-Agent; retries with spaces escaped."""
    logger.info("Descargando " + url)
    inicio = time.clock()
    req = urllib2.Request(url,post)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    try:
        response = urllib2.urlopen(req)
    except:
        req = urllib2.Request(url.replace(" ","%20"),post)
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
        response = urllib2.urlopen(req)
    data=response.read()
    response.close()
    fin = time.clock()
    logger.info("Descargado en %d segundos " % (fin-inicio+1))
    return data
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that does NOT follow redirects.

    Every 3xx response is returned as-is (with .status/.code set) so the
    caller can inspect the Location header instead of being redirected.
    """
    def http_error_302(self, req, fp, code, msg, headers):
        infourl = urllib.addinfourl(fp, headers, req.get_full_url())
        infourl.status = code
        infourl.code = code
        return infourl
    http_error_300 = http_error_302
    http_error_301 = http_error_302
    http_error_303 = http_error_302
    http_error_307 = http_error_302


def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],follow_redirects=True, timeout=socket.getdefaulttimeout(), header_to_get=None):
    """Core downloader: cookie-aware GET/POST with optional timeout.

    Falls back from cookielib to ClientCookie to no cookies at all.
    Transparently gunzips responses.  If header_to_get is given, the value
    of that response header is returned instead of the body.  On HTTPError
    the error body is returned.
    """
    logger.info("[scrapertools.py] downloadpage")
    logger.info("[scrapertools.py] url="+url)
    if post is not None:
        logger.info("[scrapertools.py] post="+post)
    else:
        logger.info("[scrapertools.py] post=None")

    # ---------------------------------
    # Cookie setup: prefer cookielib, then ClientCookie, else no cookies.
    # ---------------------------------
    ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' )
    logger.info("[scrapertools.py] ficherocookies="+ficherocookies)
    cj = None
    ClientCookie = None
    cookielib = None
    try:
        logger.info("[scrapertools.py] Importando cookielib")
        import cookielib
    except ImportError:
        logger.info("[scrapertools.py] cookielib no disponible")
        try:
            logger.info("[scrapertools.py] Importando ClientCookie")
            import ClientCookie
        except ImportError:
            logger.info("[scrapertools.py] ClientCookie no disponible")
            # Neither module available: plain urllib2 without a cookie jar.
            urlopen = urllib2.urlopen
            Request = urllib2.Request
        else:
            logger.info("[scrapertools.py] ClientCookie disponible")
            urlopen = ClientCookie.urlopen
            Request = ClientCookie.Request
            cj = ClientCookie.MozillaCookieJar()
    else:
        logger.info("[scrapertools.py] cookielib disponible")
        urlopen = urllib2.urlopen
        Request = urllib2.Request
        logger.info("[scrapertools.py] cambio en politicas")
        # Jar with a custom (delegating) policy; guarded because
        # MyCookiePolicy is defined later in this module.
        try:
            cj = cookielib.MozillaCookieJar()
            cj.set_policy(MyCookiePolicy())
        except:
            import traceback
            logger.info(traceback.format_exc())

    if cj is not None:
        logger.info("[scrapertools.py] Hay cookies")
        if os.path.isfile(ficherocookies):
            logger.info("[scrapertools.py] Leyendo fichero cookies")
            try:
                cj.load(ficherocookies,ignore_discard=True)
            except:
                logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
                os.remove(ficherocookies)
        # Install the jar into an opener.
        if cookielib is not None:
            logger.info("[scrapertools.py] opener usando urllib2 (cookielib)")
            if not follow_redirects:
                opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler())
            else:
                opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj))
            urllib2.install_opener(opener)
        else:
            logger.info("[scrapertools.py] opener usando ClientCookie")
            opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
            ClientCookie.install_opener(opener)

    # -------------------------------------------------
    # Cookies instaladas, lanza la petición
    # -------------------------------------------------
    inicio = time.clock()
    txheaders = {}
    if post is None:
        logger.info("[scrapertools.py] petición GET")
    else:
        logger.info("[scrapertools.py] petición POST")
    logger.info("[scrapertools.py] ---------------------------")
    for header in headers:
        logger.info("[scrapertools.py] header %s=%s" % (str(header[0]),str(header[1])) )
        txheaders[header[0]]=header[1]
    logger.info("[scrapertools.py] ---------------------------")

    req = Request(url, post, txheaders)
    try:
        if timeout is None:
            logger.info("[scrapertools.py] Peticion sin timeout")
            handle=urlopen(req)
        else:
            logger.info("[scrapertools.py] Peticion con timeout")
            # Works on every Python version: set the socket default timeout
            # around the call, then restore it.
            deftimeout = socket.getdefaulttimeout()
            socket.setdefaulttimeout(timeout)
            handle=urlopen(req)
            socket.setdefaulttimeout(deftimeout)
        logger.info("[scrapertools.py] ...hecha")

        logger.info("[scrapertools.py] Grabando cookies...")
        cj.save(ficherocookies,ignore_discard=True)
        logger.info("[scrapertools.py] ...hecho")

        if handle.info().get('Content-Encoding') == 'gzip':
            logger.info("[scrapertools.py] gzipped")
            fin = inicio
            import StringIO
            data=handle.read()
            compressedstream = StringIO.StringIO(data)
            import gzip
            gzipper = gzip.GzipFile(fileobj=compressedstream)
            data = gzipper.read()
            gzipper.close()
            fin = time.clock()
        else:
            logger.info("[scrapertools.py] normal")
            data = handle.read()
    except urllib2.HTTPError as e:
        # HTTP errors still carry a body; return it so scrapers can parse it.
        import traceback
        logger.info(traceback.format_exc())
        data = e.read()
        return data

    info = handle.info()
    logger.info("[scrapertools.py] Respuesta")
    logger.info("[scrapertools.py] ---------------------------")
    for header in info:
        logger.info("[scrapertools.py] "+header+"="+info[header])
        # Truco para devolver el valor de un header en lugar del cuerpo entero
        if header_to_get is not None:
            if header==header_to_get:
                data=info[header]
    handle.close()
    logger.info("[scrapertools.py] ---------------------------")

    fin = time.clock()
    logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
    return data
import cookielib


class MyCookiePolicy(cookielib.DefaultCookiePolicy):
    """Cookie policy that delegates every decision to DefaultCookiePolicy.

    Kept as a named subclass so per-decision logging can be re-enabled while
    debugging cookie problems; behaviour is identical to the default policy.
    """
    def set_ok(self, cookie, request):
        return cookielib.DefaultCookiePolicy.set_ok(self, cookie, request)

    def return_ok(self, cookie, request):
        return cookielib.DefaultCookiePolicy.return_ok(self, cookie, request)

    def domain_return_ok(self, domain, request):
        return cookielib.DefaultCookiePolicy.domain_return_ok(self, domain, request)

    def path_return_ok(self,path, request):
        return cookielib.DefaultCookiePolicy.path_return_ok(self, path, request)


def downloadpagewithcookies(url):
    """GET *url* with the shared cookie jar and browser-like headers.

    NOTE(review): the header set hard-codes 'Host':'www.meristation.com' —
    looks site-specific; confirm before reusing for other domains.
    """
    # ---------------------------------
    # Instala las cookies
    # ---------------------------------
    ficherocookies = os.path.join( config.get_data_path(), 'cookies.dat' )
    logger.info("[scrapertools.py] Cookiefile="+ficherocookies)
    cj = None
    ClientCookie = None
    cookielib = None
    # Prefer cookielib, fall back to ClientCookie, else no cookie jar.
    try:
        import cookielib
    except ImportError:
        try:
            import ClientCookie
        except ImportError:
            urlopen = urllib2.urlopen
            Request = urllib2.Request
        else:
            urlopen = ClientCookie.urlopen
            Request = ClientCookie.Request
            cj = ClientCookie.MozillaCookieJar()
    else:
        urlopen = urllib2.urlopen
        Request = urllib2.Request
        cj = cookielib.MozillaCookieJar()

    if cj is not None:
        if os.path.isfile(ficherocookies):
            try:
                cj.load(ficherocookies)
            except:
                logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
                os.remove(ficherocookies)
        # Install the jar in the matching opener.
        if cookielib is not None:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            urllib2.install_opener(opener)
        else:
            opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
            ClientCookie.install_opener(opener)

    theurl = url
    # Browser-like headers; some sites reject obviously automated clients.
    txheaders =  {
        'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3',
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Host':'www.meristation.com',
        'Accept-Language':'es-es,es;q=0.8,en-us;q=0.5,en;q=0.3',
        'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
        'Keep-Alive':'300',
        'Connection':'keep-alive'}
    req = Request(theurl, None, txheaders)
    handle = urlopen(req)
    # Persist any cookies the server set.
    cj.save(ficherocookies)
    data=handle.read()
    handle.close()
    return data
def downloadpageWithoutCookies(url):
    """GET *url* with no cookie jar, faking an XMLHttpRequest.

    Retries once with spaces percent-encoded when the first attempt fails.
    """
    logger.info("[scrapertools.py] Descargando " + url)
    inicio = time.clock()
    req = urllib2.Request(url)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
    req.add_header('X-Requested-With','XMLHttpRequest')
    try:
        response = urllib2.urlopen(req)
    except:
        req = urllib2.Request(url.replace(" ","%20"))
        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; es-ES; rv:1.9.0.14) Gecko/2009082707 Firefox/3.0.14')
        response = urllib2.urlopen(req)
    data=response.read()
    response.close()
    fin = time.clock()
    logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
    return data


def downloadpageGzip(url):
    """GET *url* advertising gzip support and gunzip the response.

    Returns the decompressed body, or the raw body when decompression fails
    (e.g. the server ignored Accept-Encoding).
    """
    # Inicializa la librería de las cookies
    ficherocookies = os.path.join( config.get_data_path(), 'cookies.dat' )
    logger.info("Cookiefile="+ficherocookies)
    inicio = time.clock()
    cj = None
    ClientCookie = None
    cookielib = None
    # Prefer cookielib, fall back to ClientCookie, else no cookie jar.
    try:
        import cookielib
    except ImportError:
        try:
            import ClientCookie
        except ImportError:
            urlopen = urllib2.urlopen
            Request = urllib2.Request
        else:
            urlopen = ClientCookie.urlopen
            Request = ClientCookie.Request
            cj = ClientCookie.MozillaCookieJar()
    else:
        urlopen = urllib2.urlopen
        Request = urllib2.Request
        cj = cookielib.MozillaCookieJar()

    # ---------------------------------
    # Instala las cookies
    # ---------------------------------
    if cj is not None:
        if os.path.isfile(ficherocookies):
            try:
                cj.load(ficherocookies)
            except:
                logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
                os.remove(ficherocookies)
        if cookielib is not None:
            opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
            urllib2.install_opener(opener)
        else:
            opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
            ClientCookie.install_opener(opener)

    theurl = url
    import httplib
    parsedurl = urlparse.urlparse(url)
    logger.info("parsedurl="+str(parsedurl))
    # Referer is derived from the target URL itself (scheme://host).
    txheaders =  {
        'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3',
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language':'es-es,es;q=0.8,en-us;q=0.5,en;q=0.3',
        'Accept-Charset':'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
        'Accept-Encoding':'gzip,deflate',
        'Keep-Alive':'300',
        'Connection':'keep-alive',
        'Referer':parsedurl[0]+"://"+parsedurl[1]}
    logger.info(str(txheaders))

    req = Request(theurl, None, txheaders)
    handle = urlopen(req)
    cj.save(ficherocookies)
    data=handle.read()
    handle.close()
    fin = time.clock()
    logger.info("[scrapertools.py] Descargado 'Gzipped data' en %d segundos " % (fin-inicio+1))

    # Descomprime el archivo de datos Gzip
    try:
        fin = inicio
        import StringIO
        compressedstream = StringIO.StringIO(data)
        import gzip
        gzipper = gzip.GzipFile(fileobj=compressedstream)
        data1 = gzipper.read()
        gzipper.close()
        fin = time.clock()
        logger.info("[scrapertools.py] 'Gzipped data' descomprimido en %d segundos " % (fin-inicio+1))
        return data1
    except:
        # Not gzipped after all — hand back the raw bytes.
        return data
def printMatches(matches):
    """Log every regex match together with its index (debug helper)."""
    i = 0
    for match in matches:
        logger.info("[scrapertools.py] %d %s" % (i , match))
        i = i + 1


def get_match(data,patron,index=0):
    """Return capture *index* of *patron* in *data*; raises IndexError if absent."""
    matches = re.findall( patron , data , flags=re.DOTALL )
    return matches[index]


def find_single_match(data,patron,index=0):
    """Like get_match but returns "" instead of raising when nothing matches."""
    try:
        matches = re.findall( patron , data , flags=re.DOTALL )
        return matches[index]
    except:
        return ""


def entityunescape(cadena):
    """Alias kept for old callers; see unescape()."""
    return unescape(cadena)


def unescape(text):
    """Removes HTML or XML character references and entities from a text string.

    keep &amp;, &gt;, &lt; in the source code.
    from Fredrik Lundh
    http://effbot.org/zone/re-sub.htm#unescape-html
    """
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # Numeric character reference (decimal or &#x hex).
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16)).encode("utf-8")
                else:
                    return unichr(int(text[2:-1])).encode("utf-8")
            except ValueError:
                logger.info("error de valor")
                pass
        else:
            # Named entity (&ntilde; etc.); unknown names are left untouched.
            try:
                import htmlentitydefs
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]).encode("utf-8")
            except KeyError:
                logger.info("keyerror")
                pass
            except:
                pass
        return text  # leave as is
    return re.sub(r"&#?\w+;", fixup, text)


# Convierte los codigos html "&ntilde;" y lo reemplaza por "ñ" caracter unicode utf-8
def decodeHtmlentities(string):
    """Replace HTML entities (named and numeric) by their utf-8 characters."""
    string = entitiesfix(string)
    entity_re = re.compile(r"&(#?)(\d{1,5}|\w{1,8});")

    def substitute_entity(match):
        from htmlentitydefs import name2codepoint as n2cp
        ent = match.group(2)
        if match.group(1) == "#":
            # Numeric entity.
            return unichr(int(ent)).encode('utf-8')
        else:
            cp = n2cp.get(ent)
            if cp:
                return unichr(cp).encode('utf-8')
            else:
                # Unknown name: keep the original text.
                return match.group()

    return entity_re.subn(substitute_entity, string)[0]


def entitiesfix(string):
    """Append the missing ';' to common Spanish entities scrapers often drop.

    Las entidades comienzan siempre con el símbolo & , y terminan con un
    punto y coma ( ; ).  Already-terminated entities are preserved thanks to
    the final ';;' -> ';' collapse.
    """
    for ent in ("&aacute","&eacute","&iacute","&oacute","&uacute",
                "&Aacute","&Eacute","&Iacute","&Oacute","&Uacute",
                "&uuml","&Uuml","&ntilde","&#191","&#161"):
        string = string.replace(ent, ent + ";")
    string = string.replace(";;" ,";")
    return string
# Ordered clean-up operations for htmlclean(): ("lit", s) removes the literal
# string s, ("re", p) removes every DOTALL match of pattern p.  The sequence
# mirrors the original statement order exactly.
_HTMLCLEAN_OPS = [
    ("lit","<center>"), ("lit","</center>"),
    ("lit","<cite>"), ("lit","</cite>"),
    ("lit","<em>"), ("lit","</em>"),
    ("lit","<b>"), ("lit","</b>"),
    ("lit","<u>"), ("lit","</u>"),
    ("lit","<li>"), ("lit","</li>"),
    ("lit","<tbody>"), ("lit","</tbody>"),
    ("lit","<tr>"), ("lit","</tr>"),
    ("lit","<![CDATA["),
    ("lit","<Br />"), ("lit","<BR />"), ("lit","<Br>"),
    ("re","<script.*?</script>"),
    ("re","<option[^>]*>"), ("lit","</option>"),
    # NOTE: "<i[^>]*>" also swallows <iframe...>, <img...> etc. — kept as-is.
    ("re","<i[^>]*>"), ("lit","</iframe>"), ("lit","</i>"),
    ("re","<table[^>]*>"), ("lit","</table>"),
    ("re","<td[^>]*>"), ("lit","</td>"),
    ("re","<div[^>]*>"), ("lit","</div>"),
    ("re","<dd[^>]*>"), ("lit","</dd>"),
    ("re","<font[^>]*>"), ("lit","</font>"),
    ("re","<strong[^>]*>"), ("lit","</strong>"),
    ("re","<small[^>]*>"), ("lit","</small>"),
    ("re","<span[^>]*>"), ("lit","</span>"),
    ("re","<a[^>]*>"), ("lit","</a>"),
    ("re","<p[^>]*>"), ("lit","</p>"),
    ("re","<ul[^>]*>"), ("lit","</ul>"),
    ("re","<h1[^>]*>"), ("lit","</h1>"),
    ("re","<h2[^>]*>"), ("lit","</h2>"),
    ("re","<h3[^>]*>"), ("lit","</h3>"),
    ("re","<h4[^>]*>"), ("lit","</h4>"),
    ("re","<!--[^-]+-->"),
    ("re","<img[^>]*>"),
    ("re","<br[^>]*>"),
    ("re","<object[^>]*>"), ("lit","</object>"),
    ("re","<param[^>]*>"), ("lit","</param>"),
    ("re","<embed[^>]*>"), ("lit","</embed>"),
    ("re","<title[^>]*>"), ("lit","</title>"),
    ("re","<link[^>]*>"),
    ("lit","\t"),
]


def htmlclean(cadena):
    """Strip formatting/markup tags from an HTML fragment and unescape entities.

    Tag *contents* are kept (except <script> blocks, removed whole); only the
    tags themselves disappear.
    """
    for kind, token in _HTMLCLEAN_OPS:
        if kind == "lit":
            cadena = cadena.replace(token,"")
        else:
            cadena = re.compile(token,re.DOTALL).sub("",cadena)
    cadena = entityunescape(cadena)
    return cadena


# Accent / special-character downgrades applied by slugify(), in order.
_SLUG_REPLACEMENTS = [
    ("Á","a"), ("É","e"), ("Í","i"), ("Ó","o"), ("Ú","u"),
    ("á","a"), ("é","e"), ("í","i"), ("ó","o"), ("ú","u"),
    ("À","a"), ("È","e"), ("Ì","i"), ("Ò","o"), ("Ù","u"),
    ("à","a"), ("è","e"), ("ì","i"), ("ò","o"), ("ù","u"),
    ("ç","c"), ("Ç","C"), ("Ñ","n"), ("ñ","n"),
    ("/","-"), ("&amp;","&"),
]


def slugify(title):
    """Turn *title* into a lowercase ascii slug ("El Niño" -> "el-nino").

    Accents and eñes are transliterated, invalid characters dropped, runs of
    whitespace collapsed into single dashes.  An empty result is replaced by
    a unique time-based slug so the value is never "".
    """
    # Sustituye acentos y eñes
    for src, dst in _SLUG_REPLACEMENTS:
        title = title.replace(src, dst)
    # Pasa a minúsculas
    title = title.lower().strip()
    # Elimina caracteres no válidos
    validchars = "abcdefghijklmnopqrstuvwxyz1234567890- "
    title = ''.join(c for c in title if c in validchars)
    # Sustituye espacios en blanco duplicados y saltos de línea
    title = re.compile(r"\s+",re.DOTALL).sub(" ",title)
    # Sustituye espacios en blanco por guiones
    title = re.compile(r"\s",re.DOTALL).sub("-",title.strip())
    # Sustituye guiones duplicados
    title = re.compile(r"\-+",re.DOTALL).sub("-",title)
    # Arregla casos especiales
    if title.startswith("-"):
        title = title [1:]
    if title=="":
        title = "-"+str(time.time())
    return title
def remove_show_from_title(title,show):
    """Strip the show name (and a separating dash) from an episode title.

    Comparison is slug-based so accents/case don't matter.  Returns a
    non-empty utf-8 str; a fully-consumed title is replaced by a timestamp.
    """
    #print slugify(title)+" == "+slugify(show)
    # Quita el nombre del programa del título
    if slugify(title).startswith(slugify(show)):
        # Convierte a unicode primero, o el encoding se pierde
        title = unicode(title,"utf-8","replace")
        show = unicode(show,"utf-8","replace")
        title = title[ len(show) : ].strip()
        if title.startswith("-"):
            title = title[ 1: ].strip()
        if title=="":
            title = str( time.time() )
        # Vuelve a utf-8
        title = title.encode("utf-8","ignore")
        show = show.encode("utf-8","ignore")
    return title


def getRandom(str):
    """Legacy alias: returns the md5 hex digest of *str*."""
    return get_md5(str)


def getLocationHeaderFromResponse(url):
    """Return the Location header of *url*'s response without following it."""
    return get_header_from_response(url,header_to_get="location")


def get_header_from_response(url,header_to_get="",post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']]):
    """Fetch *url* and return the value of response header *header_to_get*.

    When header_to_get=="location" redirects are NOT followed, so the raw
    Location value can be read.  Returns "" when the header is absent.
    """
    header_to_get = header_to_get.lower()
    logger.info("[scrapertools.py] get_header_from_response url="+url+", header_to_get="+header_to_get)
    if post is not None:
        logger.info("[scrapertools.py] post="+post)
    else:
        logger.info("[scrapertools.py] post=None")

    # Inicializa la librería de las cookies
    ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' )
    logger.info("[scrapertools.py] ficherocookies="+ficherocookies)
    cj = None
    ClientCookie = None
    cookielib = None
    import cookielib
    urlopen = urllib2.urlopen
    Request = urllib2.Request
    cj = cookielib.MozillaCookieJar()
    if os.path.isfile(ficherocookies):
        logger.info("[scrapertools.py] Leyendo fichero cookies")
        try:
            cj.load(ficherocookies)
        except:
            logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
            os.remove(ficherocookies)

    # For "location" we must not follow the redirect, or the header is lost.
    if header_to_get=="location":
        opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler())
    else:
        opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)

    inicio = time.clock()
    txheaders = {}
    if post is None:
        logger.info("[scrapertools.py] petición GET")
    else:
        logger.info("[scrapertools.py] petición POST")

    # BUGFIX: work on a copy of the header list.  The original appended the
    # Authorization header to the *shared default* list in place, so it
    # accumulated (and leaked) across unrelated calls.
    headers = list(headers)

    # Login y password Filenium
    # http://abcd%40gmail.com:mipass@filenium.com/get/Oi8vd3d3/LmZpbGVz/ZXJ2ZS5j/b20vZmls/ZS9kTnBL/dm11/b0/?.zip
    if "filenium" in url:
        from servers import filenium
        url , authorization_header = filenium.extract_authorization_header(url)
        headers.append( [ "Authorization",authorization_header ] )

    logger.info("[scrapertools.py] ---------------------------")
    for header in headers:
        logger.info("[scrapertools.py] header=%s" % str(header[0]))
        txheaders[header[0]]=header[1]
    logger.info("[scrapertools.py] ---------------------------")

    req = Request(url, post, txheaders)
    handle = urlopen(req)
    # Actualiza el almacén de cookies
    cj.save(ficherocookies)

    info = handle.info()
    logger.info("[scrapertools.py] Respuesta")
    logger.info("[scrapertools.py] ---------------------------")
    location_header=""
    for header in info:
        logger.info("[scrapertools.py] "+header+"="+info[header])
        if header==header_to_get:
            location_header=info[header]
    handle.close()
    logger.info("[scrapertools.py] ---------------------------")

    fin = time.clock()
    logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
    return location_header
def get_headers_from_response(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']]):
    """Fetch *url* (redirects NOT followed) and return all response headers.

    Returns a list of [name, value] pairs; the body is discarded.
    """
    return_headers = []
    logger.info("[scrapertools.py] get_headers_from_response url="+url)
    if post is not None:
        logger.info("[scrapertools.py] post="+post)
    else:
        logger.info("[scrapertools.py] post=None")

    # Inicializa la librería de las cookies
    ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' )
    logger.info("[scrapertools.py] ficherocookies="+ficherocookies)
    cj = None
    ClientCookie = None
    cookielib = None
    import cookielib
    urlopen = urllib2.urlopen
    Request = urllib2.Request
    cj = cookielib.MozillaCookieJar()
    if os.path.isfile(ficherocookies):
        logger.info("[scrapertools.py] Leyendo fichero cookies")
        try:
            cj.load(ficherocookies)
        except:
            logger.info("[scrapertools.py] El fichero de cookies existe pero es ilegible, se borra")
            os.remove(ficherocookies)

    # Never follow redirects here: the point is to inspect the raw headers.
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj),NoRedirectHandler())
    urllib2.install_opener(opener)

    inicio = time.clock()
    txheaders = {}
    if post is None:
        logger.info("[scrapertools.py] petición GET")
    else:
        logger.info("[scrapertools.py] petición POST")
    logger.info("[scrapertools.py] ---------------------------")
    for header in headers:
        logger.info("[scrapertools.py] header=%s" % str(header[0]))
        txheaders[header[0]]=header[1]
    logger.info("[scrapertools.py] ---------------------------")

    req = Request(url, post, txheaders)
    handle = urlopen(req)
    # Actualiza el almacén de cookies
    cj.save(ficherocookies)

    info = handle.info()
    logger.info("[scrapertools.py] Respuesta")
    logger.info("[scrapertools.py] ---------------------------")
    location_header=""
    for header in info:
        logger.info("[scrapertools.py] "+header+"="+info[header])
        return_headers.append( [header,info[header]] )
    handle.close()
    logger.info("[scrapertools.py] ---------------------------")

    fin = time.clock()
    logger.info("[scrapertools.py] Descargado en %d segundos " % (fin-inicio+1))
    return return_headers


def unseo(cadena):
    """Strip the SEO boilerplate prefix from a scraped movie title.

    The first matching prefix (case-insensitive) wins; anything else is
    returned unchanged.
    """
    for prefix in ("VER GRATIS LA PELICULA ",
                   "VER GRATIS PELICULA ",
                   "VER ONLINE LA PELICULA ",
                   "VER GRATIS ",
                   "VER ONLINE ",
                   "DESCARGA DIRECTA "):
        if cadena.upper().startswith(prefix):
            return cadena[len(prefix):]
    return cadena
ONLINE "): cadena = cadena[11:] elif cadena.upper().startswith("DESCARGA DIRECTA "): cadena = cadena[17:] return cadena #scrapertools.get_filename_from_url(media_url)[-4:] def get_filename_from_url(url): import urlparse parsed_url = urlparse.urlparse(url) try: filename = parsed_url.path except: # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" if len(parsed_url)>=4: filename = parsed_url[2] else: filename = "" return filename def get_domain_from_url(url): import urlparse parsed_url = urlparse.urlparse(url) try: filename = parsed_url.netloc except: # Si falla es porque la implementación de parsed_url no reconoce los atributos como "path" if len(parsed_url)>=4: filename = parsed_url[1] else: filename = "" return filename # Parses the title of a tv show episode and returns the season id + episode id in format "1x01" def get_season_and_episode(title): logger.info("get_season_and_episode('"+title+"')") patron ="(\d+)[x|X](\d+)" matches = re.compile(patron).findall(title) logger.info(str(matches)) filename=matches[0][0]+"x"+matches[0][1] logger.info("get_season_and_episode('"+title+"') -> "+filename) return filename def get_sha1(cadena): try: import hashlib devuelve = hashlib.sha1(cadena).hexdigest() except: import sha import binascii devuelve = binascii.hexlify(sha.new(cadena).digest()) return devuelve def get_md5(cadena): try: import hashlib devuelve = hashlib.md5(cadena).hexdigest() except: import md5 import binascii devuelve = binascii.hexlify(md5.new(cadena).digest()) return devuelve def read_body_and_headers(url, post=None, headers=[], follow_redirects=False, timeout=None): logger.info("read_body_and_headers "+url) if post is not None: logger.info("read_body_and_headers post="+post) if len(headers)==0: headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:18.0) Gecko/20100101 Firefox/18.0"]) # Start cookie lib ficherocookies = os.path.join( config.get_data_path(), 'cookies.dat' ) 
logger.info("read_body_and_headers cookies_file="+ficherocookies) cj = None ClientCookie = None cookielib = None # Let's see if cookielib is available try: logger.info("read_body_and_headers importing cookielib") import cookielib except ImportError: logger.info("read_body_and_headers cookielib no disponible") # If importing cookielib fails # let's try ClientCookie try: logger.info("read_body_and_headers importing ClientCookie") import ClientCookie except ImportError: logger.info("read_body_and_headers ClientCookie not available") # ClientCookie isn't available either urlopen = urllib2.urlopen Request = urllib2.Request else: logger.info("read_body_and_headers ClientCookie available") # imported ClientCookie urlopen = ClientCookie.urlopen Request = ClientCookie.Request cj = ClientCookie.MozillaCookieJar() else: logger.info("read_body_and_headers cookielib available") # importing cookielib worked urlopen = urllib2.urlopen Request = urllib2.Request cj = cookielib.MozillaCookieJar() # This is a subclass of FileCookieJar # that has useful load and save methods if cj is not None: # we successfully imported # one of the two cookie handling modules logger.info("read_body_and_headers Cookies enabled") if os.path.isfile(ficherocookies): logger.info("read_body_and_headers Reading cookie file") # if we have a cookie file already saved # then load the cookies into the Cookie Jar try: cj.load(ficherocookies) except: logger.info("read_body_and_headers Wrong cookie file, deleting...") os.remove(ficherocookies) # Now we need to get our Cookie Jar # installed in the opener; # for fetching URLs if cookielib is not None: logger.info("read_body_and_headers opener using urllib2 (cookielib)") # if we use cookielib # then we get the HTTPCookieProcessor # and install the opener in urllib2 if not follow_redirects: opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler()) else: opener = 
urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj)) urllib2.install_opener(opener) else: logger.info("read_body_and_headers opener using ClientCookie") # if we use ClientCookie # then we get the HTTPCookieProcessor # and install the opener in ClientCookie opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj)) ClientCookie.install_opener(opener) # ------------------------------------------------- # Cookies instaladas, lanza la petición # ------------------------------------------------- # Contador inicio = time.clock() # Diccionario para las cabeceras txheaders = {} # Construye el request if post is None: logger.info("read_body_and_headers GET request") else: logger.info("read_body_and_headers POST request") # Añade las cabeceras logger.info("read_body_and_headers ---------------------------") for header in headers: logger.info("read_body_and_headers header %s=%s" % (str(header[0]),str(header[1])) ) txheaders[header[0]]=header[1] logger.info("read_body_and_headers ---------------------------") req = Request(url, post, txheaders) if timeout is None: handle=urlopen(req) else: #Disponible en python 2.6 en adelante --> handle = urlopen(req, timeout=timeout) #Para todas las versiones: try: import socket deftimeout = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) handle=urlopen(req) socket.setdefaulttimeout(deftimeout) except: import sys for line in sys.exc_info(): logger.info( "%s" % line ) # Actualiza el almacén de cookies cj.save(ficherocookies) # Lee los datos y cierra if handle.info().get('Content-Encoding') == 'gzip': buf = StringIO( handle.read()) f = gzip.GzipFile(fileobj=buf) data = f.read() else: data=handle.read() info = handle.info() logger.info("read_body_and_headers Response") returnheaders=[] logger.info("read_body_and_headers ---------------------------") for header in info: logger.info("read_body_and_headers "+header+"="+info[header]) returnheaders.append([header,info[header]]) 
handle.close() logger.info("read_body_and_headers ---------------------------") ''' # Lanza la petición try: response = urllib2.urlopen(req) # Si falla la repite sustituyendo caracteres especiales except: req = urllib2.Request(url.replace(" ","%20")) # Añade las cabeceras for header in headers: req.add_header(header[0],header[1]) response = urllib2.urlopen(req) ''' # Tiempo transcurrido fin = time.clock() logger.info("read_body_and_headers Downloaded in %d seconds " % (fin-inicio+1)) logger.info("read_body_and_headers body="+data) return data,returnheaders
golaizola/pelisalacarta-xbmc
core/scrapertools.py
Python
gpl-3.0
52,267
# Package init: re-export the public API of the pypwrctrl implementation
# module so callers can write `from pypwrctrl import PlugMaster` instead of
# reaching into the submodule.
from pypwrctrl.pypwrctrl import Plug, PlugDevice, PlugMaster
Innovailable/pypwrctrl
pypwrctrl/__init__.py
Python
gpl-3.0
61
import logging
import re
from datetime import datetime
from typing import List, Any, Dict, Union, Pattern

from pastehunter.common import base62_decode, base62_encode
from pastehunter.inputs.base_input import BasePasteSite

logger = logging.getLogger('pastehunter')


class IxDotIoSite(BasePasteSite):
    """Paste input for http://ix.io.

    ix.io exposes no JSON API, so recent pastes are scraped from the HTML of
    the "all users" page; paste ids are recovered from the base62-encoded
    slugs in the anchor tags and missing (non-anonymous) ids are inferred,
    since ix ids are sequential.
    """

    # Yeah, yeah, I know, no regex for HTML parsing...
    # If we end up doing a lot more of this, then maybe we'll use beautifulsoup or something.
    # Capturing groups:
    #   1. Paste ID (base62 slug)
    #   2. Timestamp
    _ITEM_ID_RE: Pattern = re.compile('<div class="t">[\\sa-zA-Z0-9]+'
                                      '<a href="/(.*?)">\\[r][^\r\n]+'
                                      '\\s+@ (.*?)[\r\n]')

    def __init__(self, conf):
        self.conf = conf
        self.site = "ix.io"
        url_main = "http://" + self.site
        self.url_recent = url_main + "/user/"
        self.view_pattern = url_main + "/{}/"
        self.raw_pattern = url_main + "/{}"
        self.url = None

    def remap_raw_item(self, raw_item: Dict[str, Any]) -> Dict[str, Any]:
        """Convert a scraped ``{'pid', 'date'}`` record into pastehunter's
        standard paste dict (site metadata, ISO-8601 ``@timestamp`` and the
        raw scrape URL).

        Fixed the original annotation ``raw_item: [str, Dict]``, which was a
        list literal rather than a type.
        """
        pid = raw_item['pid']
        paste_data = {
            'filename': str(pid),
            'confname': 'ixio',
            'pastesite': self.site,
            'pasteid': pid,
        }
        # Timezone is UTC/Zulu.
        date = datetime.strptime(raw_item['date'], '%a %b %d %H:%M:%S %Y').isoformat()
        paste_data['@timestamp'] = date
        encoded_pid = self.get_paste_id(paste_data)
        paste_data['scrape_url'] = self.raw_pattern.format(encoded_pid)
        return paste_data

    def get_paste_for_id(self, paste_id: Any) -> str:
        """Fetch the raw paste for *paste_id* (a base62 slug).

        Bug fix: the original discarded the result of ``make_request`` and
        always returned ``None`` despite the declared ``-> str``.
        NOTE(review): returns whatever ``make_request`` yields; if that is a
        response object rather than text, callers may want ``.text`` —
        confirm against BasePasteSite.
        """
        return self.make_request(self.raw_pattern.format(paste_id))

    def get_paste_id(self, paste_obj: Dict[str, Any]) -> str:
        """Return the base62 slug for a paste dict's numeric ``pasteid``."""
        decoded = paste_obj.get('pasteid')
        return base62_encode(decoded)

    def get_recent_items(self, input_history: List[str]):
        """Scrape the recent-paste listing.

        Returns ``(paste_list, history)``: newly remapped paste dicts plus
        every pid seen on the page (the caller uses ``history`` to
        de-duplicate across runs).  On any failure the partial results are
        returned and the error is logged.
        """
        history = []
        paste_list = []
        try:
            recent_page = self.make_request(self.url_recent)
            item_data = self.get_data_for_page(recent_page.text)
            for val in item_data:
                # Track paste ids to prevent dupes.
                pid = val['pid']
                history.append(pid)
                if pid in input_history:
                    continue
                paste_data = self.remap_raw_item(val)
                paste_list.append(paste_data)
            return paste_list, history
        except Exception as e:
            logger.error("Unable to parse ixio items: {0}".format(e))
            return paste_list, history

    def get_data_for_page(self, page_data: str) -> List[Dict[str, Union[int, str]]]:
        """Parse the user-page HTML into ``{'pid': int, 'date': str}`` records.

        ix's "all users" page only lists anonymous pastes, but ids are
        sequential, so gaps between consecutive listed ids are filled in
        with inferred pastes (reusing the neighbouring timestamp as a best
        guess).
        """
        page: List[Dict[str, Union[int, str]]] = []
        last_item_id = -1
        regex_matches = self._ITEM_ID_RE.findall(page_data)
        # ix pages are structured newest -> oldest; reversing makes the
        # gap-filling logic below simpler.
        regex_matches.reverse()
        for encoded_id, created_at in regex_matches:
            # Decode the base62 slug so we can treat ids as integers.
            item_id = base62_decode(encoded_id)
            if last_item_id == -1:
                # First item seen: don't backfill history before this run.
                page.append({'pid': item_id, 'date': created_at})
                last_item_id = item_id
            elif item_id - last_item_id > 1:
                # We've already hit last_item_id so we skip it and fill in
                # the delta (the unlisted, non-anonymous pastes).
                for i in range(last_item_id + 1, item_id + 1):
                    # Copy the created date as a best guess.
                    page.append({'pid': i, 'date': created_at})
                last_item_id = item_id
            else:
                # No gap: just add this one normally.
                page.append({'pid': item_id, 'date': created_at})
                last_item_id = item_id
        return page


def recent_pastes(conf, input_history):
    """Module-level entry point used by the pastehunter scheduler."""
    site = IxDotIoSite(conf)
    # populate vars from config
    return site.get_recent_items(input_history)
kevthehermit/PasteHunter
pastehunter/inputs/ixio.py
Python
gpl-3.0
4,534
"""
WSGI config for pyday2016 project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

# Point Django at the project settings before the application object is
# built; an already-exported DJANGO_SETTINGS_MODULE takes precedence.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pyday2016.settings")

# The WSGI callable served by gunicorn/uwsgi/mod_wsgi.
application = get_wsgi_application()
fproldan/pyday2016
pyday2016/wsgi.py
Python
gpl-3.0
396
#!/usr/bin/python """ Script: Forest, a simple Python forum script. Author: Andrew Nelis (andrew.nelis@gmail.com) OnTheWeb: http://www.triv.org.uk/~nelis/forest Date: Jun 2010 Version: 1.0.3 A Python CGI script for a basic flat-file based forum. Getting Started: * Set up your web server/place forest.py so that it is executed as a CGI script. You'll probably have to change the python path at the top of this script and chmod this script as appropriate if you're not on Windows. * Put the stylesheet forest.css somewhere where it will be served by the webserver. * Edit some of the variables below to taste. Most important of all: o DATA_PATH - Should point to a writable folder where the posts will be stored. o CSS_PATH - Specify where the stylesheet forest.css is. (There are other settings within this file, mostly self explanatory.) * Go to the appropriate URL and post away! LICENCE: Copyright (c) 2010 Andrew Nelis Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import md5 import os import time # Show any errors on the page. You might want to take this out on a live server # and look in the servers error log instead. # Removed for compatibility issues #import cgitb #cgitb.enable() # ============================================================================ # Configuration # ============================================================================ # Where the threads are stored. This folder must exist. DATA_PATH = '/opt/publicbox/share/forumspace/' #Where the forest CGI is located (as a URL). CGI_URL='/cgi-bin/forest.py' # Where the main stylesheet is kept (as a URL). CSS_PATH = '/content/css/forest.css' # What is the title of the board? BOARD_TITLE = 'PublicBox Board' # Simple Description of the board, appears at the top of each page BOARD_DESCRIPTION = """PublicBox Board. Put media reviews or questions here.<br> <A HREF="http://publicbox.lan">Click here to go back to the main site</a> """ # How dates are stored (see python time module for details) DATE_FORMAT = '%d %b %Y %H:%M:%S' # If no author name is given, then this is the default. ANON_AUTHOR = 'Anonymous Coward' # How many entries to show on the index? INDEX_PAGE_SIZE = 20 # How many entries to show on the thread page? THREAD_PAGE_SIZE = 20 # Maximum lengths for names, subjects and message bodies. # (currently we chop them off without warning) MAX_AUTHOR_LEN = 20 MAX_SUBJECT_LEN = 100 MAX_BODY_LEN = 10000 # ============================================================================ # HTML Elements. 
# ============================================================================ HTML_TOP = ''' <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>%s</title> <link rel="stylesheet" href="%s" type="text/css" /> <script type="text/javascript"> function show(elem_id) { elem = document.getElementById(elem_id); if (elem) elem.style.display = 'block'; } </script> </head> <body> <h3>%s</h3> <p class="board_description">%s</p> ''' % (BOARD_TITLE, CSS_PATH, BOARD_TITLE, BOARD_DESCRIPTION) HTML_BOTTOM = ''' <p class="smallprint" >Powered by the <a href="http://www.triv.org.uk/~nelis/forest">Forest Python Board</a></font> </body></html>''' HTML_THREADS_TOP = '''<table width="95%" class="threads_table"> <tr class="threads_header"> <th width="60%">Subject</th><th>Author</th><th>Date</th><th>Replies</th><th>Last Reply</th> </tr> ''' HTML_THREADS_ROW = ''' <tr class="%s"> <td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td> </tr> ''' HTML_NEW_THREAD = ''' <p><a href="javascript:show('hidden_form');">Start a new thread</a></p> <div id="hidden_form"> <form method="post" action="?new=thread"> <p>Name: <input name="author" maxlength="%s"/></p> <p>Subject: <input name="subject" maxlength="%s" size="80"/></p> <p><textarea name="body" rows="10" cols="80"></textarea></p> <p><input class="submit_button" type="submit" value="New Thread!"/></p> </form> </div> ''' % (MAX_AUTHOR_LEN, MAX_SUBJECT_LEN) HTML_NEW_REPLY = ''' <p><a href="javascript:show('hidden_form')">Reply to this thread</a></p> <div id="hidden_form"> <form method="post" action="?new=reply&amp;thread=%%s"> <p>Name: <input name="author" maxlength="%s"/></p> <p><textarea name="body" rows="10" cols="80"></textarea></p> <p><input class="submit_button" type="submit" value="Reply!"/></p> </form> </div> ''' % (MAX_AUTHOR_LEN,) HTML_THREADS_BOTTOM = 
'</table>' HTML_THREAD_TOP = ''' <table width="95%%" class="threads_table"> <col width="15%%" /> <col width="85%%" /> <tr><td colspan="2"><a href="?">&lt;&lt; Main</a></td></tr> <tr class="thread_header"><td colspan="2">%s</td></tr> ''' HTML_THREAD_ROW = ''' <tr class="%s"> <td valign="top"><b>%s</b><br/><small>%s</small></td> <td>%s</td> </tr> ''' HTML_THREAD_BOTTOM = ''' </table> ''' # ============================================================================ # Error messages # ============================================================================ ERR_INVALID_THREAD = '<h3 class="error">Invalid Thread Specified</h3>' ERR_NO_SUBJECT = '<h3 class="error">No Subject Given</h3>' ERR_NO_BODY = '<h3 class="error">No body text!</h3>' # ============================================================================ # Misc. globals # ============================================================================ # No need to fiddle with these though. ROW_STYLES = {0: 'thread_row', 1: 'thread_row_alt'} INDEX_FILE = os.path.join(DATA_PATH, 'index.txt') THREAD_PATH = DATA_PATH # ============================================================================ # Function definitions # ============================================================================ html_escape_table = { "&": "&amp;", '"': "&quot;", "'": "&apos;", ">": "&gt;", "<": "&lt;", ';': "&#59;", "/": "&#47;", '=': "&#61;", ":": "&#58;", '?': "&#63;", '!': "&#33;", '(': "&#40;", "{": "&#121;", "[": "&#91", "-": "&#45", } def strip_html( text ): """Remove HTML chars from the given text and replace them with HTML entities. """ return "".join(html_escape_table.get(c,c) for c in text ) def process_body(body): """Process the message body e.g. for escaping smilies, HTML etc. ready for storing. We should then just be able to print the body out""" import re # Maximum body length. new_body = strip_html( body[:MAX_BODY_LEN] ) new_body = new_body.replace('\n', '<br/>\n') # Turn (obvious) URLs into links. 
# new_body = url_re.sub(r'<a href="\1">\1</a>', new_body) # url_re = re.compile('(http://[\S\.]+)') return new_body.encode('string_escape') def process_author(author): """Clean the author tag""" # Remove tabs and ensure a maximum length. new_author = strip_html( author[:MAX_AUTHOR_LEN] ) return new_author.replace('\t', ' ') def process_subject(subject): """Clean the subject line""" if ( subject is not None): return subject[:MAX_SUBJECT_LEN] else: return "No Subject" def get_query_params(): """Return the URL parameters as a dictionary. Writing our own simple version means we don't have to import the cgi module for every page (which noticeably slows down page viewing). """ param_string = os.getenv('QUERY_STRING', '') params = param_string.split('&') param_dict = {} for param in params: if '=' in param: key, value = param.split('=', 1) param_dict[key] = value else: param_dict[param] = None return param_dict def is_valid_hash(hash_string): """Ensure that <hash_string> is a proper hash representing an existing thread""" # Should be a string comprising of hex digits if not hash_string.isalnum(): return False if not os.path.exists(os.path.join(THREAD_PATH, hash_string)): return False return True def get_offset(args): """Get the page offset, validating or returning 0 if None or invalid.""" offset = args.get('offset', '0') if offset.isdigit(): return int(offset) else: return 0 def update_thread(author, subject=None, key=None): """Update the thread, creating a new thread if key is None. Returns the key (hash). author - String, the name of the author. subject - String, the title of the thread. key - String, the key to an existing thread to update. If <subject> is given, then it's assumed that we're starting a new thread and if <key> is given, then we should be updating an existing thread. 
""" now = time.strftime(DATE_FORMAT) author = process_author(author) if key: row_hash = key else: row_hash = md5.new('%s%s%s' % (now, author, subject)).hexdigest() # Read the index of threads in. try: threads = file(INDEX_FILE, 'r').readlines() except IOError: # The file gets (re)created later on so there's no problem. threads = [] new_threads = [] # Index format: # hash, date, num_replies, last_reply, author, subject if not key: # A new thread, put at the top. new_threads.append('\t'.join( (row_hash, now, '0', '-', author, subject))) for thread in threads: if thread.startswith(row_hash): # insert the updated thread at the beginning. # (_ ignore last reply - we're setting it to now) _, date, num_replies, _, author, subject = \ thread.strip().split('\t') num_replies = str(int(num_replies) + 1) new_threads.insert(0, '\t'.join( (row_hash, date, num_replies, now, author, subject))) else: new_threads.append(thread.strip()) # Overwrite the existing index with the updated index. threads = file(INDEX_FILE, 'w') threads.write('\n'.join(new_threads)) threads.close() return row_hash def new_subject(field_storage): """Add a new subject to the list of threads. field_storage - cgi.FieldStorage instance. On success: returns <new subject hash string> On error: raises ValueError with error as message. """ author = field_storage.getfirst( 'author', ANON_AUTHOR ) subject = field_storage.getfirst( 'subject' ) body = field_storage.getfirst( 'body' ) if not subject: raise ValueError( ERR_NO_SUBJECT ) elif not body: raise ValueError( ERR_NO_BODY ) subject = strip_html(subject.replace('\t', ' ')) row_hash = update_thread( author, subject ) new_post( author, subject, body, row_hash ) return row_hash def new_post(author, subject, body, key): """Create a new post, either by creating or appending to a post file. 
author, subject, body, key - Strings """ author = process_author(author) subject = process_subject(subject) body = process_body(body) date = time.strftime(DATE_FORMAT) post_filename = os.path.join(THREAD_PATH, key) if not os.path.exists(post_filename): post_file = file(post_filename, 'w') print >> post_file, '%s\t%s' % (key, subject) else: post_file = file(post_filename, 'a') print >> post_file, '%s\t%s\t%s' % (date, author, body) def reply(field_storage, key): """Reply to an existing post. field_storage - A cgi.FieldStorage containing post data for the post key - String, the id of the thread we're replying to. On success: return <thread key string> On failure: raise ValueError with error message as error value. """ # Check that the thread id is valid. if not (key and is_valid_hash(key)): raise ValueError( ERR_INVALID_THREAD ) author = field_storage.getfirst( 'author', ANON_AUTHOR ) body = field_storage.getfirst( 'body' ) if not body: raise ValueError( ERR_NO_BODY ) author = author.replace('\t', ' ') update_thread(author, key=key) new_post(author, None, body, key) return key def display_paging_links( current_offset, num_items, page_length, thread=None ): """Display a list of links to go to a given page number""" pages = num_items / page_length # Any left over pages? if (num_items % page_length): pages += 1 if pages < 2: # Only one page. Don't bother showing links. 
return links = [] if thread: url = '?thread=%s&offset=%%d' % thread else: url = '?offset=%d' for page_number in range(pages): offset = page_number * page_length if offset != current_offset: links.append( '<a href="%s">%s</a>' % \ (url % offset, page_number + 1) ) else: links.append( str( page_number + 1 ) ) print ' | '.join(links) def list_threads(offset=0): """List the existing threads.""" if os.path.exists(INDEX_FILE): thread_file = file(INDEX_FILE, 'r') threads = thread_file.read().strip().split('\n') thread_file.close() else: threads = [] num_threads = len(threads) display_paging_links(offset, num_threads, INDEX_PAGE_SIZE) print HTML_THREADS_TOP thread_index = -1 for thread in threads[offset:offset + INDEX_PAGE_SIZE]: thread_index += 1 thread_items = thread.split('\t') if len(thread_items) != 6: continue thread_hash, date, num_replies, last_reply, author, subject = \ thread_items link = '<a href="?thread=%s">%s</a>' % (thread_hash, subject) # Date Author Subject Replies Last Reply print HTML_THREADS_ROW % (ROW_STYLES[thread_index % 2], link, author, date, num_replies, last_reply) print HTML_THREADS_BOTTOM print HTML_NEW_THREAD def list_single_thread(thread_hash, offset=0): """Output the HTMl for a given thread id""" if not is_valid_hash(thread_hash): print ERR_INVALID_THREAD return thread_file = file(os.path.join(THREAD_PATH, thread_hash), 'r') threads = thread_file.read().split('\n') thread_file.close() # The first item in the file is actually the hash and the subject. But we # don't need it really. 
_, subject = threads.pop(0).split('\t') num_posts = len(threads) display_paging_links(offset, num_posts, THREAD_PAGE_SIZE, thread_hash) print HTML_THREAD_TOP % subject.strip() row_index = -1 for line in threads[offset : offset + THREAD_PAGE_SIZE]: row_index += 1 split_line = line.split('\t') if len(split_line) != 3: continue date, author, body = split_line print HTML_THREAD_ROW % (ROW_STYLES[row_index % 2], author, date, body.decode('string_escape')) print HTML_THREAD_BOTTOM print HTML_NEW_REPLY % thread_hash def redirect( threadid, offset=None ): """Redirect the browser""" #new_location = os.environ.get('REQUEST_URI', '') new_location = CGI_URL new_location += '?thread=%s' % threadid if offset: new_location += '&offset=%s' % offset ## can't use standard redirect on CGIHTTPServer # print 'Status: 303 See Other' # print 'Location: %s' % new_location # print # print 'Nothing to see here, move along!' print 'Content-Type: text/html; charset=utf-8' print print '<html><head><meta http-equiv="refresh" content="0;url=%s">' % new_location print "</head></html>" def handle(): """Main entry point for our code. Handles the web request.""" query_params = get_query_params() post_error = None if query_params.has_key('new'): # We only want the whole cgi module when we need to parse POST data. import cgi form_data = cgi.FieldStorage() what = query_params['new'] if what == 'thread': try: thread_hash = new_subject(form_data) redirect( thread_hash ) return except ValueError, error: post_error = str( error ) elif what == 'reply': try: thread_hash = reply(form_data, query_params.get('thread')) # QQQ -> Get offset. redirect( thread_hash ) return except ValueError, error: post_error = str( error ) print 'Content-Type: text/html; charset=utf-8' print print HTML_TOP if post_error: print post_error # paging. 
offset = get_offset( query_params ) if query_params.has_key( 'thread' ): list_single_thread( query_params['thread'], offset ) else: list_threads( offset ) print HTML_BOTTOM if __name__ == '__main__': handle()
whunder/Publicbox
publicbox/src/forest.py
Python
gpl-3.0
17,871
#coding: utf-8 import numpy as np class MovieBase(): def read_from_csv(self, file_path): f = open(file_path, 'r') self.matrix = np.loadtxt(f, delimiter=',') self.shape = self.matrix.shape f.close() def get_user_unseen_films(self, user_idx): if user_idx >= self.shape[1]: raise ValueError(u"Matrix only has {} rows. Cannot access row {}".format(self.shape[0], user_idx)) return [i for i, rating in enumerate(self.matrix[user_idx]) if rating == 0] def normalize_base(self): self.user_means = self._get_user_means() print self.user_means movie_ratings = self._replace_zero_entries(self.matrix) # Replacing the 0 entries by the mean rating of the # movie of the col it is located in self.norm_matrix = self.matrix + movie_ratings # Subtracting the mean rating for each user from its row user_means = self._subtract_row_means(self.norm_matrix) self.norm_matrix += user_means def _get_user_means(self): means = [] for row in self.matrix: row_mean = np.mean(filter(lambda rating: rating != 0, row)) means.append(row_mean) #return means return self.matrix.mean(axis=1) def _subtract_row_means(self, matrix): # M is the matrix _m = np.ones(self.shape) row_means = self.matrix.mean(axis=1) for i, row in enumerate(matrix): _m *= -row_means[i] return _m def _replace_zero_entries(self, matrix): _m = np.zeros(self.shape) for j in range(self.shape[1]): col = matrix[:, j] col_mean = np.mean(filter(lambda rating: rating != 0, col)) for i, row in enumerate(col): if row == 0: _m[i, j] = col_mean return _m def factor_svd(self): self.U, self._S, self.V_t = np.linalg.svd(self.norm_matrix, full_matrices=True) #self.U, self._S, self.V_t = np.linalg.svd(self.matrix, full_matrices=True) self.S = np.diag(self._S) def low_rank_approximate(self, k): self.U_k = self.U[:,range(k)] self.S_k = np.diag(self._S[:k]) self._S_k = np.diag(self.S_k) self.Vk_t = self.V_t[range(k)] self.R_k = self.U_k.dot(self.S_k).dot(self.Vk_t) def build_aspect_matrices(self): # user-aspect = Uk * sqrt(Sk) self.user_aspect = 
self.U_k.dot(np.sqrt(self.S_k)) # movie-aspect = sqrt(Sk) * Vk_t self.movie_aspect = np.sqrt(self.S_k).dot(self.Vk_t) print self.user_aspect print self.movie_aspect def get_prediction(self, user, movie): rating = self.user_aspect[user].dot(self.movie_aspect[:, movie]) #+ self.user_means[user] return int(rating) def do_stuff(self): self.read_from_csv('test.csv') unseen = {} for i in range(self.shape[0]): unseen_list = self.get_user_unseen_films(user_idx=i) if unseen_list: unseen[i] = unseen_list self.normalize_base() self.factor_svd() self.low_rank_approximate(k=2) self.build_aspect_matrices() for user, movie_list in unseen.iteritems(): for movie in movie_list: prediction = self.get_prediction(user=user, movie=movie) print "Prediction for user {} and movie {}: {}".format(user, movie, prediction) # l_r_appr = np.zeros(self.shape) # for i in range(k): # l_r_appr += self._S[i] * np.dot(self.U.transpose()[i], self.V_t[:,i]) # print l_r_appr # Uk, _Sk, Vk_t = np.linalg.svd(l_r_appr, full_matrices=True) # print "Uk: {}".format(Uk) # print "_Sk: {}".format(_Sk) # print "Vk_t: {}".format(Vk_t)
luizpericolo/alc-2014-3
movie_base.py
Python
gpl-3.0
3,879
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file './QScintilla/SearchWidget.ui'
#
# Created: Tue Nov 18 17:53:58 2014
#      by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
# (Auto-generated by pyuic5 — edit SearchWidget.ui and regenerate instead
# of modifying this module by hand.)

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_SearchWidget(object):
    # Builds the single-row editor search bar: close button, "Find:" label,
    # editable history combo, prev/next buttons and the search-option
    # checkboxes, all in one horizontal layout.
    def setupUi(self, SearchWidget):
        SearchWidget.setObjectName("SearchWidget")
        SearchWidget.resize(973, 25)
        # The bar stretches horizontally but keeps its preferred height.
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(SearchWidget.sizePolicy().hasHeightForWidth())
        SearchWidget.setSizePolicy(sizePolicy)
        self.horizontalLayout = QtWidgets.QHBoxLayout(SearchWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.closeButton = QtWidgets.QToolButton(SearchWidget)
        self.closeButton.setText("")
        self.closeButton.setObjectName("closeButton")
        self.horizontalLayout.addWidget(self.closeButton)
        self.label = QtWidgets.QLabel(SearchWidget)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        # Search-text combo: editable, keeps history (newest first, no dupes).
        self.findtextCombo = QtWidgets.QComboBox(SearchWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.findtextCombo.sizePolicy().hasHeightForWidth())
        self.findtextCombo.setSizePolicy(sizePolicy)
        self.findtextCombo.setMinimumSize(QtCore.QSize(300, 0))
        self.findtextCombo.setEditable(True)
        self.findtextCombo.setInsertPolicy(QtWidgets.QComboBox.InsertAtTop)
        self.findtextCombo.setDuplicatesEnabled(False)
        self.findtextCombo.setObjectName("findtextCombo")
        self.horizontalLayout.addWidget(self.findtextCombo)
        self.findPrevButton = QtWidgets.QToolButton(SearchWidget)
        self.findPrevButton.setObjectName("findPrevButton")
        self.horizontalLayout.addWidget(self.findPrevButton)
        self.findNextButton = QtWidgets.QToolButton(SearchWidget)
        self.findNextButton.setObjectName("findNextButton")
        self.horizontalLayout.addWidget(self.findNextButton)
        # Search-option checkboxes (labels set in retranslateUi).
        self.caseCheckBox = QtWidgets.QCheckBox(SearchWidget)
        self.caseCheckBox.setObjectName("caseCheckBox")
        self.horizontalLayout.addWidget(self.caseCheckBox)
        self.wordCheckBox = QtWidgets.QCheckBox(SearchWidget)
        self.wordCheckBox.setObjectName("wordCheckBox")
        self.horizontalLayout.addWidget(self.wordCheckBox)
        self.regexpCheckBox = QtWidgets.QCheckBox(SearchWidget)
        self.regexpCheckBox.setObjectName("regexpCheckBox")
        self.horizontalLayout.addWidget(self.regexpCheckBox)
        self.wrapCheckBox = QtWidgets.QCheckBox(SearchWidget)
        self.wrapCheckBox.setObjectName("wrapCheckBox")
        self.horizontalLayout.addWidget(self.wrapCheckBox)
        self.selectionCheckBox = QtWidgets.QCheckBox(SearchWidget)
        self.selectionCheckBox.setObjectName("selectionCheckBox")
        self.horizontalLayout.addWidget(self.selectionCheckBox)

        self.retranslateUi(SearchWidget)
        QtCore.QMetaObject.connectSlotsByName(SearchWidget)
        # Keyboard tab order: combo -> options -> next/prev -> close.
        SearchWidget.setTabOrder(self.findtextCombo, self.caseCheckBox)
        SearchWidget.setTabOrder(self.caseCheckBox, self.wordCheckBox)
        SearchWidget.setTabOrder(self.wordCheckBox, self.regexpCheckBox)
        SearchWidget.setTabOrder(self.regexpCheckBox, self.wrapCheckBox)
        SearchWidget.setTabOrder(self.wrapCheckBox, self.selectionCheckBox)
        SearchWidget.setTabOrder(self.selectionCheckBox, self.findNextButton)
        SearchWidget.setTabOrder(self.findNextButton, self.findPrevButton)
        SearchWidget.setTabOrder(self.findPrevButton, self.closeButton)

    # Installs all user-visible (translatable) strings; called by setupUi
    # and again whenever the application language changes.
    def retranslateUi(self, SearchWidget):
        _translate = QtCore.QCoreApplication.translate
        SearchWidget.setWindowTitle(_translate("SearchWidget", "Find"))
        self.closeButton.setToolTip(_translate("SearchWidget", "Press to close the window"))
        self.label.setText(_translate("SearchWidget", "Find:"))
        self.findPrevButton.setToolTip(_translate("SearchWidget", "Press to find the previous occurrence"))
        self.findNextButton.setToolTip(_translate("SearchWidget", "Press to find the next occurrence"))
        self.caseCheckBox.setText(_translate("SearchWidget", "Match case"))
        self.wordCheckBox.setText(_translate("SearchWidget", "Whole word"))
        self.regexpCheckBox.setText(_translate("SearchWidget", "Regexp"))
        self.wrapCheckBox.setText(_translate("SearchWidget", "Wrap around"))
        self.selectionCheckBox.setText(_translate("SearchWidget", "Selection only"))
davy39/eric
QScintilla/Ui_SearchWidget.py
Python
gpl-3.0
4,989
from django.test import TestCase, RequestFactory from main.models import * from main.views import * from bs4 import BeautifulSoup from .base import * import datetime from feedback.models import IndividualFeedback class StatusCheckTest(TestCase): """Testing the decorator test functions""" def test_user_teacher_test_works(self): elmar = create_teacher() self.assertTrue(is_staff(elmar.user)) self.assertTrue(is_teacher(elmar.user)) self.assertFalse(is_admin(elmar.user)) self.assertFalse(is_student(elmar.user)) def test_staff_admin_status_is_properly_undertood_at_login(self): admin = create_admin() self.assertTrue(is_staff(admin.user)) self.assertFalse(is_teacher(admin.user)) self.assertTrue(is_admin(admin.user)) self.assertFalse(is_student(admin.user)) def test_student_is_student_and_neither_admin_nor_teacher(self): bugs_user = User.objects.create_user( username='bb42', password='ilovecarrots') bugs = Student.objects.create( student_id='bb42', last_name='Bunny', first_name='Bugs', user=bugs_user ) self.assertTrue(is_student(bugs_user)) self.assertFalse(is_staff(bugs_user)) self.assertFalse(is_admin(bugs_user)) self.assertFalse(is_teacher(bugs_user)) class HomePageTest(TeacherUnitTest): """Simple tests for the home page""" def test_home_page_renders_home_template(self): response = self.client.get('/') self.assertTemplateUsed(response, 'home.html') def test_home_page_title_contains_uni_name(self): response = self.client.get('/') self.assertContains(response, 'Acme University') class HomePageForStudentTest(StudentUnitTest): """Student homepage is shown""" def test_student_home_shows_student_template(self): response = self.client.get('/') self.assertTemplateUsed(response, 'student_home.html') def test_student_sees_links_to_all_marksheets(self): student = self.user.student module1 = create_module() performance1 = Performance.objects.create( student=student, module=module1) assessment1 = Assessment.objects.create( module=module1, value=50, title='Essay', available=True, 
resit_available=True ) assessment2 = Assessment.objects.create( module=module1, value=50, title='Exam', available=True ) assessment_result_1 = AssessmentResult.objects.create( assessment=assessment1, mark=30, resit_mark=40, ) feedback_1_1 = IndividualFeedback.objects.create( assessment_result=assessment_result_1, attempt='first', completed=True ) feedback_1_2 = IndividualFeedback.objects.create( assessment_result=assessment_result_1, attempt='resit', completed=True ) performance1.assessment_results.add(assessment_result_1) link1 = ( '<a href="/export_feedback/' + module1.code + '/' + str(module1.year) + '/' + assessment1.slug + '/' + student.student_id + '/' ) link1_1 = link1 + 'first/' link1_2 = link1 + 'resit/' assessment_result_2 = AssessmentResult.objects.create( assessment=assessment2, mark=30, resit_mark=40, ) feedback_2_1 = IndividualFeedback.objects.create( assessment_result=assessment_result_2, attempt='first', completed=True ) performance1.assessment_results.add(assessment_result_2) link2_1 = ( '<a href="/export_feedback/' + module1.code + '/' + str(module1.year) + '/' + assessment2.slug + '/' + student.student_id + '/first/' ) module2 = Module.objects.create( title="Introduction to Squaredance", year=1901, code='i2sq42' ) student.modules.add(module2) performance2 = Performance.objects.create( student=student, module=module2) assessment3 = Assessment.objects.create( module=module2, value=50, title='Essay', available=False, resit_available=False ) assessment_result_3 = AssessmentResult.objects.create( assessment=assessment3, mark=30, resit_mark=40, ) feedback_3_1 = IndividualFeedback.objects.create( assessment_result=assessment_result_3, attempt='first', completed=True ) feedback_3_2 = IndividualFeedback.objects.create( assessment_result=assessment_result_3, attempt='resit', completed=True ) performance2.assessment_results.add(assessment_result_3) link3 = ( '<a href="/export_feedback/' + module2.code + '/' + str(module2.year) + '/' + assessment3.slug + '/' 
+ student.student_id ) link3_1 = link3 + '/first/' link3_2 = link3 + '/resit/' assessment4 = Assessment.objects.create( module=module2, value=50, title='Exam', available=False ) assessment_result_4 = AssessmentResult.objects.create( assessment=assessment4, mark=30, resit_mark=40, ) feedback_4_1 = IndividualFeedback.objects.create( assessment_result=assessment_result_4, attempt='first', completed=True ) performance2.assessment_results.add(assessment_result_4) link4_1 = ( '<a href="/export_feedback/' + module2.code + '/' + str(module2.year) + '/' + assessment2.slug + '/' + student.student_id + '/first/' ) response = self.client.get('/') self.assertContains(response, link1_1) self.assertContains(response, link1_2) self.assertContains(response, link2_1) self.assertNotContains(response, link3_1) self.assertNotContains(response, link3_2) self.assertNotContains(response, link4_1) class AdminDashboardTest(AdminUnitTest): """Checks the Admin Dashboard""" def test_admin_page_uses_right_template(self): response = self.client.get('/admin_dashboard/') self.assertNotContains(response, 'Main Settings') self.user.staff.main_admin = True self.user.staff.save() response = self.client.get('/admin_dashboard/') self.assertContains(response, 'Main Settings') def test_admin_page_shows_all_subjects_and_years_for_main_admin(self): self.user.staff.main_admin = True self.user.staff.save() subject_area_1 = SubjectArea.objects.create(name='Cartoon Studies') subject_area_2 = SubjectArea.objects.create(name='Evil Plotting') course_1 = Course.objects.create( title='BA in Cartoon Studies', short_title='Cartoon Studies', ) course_1.subject_areas.add(subject_area_1) course_2 = Course.objects.create( title='BA in Evil Plotting', short_title='Evil Plotting', ) course_2.subject_areas.add(subject_area_2) course_3 = Course.objects.create( title='BA in Cartoon Studies with Evil Plotting', short_title='Cartoon Studies / Evil Plotting', ) course_3.subject_areas.add(subject_area_1) 
course_3.subject_areas.add(subject_area_2) stuff = set_up_stuff() student_1 = stuff[1] student_1.course = course_1 student_1.year = 1 student_1.save() student_2 = stuff[2] student_2.course = course_2 student_2.year = 2 student_2.save() student_3 = stuff[3] student_3.course = course_3 student_3.year = 3 student_3.save() response = self.client.get('/admin_dashboard/') url = ( '<a href="/assign_tutors/' + subject_area_1.slug + '/1/">' ) self.assertContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_1.slug + '/2/">' ) self.assertNotContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_1.slug + '/3/">' ) self.assertContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_2.slug + '/1/">' ) self.assertNotContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_2.slug + '/2/">' ) self.assertContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_2.slug + '/3/">' ) self.assertContains(response, url) def test_admin_page_shows_own_subjects_and_years_for_normal_admin(self): subject_area_1 = SubjectArea.objects.create(name='Cartoon Studies') self.user.staff.subject_areas.add(subject_area_1) subject_area_2 = SubjectArea.objects.create(name='Evil Plotting') course_1 = Course.objects.create( title='BA in Cartoon Studies', short_title='Cartoon Studies', ) course_1.subject_areas.add(subject_area_1) course_2 = Course.objects.create( title='BA in Evil Plotting', short_title='Evil Plotting', ) course_2.subject_areas.add(subject_area_2) course_3 = Course.objects.create( title='BA in Cartoon Studies with Evil Plotting', short_title='Cartoon Studies / Evil Plotting', ) course_3.subject_areas.add(subject_area_1) course_3.subject_areas.add(subject_area_2) stuff = set_up_stuff() student_1 = stuff[1] student_1.course = course_1 student_1.year = 1 student_1.save() student_2 = stuff[2] student_2.course = course_2 student_2.year = 2 student_2.save() student_3 = stuff[3] student_3.course = course_3 
student_3.year = 3 student_3.save() response = self.client.get('/admin_dashboard/') url = ( '<a href="/assign_tutors/' + subject_area_1.slug + '/1/">' ) self.assertContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_1.slug + '/2/">' ) self.assertNotContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_1.slug + '/3/">' ) self.assertContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_2.slug + '/1/">' ) self.assertNotContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_2.slug + '/2/">' ) self.assertNotContains(response, url) url = ( '<a href="/assign_tutors/' + subject_area_2.slug + '/3/">' ) self.assertNotContains(response, url) class StudentViewTest(TeacherUnitTest): """Tests for the student view function""" def test_student_view_renders_student_view_template(self): student = create_student() response = self.client.get(student.get_absolute_url()) self.assertTemplateUsed(response, 'student_view.html') self.assertContains(response, "bb23") self.assertContains(response, "Bunny") self.assertContains(response, "Bugs") class AddEditStudentTest(TeacherUnitTest): """Tests for the student form function""" def send_form(self): response = self.client.post( '/add_student/', data={ 'student_id': 'bb23', 'last_name': 'Bünny', 'first_name': 'Bugs Middle Names' } ) return response def test_add_edit_student_renders_right_template(self): response = self.client.get('/add_student/') self.assertTemplateUsed(response, 'student_form.html') def test_add_student_adds_student_to_database(self): self.send_form() student = Student.objects.first() self.assertEqual(student.student_id, 'bb23') self.assertEqual(student.last_name, 'Bünny') self.assertEqual(student.first_name, 'Bugs Middle Names') def test_edit_student_shows_correct_data(self): student = create_student() response = self.client.get(student.get_edit_url()) self.assertTemplateUsed(response, 'student_form.html') self.assertContains(response, 'Bunny') 
self.assertContains(response, 'Bugs') self.assertContains(response, 'bb23') class InviteStudentTest(AdminUnitTest): """Already added students can be invited""" def test_students_can_be_invited_users_get_created(self): subject_area = create_subject_area() course = Course.objects.create( title='BA in Cartoon Studies', short_title='BA CS', ) course.subject_areas.add(subject_area) course.save() student1 = create_student() student1.email = 'bb23@acme.edu' student1.save() student2 = Student.objects.create( student_id='bb4223', first_name='Buster Middle Names', last_name='Bunny', email='bb4223@acme.edu', year=2, course=course ) url = '/invite_students/' + subject_area.slug + '/' request = self.factory.post( url, data={ 'selected_student_id': [ student1.student_id, student2.student_id ] } ) request.user = self.user invite_students(request, subject_area.slug, testing=True) user1 = User.objects.get(username='bmnb1') user2 = User.objects.get(username='bmnb2') student1_out = Student.objects.get(student_id='bb23') student2_out = Student.objects.get(first_name='Buster Middle Names') self.assertEqual(student1_out.user, user1) self.assertEqual(student2_out.user, user2) def test_invitation_status_is_displayed_correctly(self): subject_area = create_subject_area() course = Course.objects.create( title='BA in Cartoon Studies', short_title='BA CS', ) course.subject_areas.add(subject_area) course.save() student1 = create_student() # No email address student2 = Student.objects.create( student_id='bb4223', first_name='Buster', last_name='Bunny', year=2, email='bb4423@acme.edu', course=course ) url = '/invite_students/' + subject_area.slug + '/' request = self.factory.post( url, data={'selected_student_id': [ student1.student_id, student2.student_id ] } ) request.user = self.user response = invite_students(request, subject_area.slug, testing=True) soup = BeautifulSoup(response.content) added = str(soup.select('#students_added')[0]) not_added = str(soup.select('#students_without_email')[0]) 
self.assertIn(student1.name(), not_added) self.assertIn(student2.name(), added) class StaffResetPasswordTest(AdminUnitTest): """Password can be reset by staff""" def test_staff_can_reset_password(self): request = self.factory.get( '/reset_password/', data={'email': self.user.email} ) request.user = self.user response = reset_password(request, testing=True) self.assertContains(response, self.user.first_name) class StudentResetPasswordTest(NotYetLoggedInUnitTest): def test_student_can_reset_password(self): user = User.objects.create_user( username='bb42', password='ilovecarrots') student = Student.objects.create( student_id='bb42', last_name='Bunny', first_name='Bugs', user=user, email='bb23@acme.edu' ) request = self.factory.get( '/reset_password/', data={'email': student.email} ) request.user = self.user response = reset_password(request, testing=True) self.assertContains(response, student.short_first_name()) class ModuleViewTest(TeacherUnitTest): """Tests for the module view""" def test_module_view_renders_module_view_template(self): module = Module.objects.create( title="Hunting Practice", code="hp23", year=1900 ) response = self.client.get(module.get_absolute_url()) self.assertTemplateUsed(response, 'module_view.html') def test_performances_in_a_module_are_shown(self): module = Module.objects.create( title="Hunting Practice", code="hp23", year=1900, eligible="1" ) student = Student.objects.create( last_name="Pig", first_name="Porky", student_id="pp2323", year=2 ) response = self.client.post( module.get_add_students_url(), data={'student_ids': [student.student_id]} ) out_response = self.client.get(module.get_absolute_url()) self.assertContains(out_response, "Pig, Porky") def test_only_active_students_appear_in_module_view(self): module = create_module() student1 = create_student() student2 = Student.objects.create( last_name="Pig", first_name="Porky", student_id="pp2323", active=False ) student1.modules.add(module) performance1 = Performance.objects.create( 
student=student1, module=module) performance2 = Performance.objects.create( student=student2, module=module) student2.modules.add(module) response = self.client.get(module.get_absolute_url()) self.assertContains(response, 'Bunny, Bugs') self.assertNotContains(response, 'Pig, Porky') def test_assessment_availability_is_shown_correctly(self): module = create_module() student = create_student() student.modules.add(module) performance = Performance.objects.create( student=student, module=module) assessment = Assessment.objects.create( title="Essay", value=100, available=False, marksheet_type="Something" ) module.assessments.add(assessment) response = self.client.get(module.get_absolute_url()) self.assertContains( response, '<span class="glyphicon glyphicon-eye-close">' ) self.assertContains( response, 'Show Essay to students' ) assessment.available = True assessment.save() response = self.client.get(module.get_absolute_url()) self.assertContains( response, '<span class="glyphicon glyphicon-eye-open">' ) self.assertContains( response, 'Hide Essay from students' ) def test_only_assessments_with_marksheet_show_availability(self): module = create_module() student = create_student() student.modules.add(module) performance = Performance.objects.create( student=student, module=module) assessment1 = Assessment.objects.create( title="Essay", value=50, available=False, marksheet_type="Something" ) assessment2 = Assessment.objects.create( title="Exam", value=50, available=False, ) module.assessments.add(assessment1) module.assessments.add(assessment2) response = self.client.get(module.get_absolute_url()) self.assertContains( response, 'Show Essay to students' ) self.assertNotContains( response, 'Show Exam to students' ) def test_resit_menu_shows_when_required(self): stuff = set_up_stuff() module = stuff[0] module.foundational = True module.save() student1 = stuff[1] student1.qld = True student1.save() student2 = stuff[2] student2.qld = True student2.save() performance1 = 
Performance.objects.get( module=module, student=student1 ) performance2 = Performance.objects.get( module=module, student=student2 ) assessment1 = Assessment.objects.create( module=module, title='Essay', value=50 ) assessment2 = Assessment.objects.create( module=module, title='Presentation', value=50 ) result1_1 = AssessmentResult.objects.create( assessment=assessment1, mark=42 ) result1_2 = AssessmentResult.objects.create( assessment=assessment2, mark=40 ) performance1.assessment_results.add(result1_1) performance1.assessment_results.add(result1_2) result2_1 = AssessmentResult.objects.create( assessment=assessment1, mark=60, ) result2_2 = AssessmentResult.objects.create( assessment=assessment2, mark=80 ) performance2.assessment_results.add(result2_1) performance2.assessment_results.add(result2_2) response = self.client.get(module.get_absolute_url()) resit_string = ( '<a class = "btn btn-default dropdown-toggle" data-toggle' + '="dropdown">Resits <span class="caret"></span></a>' ) self.assertNotContains( response, resit_string ) result1_1.mark = 0 result1_1.save() response = self.client.get(module.get_absolute_url()) self.assertContains( response, resit_string ) result1_1.mark = 50 result1_1.save() result2_1.mark = 39 result2_1.save() response = self.client.get(module.get_absolute_url()) self.assertContains( response, resit_string ) def test_two_resit_with_feedback_symbols_show(self): stuff = set_up_stuff() module = stuff[0] module.foundational = True module.save() student1 = stuff[1] student1.qld = True student1.save() performance1 = Performance.objects.get( module=module, student=student1 ) assessment1 = Assessment.objects.create( module=module, title='Essay', value=50, marksheet_type='ESSAY', resit_marksheet_type='ESSAY', ) assessment2 = Assessment.objects.create( module=module, title='Presentation', value=50, marksheet_type='PRESENTATION', resit_marksheet_type='PRESENTATION', ) result1_1 = AssessmentResult.objects.create( assessment=assessment1, mark=38, 
resit_mark=80 ) result1_2 = AssessmentResult.objects.create( assessment=assessment2, mark=36, resit_mark=80 ) performance1.assessment_results.add(result1_1) performance1.assessment_results.add(result1_2) resit_string_essay = ( '<a href="/individual_feedback/' + stuff[0].code + '/' + str(stuff[0].year) + '/' + assessment1.slug + '/' + stuff[1].student_id + '/resit/">' ) resit_string_presentation = ( '<a href="/individual_feedback/' + stuff[0].code + '/' + str(stuff[0].year) + '/' + assessment2.slug + '/' + stuff[1].student_id + '/resit/">' ) response = self.client.get(module.get_absolute_url()) self.assertContains( response, resit_string_essay ) self.assertContains( response, resit_string_presentation ) def test_two_resit_with_feedback_symbols_show_with_3_assessments(self): stuff = set_up_stuff() module = stuff[0] module.save() student1 = stuff[1] student1.save() performance1 = Performance.objects.get( module=module, student=student1 ) assessment1 = Assessment.objects.create( module=module, title='Essay', value=25, marksheet_type='ESSAY', resit_marksheet_type='ESSAY', ) assessment2 = Assessment.objects.create( module=module, title='Presentation', value=25, marksheet_type='PRESENTATION', resit_marksheet_type='PRESENTATION', ) assessment3 = Assessment.objects.create( module=module, title='Second Essay', value=50, marksheet_type='ESSAY', resit_marksheet_type='ESSAY', ) result1_1 = AssessmentResult.objects.create( assessment=assessment1, mark=30, resit_mark=80 ) result1_2 = AssessmentResult.objects.create( assessment=assessment2, mark=28, resit_mark=80 ) result1_3 = AssessmentResult.objects.create( assessment=assessment3, mark=40, ) performance1.assessment_results.add(result1_1) performance1.assessment_results.add(result1_2) performance1.assessment_results.add(result1_3) resit_string_essay = ( '<a href="/individual_feedback/' + stuff[0].code + '/' + str(stuff[0].year) + '/' + assessment1.slug + '/' + stuff[1].student_id + '/resit/">' ) resit_string_presentation = ( '<a 
href="/individual_feedback/' + stuff[0].code + '/' + str(stuff[0].year) + '/' + assessment2.slug + '/' + stuff[1].student_id + '/resit/">' ) resit_string_second_essay = ( '<a href="/individual_feedback/' + stuff[0].code + '/' + str(stuff[0].year) + '/' + assessment3.slug + '/' + stuff[1].student_id + '/resit/">' ) response = self.client.get(module.get_absolute_url()) self.assertContains( response, resit_string_essay ) self.assertContains( response, resit_string_presentation ) self.assertNotContains( response, resit_string_second_essay ) class AddStudentsToModuleTest(TeacherUnitTest): """Tests for the function to add students to a module""" def test_add_students_to_module_uses_right_template(self): module = create_module() response = self.client.get(module.get_add_students_url()) self.assertTemplateUsed(response, 'add_students_to_module.html') def test_only_students_from_same_subject_areas_and_year_are_shown(self): subject_area1 = create_subject_area() subject_area2 = SubjectArea.objects.create(name="Evil Plotting") course = Course.objects.create(title="BA in Cartoon Studies") course.subject_areas.add(subject_area1) course.save() course2 = Course.objects.create( title="BA in Evil Plotting") course2.subject_areas.add(subject_area2) course2.save() module = create_module() module.subject_areas.add(subject_area1) module.save() student1 = create_student() student1.course = course student1.year = 1 student1.save() student2 = Student.objects.create( last_name="Duck", first_name="Daffy", student_id="dd42", course=course2, year=1 ) student3 = Student.objects.create( last_name="Pig", first_name="Porky", student_id="pp2323", course=course, year=2 ) student4 = Student.objects.create( last_name="Runner", first_name="Road", student_id="rr42", course=course, year=1, active=False ) response = self.client.get(module.get_add_students_url()) self.assertContains(response, 'Bunny') self.assertNotContains(response, 'Duck') self.assertNotContains(response, 'Pig') 
self.assertNotContains(response, 'Runner') def test_submitting_an_empty_form_does_not_break_it(self): module = create_module() response = self.client.post( '/add_students_to_module/%s/%s' % (module.code, module.year), data={} ) self.assertTrue(response.status_code in [301, 302]) class RemoveStudentFromModuleTest(TeacherUnitTest): """Tests for the function to remove a student from a module""" def test_student_removed_from_module_is_not_in_module_anymore(self): module = create_module() student = create_student() student.modules.add(module) Performance.objects.create(module=module, student=student) url = ( '/remove_student_from_module/' + module.code + '/' + str(module.year) + '/' + student.student_id + '/' ) request = self.factory.get(url) request.user = self.user response = remove_student_from_module( request, module.code, module.year, student.student_id) self.assertEqual(Performance.objects.count(), 0) self.assertEqual(student.modules.count(), 0) def test_assessment_results_are_deleted(self): module = create_module() student = create_student() student.modules.add(module) performance = Performance.objects.create( module=module, student=student) assessment = Assessment.objects.create( module=module, title='Essay' ) result = AssessmentResult.objects.create(assessment=assessment) self.assertEqual(AssessmentResult.objects.count(), 1) performance.assessment_results.add(result) url = ( '/remove_student_from_module/' + module.code + '/' + str(module.year) + '/' + student.student_id + '/' ) response = self.client.get(url) self.assertEqual(AssessmentResult.objects.count(), 0) def test_feedback_gets_deleted(self): module = create_module() student = create_student() student.modules.add(module) performance = Performance.objects.create( module=module, student=student) assessment = Assessment.objects.create( module=module, title='Essay' ) result = AssessmentResult.objects.create(assessment=assessment) performance.assessment_results.add(result) feedback = 
IndividualFeedback.objects.create( assessment_result=result, attempt='first' ) url = ( '/remove_student_from_module/' + module.code + '/' + str(module.year) + '/' + student.student_id + '/' ) response = self.client.get(url) self.assertEqual(IndividualFeedback.objects.count(), 0) class DeleteModuleTest(TeacherUnitTest): """Tests that the Delete Module Function removes performances and marks""" def test_deleting_module_deletes_everything_else(self): module = create_module() module.teachers.add(self.user.staff) student = create_student() student.modules.add(module) performance = Performance.objects.create( module=module, student=student) assessment = Assessment.objects.create( module=module, title="Dissertation", value=100, ) result = AssessmentResult.objects.create( assessment=assessment, mark=60 ) performance.assessment_results.add(result) response = self.client.get(module.get_delete_self_url()) self.assertEqual(Module.objects.count(), 0) self.assertEqual(Student.objects.count(), 1) self.assertEqual(Performance.objects.count(), 0) self.assertEqual(Assessment.objects.count(), 0) self.assertEqual(AssessmentResult.objects.count(), 0) def test_only_instructor_or_admin_can_delete_a_module(self): module = create_module() student = create_student() student.modules.add(module) performance = Performance.objects.create( module=module, student=student) assessment = Assessment.objects.create( module=module, title="Dissertation", value=100, ) result = AssessmentResult.objects.create( assessment=assessment, mark=60 ) performance.assessment_results.add(result) response = self.client.get(module.get_delete_self_url()) self.assertEqual(Module.objects.count(), 1) self.assertEqual(Student.objects.count(), 1) self.assertEqual(Performance.objects.count(), 1) self.assertEqual(Assessment.objects.count(), 1) self.assertEqual(AssessmentResult.objects.count(), 1) class SeminarGroupTest(TeacherUnitTest): """Tests involving the seminar group setup""" def test_seminar_groups_can_be_saved(self): 
stuff = set_up_stuff() module = stuff[0] student1 = stuff[1] student2 = stuff[2] student3 = stuff[3] request = self.factory.post( module.get_seminar_groups_url(), data={ 'action': 'Save students', student1.student_id: '1', student2.student_id: '2', student3.student_id: '1' } ) request.user = self.user response = assign_seminar_groups(request, module.code, module.year) performance1 = Performance.objects.get(student=student1, module=module) performance2 = Performance.objects.get(student=student2, module=module) performance3 = Performance.objects.get(student=student3, module=module) self.assertEqual(performance1.seminar_group, 1) self.assertEqual(performance2.seminar_group, 2) self.assertEqual(performance3.seminar_group, 1) def test_seminar_groups_can_be_randomized_ignoring_previous_values(self): stuff = set_up_stuff() module = stuff[0] student1 = stuff[1] student2 = stuff[2] student3 = stuff[3] student4 = stuff[4] student5 = stuff[5] request = self.factory.post( module.get_seminar_groups_url(), data={ 'action': 'Go', 'ignore': True, 'number_of_groups': '3' } ) request.user = self.user response = assign_seminar_groups(request, module.code, module.year) performance1 = Performance.objects.get(student=student1, module=module) performance2 = Performance.objects.get(student=student2, module=module) performance3 = Performance.objects.get(student=student3, module=module) performance4 = Performance.objects.get(student=student4, module=module) performance5 = Performance.objects.get(student=student5, module=module) self.assertNotEqual(performance1.seminar_group, None) self.assertNotEqual(performance2.seminar_group, None) self.assertNotEqual(performance3.seminar_group, None) self.assertNotEqual(performance4.seminar_group, None) self.assertNotEqual(performance5.seminar_group, None) list_of_seminar_groups = [] list_of_seminar_groups.append(performance1.seminar_group) list_of_seminar_groups.append(performance2.seminar_group) list_of_seminar_groups.append(performance3.seminar_group) 
list_of_seminar_groups.append(performance4.seminar_group) list_of_seminar_groups.append(performance5.seminar_group) self.assertTrue(1 in list_of_seminar_groups) self.assertTrue(2 in list_of_seminar_groups) self.assertTrue(3 in list_of_seminar_groups) def test_seminar_groups_can_be_randomized_leaving_previous_values(self): stuff = set_up_stuff() module = stuff[0] student1 = stuff[1] performance1 = Performance.objects.get(student=student1, module=module) performance1.seminar_group = 1 performance1.save() student2 = stuff[2] student3 = stuff[3] student4 = stuff[4] student5 = stuff[5] request = self.factory.post( module.get_seminar_groups_url(), data={ student2.student_id: '2', 'action': 'Go', 'number_of_groups': '3' } ) request.user = self.user response = assign_seminar_groups(request, module.code, module.year) performance1 = Performance.objects.get(student=student1, module=module) performance2 = Performance.objects.get(student=student2, module=module) performance3 = Performance.objects.get(student=student3, module=module) performance4 = Performance.objects.get(student=student4, module=module) performance5 = Performance.objects.get(student=student5, module=module) self.assertEqual(performance1.seminar_group, 1) self.assertEqual(performance2.seminar_group, 2) self.assertNotEqual(performance3.seminar_group, None) self.assertNotEqual(performance4.seminar_group, None) self.assertNotEqual(performance5.seminar_group, None) def test_seminar_group_overview_uses_correct_template(self): module = create_module() response = self.client.get(module.get_seminar_group_overview_url()) self.assertTemplateUsed(response, 'seminar_group_overview.html') def test_seminar_group_overview_is_correct(self): stuff = set_up_stuff() module = stuff[0] student1 = stuff[1] student2 = stuff[2] student3 = stuff[3] student4 = stuff[4] student5 = stuff[5] performance1 = Performance.objects.get(student=student1, module=module) performance1.seminar_group = 1 performance1.save() performance2 = 
Performance.objects.get(student=student2, module=module) performance2.seminar_group = 2 performance2.save() performance3 = Performance.objects.get(student=student3, module=module) performance3.seminar_group = 1 performance3.save() performance4 = Performance.objects.get(student=student4, module=module) performance4.seminar_group = 2 performance4.save() performance5 = Performance.objects.get(student=student5, module=module) performance5.seminar_group = 1 performance5.save() request = self.factory.get(module.get_seminar_group_overview_url()) request.user = self.user response = seminar_group_overview(request, module.code, module.year) soup = BeautifulSoup(response.content) group_1 = str(soup.select('#group_1')[0]) group_2 = str(soup.select('#group_2')[0]) self.assertIn(student1.short_name(), group_1) self.assertIn(student2.short_name(), group_2) self.assertIn(student3.short_name(), group_1) self.assertIn(student4.short_name(), group_2) self.assertIn(student5.short_name(), group_1) class AssessmentTest(TeacherUnitTest): """Tests involving setting and deleting of assessments""" def test_assessments_page_uses_right_template(self): module = set_up_stuff()[0] response = self.client.get(module.get_assessment_url()) self.assertTemplateUsed(response, 'assessment.html') def test_assessments_can_be_added_to_module(self): module = set_up_stuff()[0] request = self.factory.post( module.get_assessment_url(), data={ 'title': 'Hunting Exercise', 'value': 40, } ) request.user = self.user assessment(request, module.code, module.year) assessment_out = Assessment.objects.first() self.assertEqual(assessment_out.title, 'Hunting Exercise') self.assertEqual(assessment_out.value, 40) def test_assessment_can_be_deleted(self): stuff = set_up_stuff() module = stuff[0] performance = Performance.objects.first() assessment = Assessment.objects.create( module=module, title="Hunting Exercise", value=40 ) result = AssessmentResult.objects.create( assessment=assessment, mark=40 ) 
performance.assessment_results.add(result) self.assertEqual(Assessment.objects.count(), 1) self.assertEqual(AssessmentResult.objects.count(), 1) request = self.factory.get(assessment.get_delete_url()) request.user = self.user delete_assessment(request, module.code, module.year, assessment.slug) self.assertEqual(Assessment.objects.count(), 0) self.assertEqual(AssessmentResult.objects.count(), 0) def test_toggle_assessment_availability_works(self): module = create_module() assessment = Assessment.objects.create( module=module, title='Hunting Exercise', value=100 ) self.assertFalse(assessment.available) request = self.factory.get(assessment.get_toggle_availability_url()) request.user = self.user response = toggle_assessment_availability( request, module.code, module.year, assessment.slug, 'first') assessment_out = Assessment.objects.first() self.assertTrue(assessment_out.available) request = self.factory.get(assessment.get_toggle_availability_url()) request.user = self.user response = toggle_assessment_availability( request, module.code, module.year, assessment.slug, 'first') assessment_out = Assessment.objects.first() self.assertFalse(assessment_out.available) request = self.factory.get( assessment.get_toggle_availability_url('resit')) request.user = self.user response = toggle_assessment_availability( request, module.code, module.year, assessment.slug, 'resit') assessment_out = Assessment.objects.first() self.assertTrue(assessment_out.resit_available) request = self.factory.get( assessment.get_toggle_availability_url('resit')) request.user = self.user response = toggle_assessment_availability( request, module.code, module.year, assessment.slug, 'resit') assessment_out = Assessment.objects.first() self.assertFalse(assessment_out.resit_available) request = self.factory.get( assessment.get_toggle_availability_url('second_resit')) request.user = self.user response = toggle_assessment_availability( request, module.code, module.year, assessment.slug, 'second_resit') 
# (continuation of test_toggle_assessment_availability_works — verify the
# 'second_resit' flag was switched on, then toggle it off again)
assessment_out = Assessment.objects.first()
self.assertTrue(assessment_out.second_resit_available)
request = self.factory.get(
    assessment.get_toggle_availability_url('second_resit'))
request.user = self.user
response = toggle_assessment_availability(
    request, module.code, module.year, assessment.slug, 'second_resit')
assessment_out = Assessment.objects.first()
self.assertFalse(assessment_out.second_resit_available)


class AttendanceTest(TeacherUnitTest):
    """Tests around the attendance function"""

    def test_attendance_uses_correct_template(self):
        module = set_up_stuff()[0]
        response = self.client.get(module.get_attendance_url('all'))
        self.assertTemplateUsed(response, 'attendance.html')

    def test_attendance_form_shows_seminar_group(self):
        # Students in groups 1/1/1/2/2 — the per-group attendance form must
        # list only that group's students; 'all' lists everyone.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student2 = stuff[2]
        student3 = stuff[3]
        student4 = stuff[4]
        student5 = stuff[5]
        performance1 = Performance.objects.get(student=student1, module=module)
        performance2 = Performance.objects.get(student=student2, module=module)
        performance3 = Performance.objects.get(student=student3, module=module)
        performance4 = Performance.objects.get(student=student4, module=module)
        performance5 = Performance.objects.get(student=student5, module=module)
        performance1.seminar_group = 1
        performance1.save()
        performance2.seminar_group = 1
        performance2.save()
        performance3.seminar_group = 1
        performance3.save()
        performance4.seminar_group = 2
        performance4.save()
        performance5.seminar_group = 2
        performance5.save()
        # Group 1 only
        request = self.factory.get(module.get_attendance_url(1))
        request.user = self.user
        response = attendance(request, module.code, module.year, '1')
        self.assertContains(response, student1.last_name)
        self.assertContains(response, student2.last_name)
        self.assertContains(response, student3.last_name)
        self.assertNotContains(response, student4.last_name)
        self.assertNotContains(response, student5.last_name)
        # Group 2 only
        request = self.factory.get(module.get_attendance_url(2))
        request.user = self.user
        response = attendance(request, module.code, module.year, '2')
        self.assertNotContains(response, student1.last_name)
        self.assertNotContains(response, student2.last_name)
        self.assertNotContains(response, student3.last_name)
        self.assertContains(response, student4.last_name)
        self.assertContains(response, student5.last_name)
        # All groups
        request = self.factory.get(module.get_attendance_url('all'))
        request.user = self.user
        response = attendance(request, module.code, module.year, 'all')
        self.assertContains(response, student1.last_name)
        self.assertContains(response, student2.last_name)
        self.assertContains(response, student3.last_name)
        self.assertContains(response, student4.last_name)
        self.assertContains(response, student5.last_name)

    def test_attendance_form_shows_only_active_students(self):
        # An inactive student must not appear in the attendance form, even
        # when assigned to the requested seminar group.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student2 = stuff[2]
        student3 = stuff[3]
        student4 = stuff[4]
        student5 = stuff[5]
        student5.active = False
        student5.save()
        performance1 = Performance.objects.get(student=student1, module=module)
        performance2 = Performance.objects.get(student=student2, module=module)
        performance3 = Performance.objects.get(student=student3, module=module)
        performance4 = Performance.objects.get(student=student4, module=module)
        performance5 = Performance.objects.get(student=student5, module=module)
        performance1.seminar_group = 1
        performance1.save()
        performance2.seminar_group = 1
        performance2.save()
        performance3.seminar_group = 1
        performance3.save()
        performance4.seminar_group = 1
        performance4.save()
        performance5.seminar_group = 1
        performance5.save()
        request = self.factory.get(module.get_attendance_url(1))
        request.user = self.user
        response = attendance(request, module.code, module.year, '1')
        self.assertContains(response, student1.last_name)
        self.assertContains(response, student2.last_name)
        self.assertContains(response, student3.last_name)
        self.assertContains(response, student4.last_name)
        self.assertNotContains(response, student5.last_name)

    def test_attendance_can_be_added_through_form(self):
        # Form keys are '<student_id>_<week>'; values p/a/e mean
        # present/absent/excused (per the assertions below — TODO confirm
        # against the attendance view).
        stuff = set_up_stuff()
        module = stuff[0]
        request = self.factory.post(
            module.get_attendance_url('all'),
            data={
                'bb23_1': 'p',
                'bb23_2': 'a',
                'bb23_3': 'e',
                'dd42_1': 'p',
                'dd42_3': 'a',
                'save': 'Save Changes for all weeks'
            }
        )
        request.user = self.user
        response = attendance(request, module.code, module.year, 'all')
        student1_out = Student.objects.get(student_id='bb23')
        performance1_out = Performance.objects.get(
            student=student1_out, module=module)
        student2_out = Student.objects.get(student_id='dd42')
        performance2_out = Performance.objects.get(
            student=student2_out, module=module)
        self.assertEqual(performance1_out.attendance_for(1), 'p')
        self.assertEqual(performance1_out.attendance_for(2), 'a')
        self.assertEqual(performance1_out.attendance_for(3), 'e')
        self.assertEqual(performance2_out.attendance_for(1), 'p')
        self.assertEqual(performance2_out.attendance_for(2), None)
        self.assertEqual(performance2_out.attendance_for(3), 'a')

    def test_attendance_changes_are_ignored_for_hidden_weeks(self):
        # When the save button targets a single week, only that week's
        # values are stored; pre-existing marks for other weeks survive.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        performance1 = Performance.objects.get(student=student1, module=module)
        performance1.save_attendance('1', 'e')  # pre-existing week-1 mark
        request = self.factory.post(
            module.get_attendance_url('all'),
            data={
                'bb23_1': 'p',
                'bb23_2': 'a',
                'bb23_3': 'e',
                'dd42_1': 'p',
                'dd42_3': 'a',
                'save': 'Save Changes for Week 2'
            }
        )
        request.user = self.user
        attendance(request, module.code, module.year, 'all')
        student1_out = Student.objects.get(student_id='bb23')
        performance1_out = Performance.objects.get(
            student=student1_out, module=module)
        student2_out = Student.objects.get(student_id='dd42')
        performance2_out = Performance.objects.get(
            student=student2_out, module=module)
        self.assertEqual(performance1_out.attendance_for(1), 'e')
        self.assertEqual(performance1_out.attendance_for(2), 'a')
        self.assertEqual(performance1_out.attendance_for(3), None)
        self.assertEqual(performance2_out.attendance_for(1), None)
# (continuation of test_attendance_changes_are_ignored_for_hidden_weeks —
# student dd42's submitted values were outside Week 2 and must be ignored)
self.assertEqual(performance2_out.attendance_for(2), None)
self.assertEqual(performance2_out.attendance_for(3), None)


class MarkAllAssessmentsTest(TeacherUnitTest):
    """Testing the function to mark all for one assessment openly."""

    def test_mark_all_template_is_used(self):
        stuff = set_up_stuff()
        module = stuff[0]
        student = stuff[1]
        assessment = Assessment.objects.create(
            module=module, title="Essay", value=100)
        response = self.client.get(assessment.get_mark_all_url())
        self.assertTemplateUsed(response, 'mark_all.html')

    def test_all_students_are_shown_in_mark_all_page(self):
        # Students enrolled on the module appear; unrelated students do not.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student2 = stuff[2]
        student3 = stuff[3]
        other_student = Student.objects.create(
            first_name="Road",
            last_name="Runner",
            student_id="rr42"
        )
        assessment = Assessment.objects.create(
            module=module, title="Essay", value=100)
        request = self.factory.get(assessment.get_mark_all_url())
        request.user = self.user
        response = mark_all(
            request,
            module.code,
            module.year,
            'essay',
            'first'
        )
        self.assertContains(response, student1.name())
        self.assertContains(response, student2.name())
        self.assertContains(response, student3.name())
        self.assertNotContains(response, other_student.name())

    def test_only_students_who_need_resit_show_in_mark_all_resit_page(self):
        # Two 50% assessments; results below set up pass/fail combinations
        # so that only failing (or concession-holding) students appear on
        # the resit marking page.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student2 = stuff[2]
        student3 = stuff[3]
        student4 = stuff[4]
        assessment1 = Assessment.objects.create(
            module=module, title="Essay", value=50)
        assessment2 = Assessment.objects.create(
            module=module, title="Exam", value=50)
        performance1 = Performance.objects.get(
            module=module,
            student=student1
        )
        performance2 = Performance.objects.get(
            module=module,
            student=student2
        )
        performance3 = Performance.objects.get(
            module=module,
            student=student3
        )
        performance4 = Performance.objects.get(
            module=module,
            student=student4
        )
        result_1_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=60
        )
        performance1.assessment_results.add(result_1_1)
# (continuation of test_only_students_who_need_resit_show_in_mark_all_resit_page)
result_1_2 = AssessmentResult.objects.create(
    assessment=assessment2,
    mark=60
)
performance1.assessment_results.add(result_1_2)
# Student 1 clearly passed and should not be in either
result_2_1 = AssessmentResult.objects.create(
    assessment=assessment1,
    mark=30
)
performance2.assessment_results.add(result_2_1)
result_2_2 = AssessmentResult.objects.create(
    assessment=assessment2,
    mark=30
)
performance2.assessment_results.add(result_2_2)
# Student 2 clearly failed and should be in both
result_3_1 = AssessmentResult.objects.create(
    assessment=assessment1,
    mark=35
)
performance3.assessment_results.add(result_3_1)
result_3_2 = AssessmentResult.objects.create(
    assessment=assessment2,
    mark=40
)
performance3.assessment_results.add(result_3_2)
# Student 3 failed (not so clearly) and should be in 1 only
request = self.factory.get(
    assessment1.get_mark_all_url(attempt='resit')
)
result_4_1 = AssessmentResult.objects.create(
    assessment=assessment1,
    mark=60,
    concessions='G'
)
performance4.assessment_results.add(result_4_1)
result_4_2 = AssessmentResult.objects.create(
    assessment=assessment2,
    mark=60
)
performance4.assessment_results.add(result_4_2)
# Student 4 has concessions for the passed essay and should be in 1
request.user = self.user
response1 = mark_all(
    request,
    module.code,
    module.year,
    'essay',
    'resit'
)
self.assertNotContains(response1, student1.name())
self.assertContains(response1, student2.name())
self.assertContains(response1, student3.name())
self.assertContains(response1, student4.name())
request = self.factory.get(
    assessment2.get_mark_all_url(attempt='resit')
)
request.user = self.user
response2 = mark_all(
    request,
    module.code,
    module.year,
    'exam',
    'resit'
)
self.assertNotContains(response2, student1.name())
self.assertContains(response2, student2.name())
self.assertNotContains(response2, student3.name())
self.assertNotContains(response2, student4.name())

    # Method of MarkAllAssessmentsTest (class header lies earlier in chunk).
    def test_existing_results_show_up_in_mark_all_page(self):
        # Pre-saved marks must be pre-filled in the mark-all form.
        stuff = set_up_stuff()
        module = stuff[0]
# (continuation of test_existing_results_show_up_in_mark_all_page)
student1 = stuff[1]
assessment1 = Assessment.objects.create(
    module=module, title="Essay 1", value=50)
assessment2 = Assessment.objects.create(
    module=module, title="Essay 2", value=50)
performance1 = Performance.objects.get(
    module=module, student=student1)
ar1_1 = AssessmentResult.objects.create(
    assessment=assessment1,
    mark=50
)
ar1_2 = AssessmentResult.objects.create(
    assessment=assessment2,
    mark=60
)
performance1.assessment_results.add(ar1_1)
performance1.assessment_results.add(ar1_2)
request = self.factory.get(assessment1.get_mark_all_url())
request.user = self.user
response = mark_all(
    request,
    module.code,
    module.year,
    'essay-1',
    'first'
)
self.assertContains(response, 60)
# Exact HTML of the pre-filled mark input for this student
# (must match the mark_all template byte for byte).
html = (
    '<input class="form-control assessment_mark" type="number" ' +
    'min="0" max="100" id="essay-1_' +
    student1.student_id +
    '" name="mark_' +
    student1.student_id +
    '" type="number" value="50" /><small>Previously: 50</small>'
)
self.assertContains(response, html)

    # Method of MarkAllAssessmentsTest (class header lies earlier in chunk).
    def test_marks_can_be_saved_with_existing_ar_objects(self):
        # Posting marks updates AssessmentResult rows that already exist.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student2 = stuff[2]
        assessment1 = Assessment.objects.create(
            module=module, title="Essay 1", value=50)
        assessment2 = Assessment.objects.create(
            module=module, title="Essay 2", value=50)
        result1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=50
        )
        performance1 = Performance.objects.get(module=module, student=student1)
        performance1.assessment_results.add(result1)
        result2 = AssessmentResult.objects.create(assessment=assessment1)
        performance2 = Performance.objects.get(module=module, student=student2)
        performance2.assessment_results.add(result2)
        id1 = 'mark_' + student1.student_id
        id2 = 'mark_' + student2.student_id
        request = self.factory.post(
            assessment1.get_mark_all_url(),
            data={
                id1: '20',
                id2: '40'
            }
        )
        request.user = self.user
        response = mark_all(
            request,
            module.code,
            module.year,
            'essay-1',
            'first'
        )
        performance1_out = Performance.objects.get(
            module=module, student=student1)
# (continuation of test_marks_can_be_saved_with_existing_ar_objects —
# both students' marks must have been updated via the form post)
self.assertEqual(
    performance1_out.get_assessment_result('essay-1', 'first'),
    20
)
performance2_out = Performance.objects.get(
    module=module, student=student2)
self.assertEqual(
    performance2_out.get_assessment_result('essay-1', 'first'),
    40
)

    # Method of MarkAllAssessmentsTest (class header lies earlier in chunk).
    def test_marks_can_be_saved_without_existing_ar_objects(self):
        # Posting marks creates AssessmentResult rows where none exist yet.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student2 = stuff[2]
        assessment1 = Assessment.objects.create(
            module=module, title="Essay 1", value=50)
        assessment2 = Assessment.objects.create(
            module=module, title="Essay 2", value=50)
        id1 = 'mark_' + student1.student_id
        id2 = 'mark_' + student2.student_id
        request = self.factory.post(
            assessment1.get_mark_all_url(),
            data={
                id1: '20',
                id2: '40'
            }
        )
        request.user = self.user
        response = mark_all(
            request,
            module.code,
            module.year,
            'essay-1',
            'first'
        )
        performance1_out = Performance.objects.get(
            module=module, student=student1)
        self.assertEqual(
            performance1_out.get_assessment_result('essay-1', 'first'),
            20
        )
        performance2_out = Performance.objects.get(
            module=module, student=student2)
        self.assertEqual(
            performance2_out.get_assessment_result('essay-1', 'first'),
            40
        )


class MarkAllAssessmentsAnonymouslyTest(TeacherUnitTest):
    """Testing the function to mark all for one assessment anonymously."""

    def test_only_exam_ids_are_shown_if_anonymous_is_set(self):
        # Anonymous marking page shows exam IDs only — never names or
        # student IDs.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student1.exam_id = '1234'
        student1.save()
        student2 = stuff[2]
        student2.exam_id = '2345'
        student2.save()
        student3 = stuff[3]
        student3.exam_id = '3456'
        student3.save()
        assessment = Assessment.objects.create(
            module=module, title="Essay", value=100)
        request = self.factory.get(assessment.get_mark_all_url(anonymous=True))
        request.user = self.user
        response = mark_all_anonymously(
            request,
            module.code,
            module.year,
            'essay',
            'first',
        )
        self.assertContains(response, student1.exam_id)
        self.assertContains(response, student2.exam_id)
        self.assertContains(response, student3.exam_id)
        self.assertNotContains(response, student1.first_name)
        self.assertNotContains(response, student1.last_name)
        self.assertNotContains(response, student1.student_id)
        self.assertNotContains(response, student2.first_name)
        self.assertNotContains(response, student2.last_name)
        self.assertNotContains(response, student2.student_id)
        self.assertNotContains(response, student3.first_name)
        self.assertNotContains(response, student3.last_name)
        self.assertNotContains(response, student3.student_id)

    def test_anonymous_marks_can_be_saved_with_existing_ar_objects(self):
        # Same as the open-marking variant, but form keys use exam IDs.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student1.exam_id = '1234'
        student1.save()
        student2 = stuff[2]
        student2.exam_id = '2345'
        student2.save()
        assessment1 = Assessment.objects.create(
            module=module, title="Essay 1", value=50)
        assessment2 = Assessment.objects.create(
            module=module, title="Essay 2", value=50)
        result1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=50
        )
        performance1 = Performance.objects.get(module=module, student=student1)
        performance1.assessment_results.add(result1)
        result2 = AssessmentResult.objects.create(assessment=assessment1)
        performance2 = Performance.objects.get(module=module, student=student2)
        performance2.assessment_results.add(result2)
        id1 = 'mark_' + student1.exam_id
        id2 = 'mark_' + student2.exam_id
        request = self.factory.post(
            assessment1.get_mark_all_url(anonymous=True),
            data={
                id1: '20',
                id2: '40'
            }
        )
        request.user = self.user
        response = mark_all_anonymously(
            request,
            module.code,
            module.year,
            'essay-1',
            'first'
        )
        performance1_out = Performance.objects.get(
            module=module, student=student1)
        self.assertEqual(
            performance1_out.get_assessment_result('essay-1', 'first'),
            20
        )
        performance2_out = Performance.objects.get(
            module=module, student=student2)
        self.assertEqual(
            performance2_out.get_assessment_result('essay-1', 'first'),
            40
        )

    def test_anonymous_marks_can_be_saved_without_existing_ar_objects(self):
        # AssessmentResult rows get created on save when missing.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student1.exam_id = '1234'
# (continuation of test_anonymous_marks_can_be_saved_without_existing_ar_objects)
student1.save()
student2 = stuff[2]
student2.exam_id = '2345'
student2.save()
assessment1 = Assessment.objects.create(
    module=module, title="Essay 1", value=50)
assessment2 = Assessment.objects.create(
    module=module, title="Essay 2", value=50)
id1 = 'mark_' + student1.exam_id
id2 = 'mark_' + student2.exam_id
request = self.factory.post(
    assessment1.get_mark_all_url(anonymous=True),
    data={
        id1: '20',
        id2: '40'
    }
)
request.user = self.user
response = mark_all_anonymously(
    request,
    module.code,
    module.year,
    'essay-1',
    'first'
)
performance1_out = Performance.objects.get(
    module=module, student=student1)
self.assertEqual(
    performance1_out.get_assessment_result('essay-1', 'first'),
    20
)
performance2_out = Performance.objects.get(
    module=module, student=student2)
self.assertEqual(
    performance2_out.get_assessment_result('essay-1', 'first'),
    40
)

    # Method of MarkAllAssessmentsAnonymouslyTest (class header lies earlier).
    def test_only_students_who_need_resit_show_in_mark_all_resit_a_page(self):
        # Anonymous counterpart of the resit-visibility test: only failing
        # or concession-holding students' exam IDs appear.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student1.exam_id = '1234'
        student1.save()
        student2 = stuff[2]
        student2.exam_id = '2345'
        student2.save()
        student3 = stuff[3]
        student3.exam_id = '3456'
        student3.save()
        student4 = stuff[4]
        student4.exam_id = '4567'
        student4.save()
        assessment1 = Assessment.objects.create(
            module=module, title="Essay", value=50)
        assessment2 = Assessment.objects.create(
            module=module, title="Exam", value=50)
        performance1 = Performance.objects.get(
            module=module,
            student=student1
        )
        performance2 = Performance.objects.get(
            module=module,
            student=student2
        )
        performance3 = Performance.objects.get(
            module=module,
            student=student3
        )
        performance4 = Performance.objects.get(
            module=module,
            student=student4
        )
        result_1_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=60
        )
        performance1.assessment_results.add(result_1_1)
        result_1_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=60
        )
        performance1.assessment_results.add(result_1_2)
        # Student 1 clearly passed and should not be in either
        result_2_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=30
        )
        performance2.assessment_results.add(result_2_1)
        result_2_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=30
        )
        performance2.assessment_results.add(result_2_2)
        # Student 2 clearly failed and should be in both
        result_3_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=35
        )
        performance3.assessment_results.add(result_3_1)
        result_3_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=40
        )
        performance3.assessment_results.add(result_3_2)
        # Student 3 failed (not so clearly) and should be in 1 only
        # NOTE(review): this request (and the `request.user` line below) is
        # dead — it is overwritten by the anonymous request a few lines
        # further down before ever being used.
        request = self.factory.get(
            assessment1.get_mark_all_url(attempt='resit')
        )
        result_4_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=60,
            concessions='G'
        )
        performance4.assessment_results.add(result_4_1)
        result_4_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=60
        )
        performance4.assessment_results.add(result_4_2)
        # Student 4 has concessions for the passed essay and should be in 1
        request.user = self.user
        request = self.factory.get(
            assessment1.get_mark_all_url(
                anonymous=True,
                attempt='resit'
            )
        )
        request.user = self.user
        response1 = mark_all_anonymously(
            request,
            module.code,
            module.year,
            'essay',
            'resit',
        )
        self.assertNotContains(response1, student1.exam_id)
        self.assertContains(response1, student2.exam_id)
        self.assertContains(response1, student3.exam_id)
        self.assertContains(response1, student4.exam_id)
        request = self.factory.get(
            assessment2.get_mark_all_url(
                anonymous=True,
                attempt='resit'
            )
        )
        request.user = self.user
        response2 = mark_all_anonymously(
            request,
            module.code,
            module.year,
            'exam',
            'resit'
        )
        self.assertNotContains(response2, student1.exam_id)
        self.assertContains(response2, student2.exam_id)
        self.assertNotContains(response2, student3.exam_id)
        self.assertNotContains(response2, student4.exam_id)


class AddEditStaffTest(AdminUnitTest):
    """Tests for adding and editing a staff member"""

    def test_staff_can_be_added_new_user_gets_created(self):
        # Posting the add-staff form creates both a User and a linked Staff.
        subject_area = SubjectArea.objects.create(name='Cartoon Studies')
        request = self.factory.post('/add_staff/', data={
            'first_name': 'Elmar',
            'last_name': 'Fudd',
            'email': 'elmar.fudd@acme.edu',
            'role': 'teacher'
        })
        request.user = self.user
        add_or_edit_staff(request, testing=True)
        user = User.objects.get(last_name='Fudd')
        staff = Staff.objects.get(user=user)
        self.assertEqual(user.staff, staff)
        self.assertEqual(user.first_name, 'Elmar')
        self.assertEqual(user.email, 'elmar.fudd@acme.edu')
        self.assertEqual(staff.role, 'teacher')

    def test_form_for_existing_staff_shows_right_details(self):
        # Edit form must be pre-populated from the existing User/Staff.
        user_in = create_user()
        subject_area = SubjectArea.objects.create(name='Cartoon Studies')
        staff_in = Staff.objects.create(user=user_in, role='teacher')
        staff_in.subject_areas.add(subject_area)
        staff_in.save()
        request = self.factory.get(staff_in.get_edit_url())
        request.user = self.user
        response = add_or_edit_staff(request, user_in.username)
        soup = BeautifulSoup(response.content)
        first_name = str(soup.select('#id_first_name')[0]['value'])
        self.assertEqual(first_name, 'Elmar')
        last_name = str(soup.select('#id_last_name')[0]['value'])
        self.assertEqual(last_name, 'Fudd')
        last_name = str(soup.select('#id_email')[0]['value'])
        self.assertEqual(last_name, 'e.fudd@acme.edu')
        teacher_option = str(soup.find(value='teacher'))
        self.assertTrue('selected="selected"' in teacher_option)

    def test_staff_member_can_be_edited(self):
        # Posting the edit form updates name and role on the existing user.
        user_in = User.objects.create_user(
            'ef10', 'e.fudd@acme.edu', 'rabbitseason')
        user_in.last_name = 'Fadd'
        user_in.first_name = 'Elmar'
        user_in.save()
        subject_area = SubjectArea.objects.create(name='Cartoon Studies')
        staff_in = Staff.objects.create(user=user_in, role='teacher')
        staff_in.subject_areas.add(subject_area)
        staff_in.save()
        request = self.factory.post(staff_in.get_edit_url(), data={
            'first_name': 'Elmar',
            'last_name': 'Fudd',
            'email': 'elmar.fudd@acme.edu',
            'role': 'admin'
        })
        request.user = self.user
# (continuation of test_staff_member_can_be_edited — apply the edit and
# verify name and role were changed)
add_or_edit_staff(request, user_in.username, testing=True)
staff_out = Staff.objects.get(user=user_in)
self.assertEqual(staff_out.user.last_name, 'Fudd')
self.assertEqual(staff_out.role, 'admin')


class ViewStaffTest(AdminUnitTest):
    """Tests for Viewing Staff Members"""

    def test_staff_view_by_subject_uses_correct_template(self):
        response = self.client.get('/view_staff_by_subject/')
        self.assertTemplateUsed(response, 'all_staff_by_subject.html')

    def test_staff_view_by_subject_contains_staff(self):
        # Each subject-area table (id = subject slug) lists exactly the
        # staff attached to that subject area.
        subject_area_1 = create_subject_area()
        subject_area_2 = SubjectArea.objects.create(name='Evil Plotting')
        staff1 = create_teacher()
        staff1.subject_areas.add(subject_area_1)
        staff1.save()
        user2 = User.objects.create_user(
            'ys142', 'y.sam@acme.edu', 'squaredance')
        user2.last_name = 'Sam'
        user2.first_name = 'Yosemite'
        user2.save()
        staff2 = Staff.objects.create(user=user2, role='Teacher')
        staff2.subject_areas.add(subject_area_1)
        staff2.subject_areas.add(subject_area_2)
        staff2.save()
        user3 = User.objects.create_user(
            'ta123', 't.avery@acme.edu', 'othergod')
        user3.first_name = 'Tex'
        user3.last_name = 'Avery'
        user3.save()
        staff3 = Staff.objects.create(user=user3, role='Admin')
        staff3.subject_areas.add(subject_area_1)
        staff3.save()
        request = self.factory.get('/view_staff_by_subject/')
        request.user = self.user
        response = view_staff_by_subject(request)
        soup = BeautifulSoup(response.content)
        table1 = str(soup.find(id=subject_area_1.slug))
        self.assertTrue(staff1.name() in table1)
        self.assertTrue(staff2.name() in table1)
        self.assertTrue(staff3.name() in table1)
        table2 = str(soup.find(id=subject_area_2.slug))
        self.assertFalse(staff1.name() in table2)
        self.assertTrue(staff2.name() in table2)
        self.assertFalse(staff3.name() in table2)

    def test_staff_view_by_name_contains_staff(self):
        # The flat by-name listing contains every staff member.
        subject_area_1 = create_subject_area()
        subject_area_2 = SubjectArea.objects.create(name='Evil Plotting')
        staff1 = create_teacher()
        staff1.subject_areas.add(subject_area_1)
        staff1.save()
        user2 = User.objects.create_user(
            'ys142', 'y.sam@acme.edu', 'squaredance')
        user2.last_name = 'Sam'
        user2.first_name = 'Yosemite'
        user2.save()
        staff2 = Staff.objects.create(user=user2, role='Teacher')
        staff2.subject_areas.add(subject_area_1)
        staff2.subject_areas.add(subject_area_2)
        staff2.save()
        user3 = User.objects.create_user(
            'ta142', 't.avery@acme.edu', 'othergod')
        user3.first_name = 'Tex'
        user3.last_name = 'Avery'
        user3.save()
        staff3 = Staff.objects.create(user=user3, role='Admin')
        staff3.subject_areas.add(subject_area_1)
        staff3.save()
        request = self.factory.get('/view_staff_by_name/')
        request.user = self.user
        response = view_staff_by_name(request)
        self.assertContains(response, staff1.name())
        self.assertContains(response, staff2.name())
        self.assertContains(response, staff3.name())


class YearViewTest(AdminUnitTest):
    """Tests around the year view function from a teacher's perspective"""

    def test_year_view_uses_right_template(self):
        response = self.client.get('/students/all/')
        self.assertTemplateUsed(response, 'year_view.html')

    def test_teachers_see_all_students_from_their_only_subject_area(self):
        # Teacher attached to one subject area sees students on courses
        # that include that subject area (including joint courses).
        stuff = set_up_stuff()
        subject_area1 = SubjectArea.objects.create(name="Cartoon Studies")
        subject_area2 = SubjectArea.objects.create(name="Evil Plotting")
        self.user.staff.subject_areas.add(subject_area1)
        course1 = Course.objects.create(
            title="BA in Cartoon Studies", short_title="Cartoon Studies")
        course1.subject_areas.add(subject_area1)
        course2 = Course.objects.create(
            title="BA in Evil Plotting", short_title="Evil Plotting")
        course2.subject_areas.add(subject_area2)
        course3 = Course.objects.create(
            title="BA in Cartoon Studies with Evil Plotting",
            short_title="Cartoon Studies / Evil Plotting"
        )
        course3.subject_areas.add(subject_area1)
        course3.subject_areas.add(subject_area2)
        student1 = stuff[1]
        student1.year = 1
        student1.course = course1
        student1.save()
        student2 = stuff[2]
        student2.year = 1
        student2.course = course1
        student2.save()
        student3 = stuff[3]
        student3.course = course2
# (continuation of test_teachers_see_all_students_from_their_only_subject_area —
# student3 is on the other subject's course and must NOT be shown)
student3.year = 1
student3.save()
student4 = stuff[4]
student4.course = course3
student4.year = 1
student4.save()
request = self.factory.get('/year_view/1/')
request.user = self.user
response = year_view(request, '1')
self.assertContains(response, student1.last_name)
self.assertContains(response, student2.last_name)
self.assertNotContains(response, student3.last_name)
self.assertContains(response, student4.last_name)

    # Method of YearViewTest (class header lies earlier in the chunk).
    def test_teachers_see_all_students_from_their_many_subject_areas(self):
        # With both subject areas attached, the teacher sees students of
        # all three courses.
        stuff = set_up_stuff()
        subject_area1 = SubjectArea.objects.create(name="Cartoon Studies")
        subject_area2 = SubjectArea.objects.create(name="Evil Plotting")
        self.user.staff.subject_areas.add(subject_area1)
        self.user.staff.subject_areas.add(subject_area2)
        course1 = Course.objects.create(
            title="BA in Cartoon Studies", short_title="Cartoon Studies")
        course1.subject_areas.add(subject_area1)
        course2 = Course.objects.create(
            title="BA in Evil Plotting", short_title="Evil Plotting")
        course2.subject_areas.add(subject_area2)
        course3 = Course.objects.create(
            title="BA in Cartoon Studies with Evil Plotting",
            short_title="Cartoon Studies / Evil Plotting"
        )
        course3.subject_areas.add(subject_area1)
        course3.subject_areas.add(subject_area2)
        student1 = stuff[1]
        student1.year = 1
        student1.course = course1
        student1.save()
        student2 = stuff[2]
        student2.year = 1
        student2.course = course1
        student2.save()
        student3 = stuff[3]
        student3.course = course2
        student3.year = 1
        student3.save()
        student4 = stuff[4]
        student4.course = course3
        student4.year = 1
        student4.save()
        request = self.factory.get('/year_view/1/')
        request.user = self.user
        response = year_view(request, '1')
        self.assertContains(response, student1.last_name)
        self.assertContains(response, student2.last_name)
        self.assertContains(response, student3.last_name)
        self.assertContains(response, student4.last_name)

    def test_main_admin_sees_all_active_students_for_a_year_are_shown(self):
        stuff = set_up_stuff()
        self.user.staff.main_admin = True
# (continuation of test_main_admin_sees_all_active_students_for_a_year_are_shown —
# year filter excludes student4 (year 2) and student5 (inactive))
self.user.staff.save()
student1 = stuff[1]
student2 = stuff[2]
student3 = stuff[3]
student4 = stuff[4]
student4.year = 2
student4.save()
student5 = stuff[5]
student5.active = False
student5.save()
request = self.factory.get('/year_view/1/')
request.user = self.user
response = year_view(request, '1')
self.assertContains(response, student1.last_name)
self.assertContains(response, student2.last_name)
self.assertContains(response, student3.last_name)
self.assertNotContains(response, student4.last_name)
self.assertNotContains(response, student5.last_name)

    # Method of YearViewTest (class header lies earlier in the chunk).
    def test_only_admin_and_programme_director_see_edit_stuff(self):
        # The bulk-edit UI ('bulkfunctions') is visible to admins and
        # programme directors, but not to plain teachers.
        stuff = set_up_stuff()
        subject_area = create_subject_area()
        course = create_course()
        course.subject_areas.add(subject_area)
        self.user.staff.role = 'admin'
        self.user.staff.subject_areas.add(subject_area)
        self.user.staff.save()
        student1 = stuff[1]
        student1.course = course
        student1.save()
        student2 = stuff[2]
        student2.course = course
        student2.save()
        # Admin: sees bulk functions
        request = self.factory.get('/year_view/1/')
        request.user = self.user
        response = year_view(request, '1')
        self.assertContains(response, 'bulkfunctions')
        # Plain teacher: does not
        self.user.staff.role = 'teacher'
        self.user.staff.save()
        request = self.factory.get('/year_view/1/')
        request.user = self.user
        response = year_view(request, '1')
        self.assertNotContains(response, 'bulkfunctions')
        # Teacher who is programme director: sees them again
        self.user.staff.role = 'teacher'
        self.user.staff.programme_director = True
        self.user.staff.save()
        request = self.factory.get('/year_view/1/')
        request.user = self.user
        response = year_view(request, '1')
        self.assertContains(response, 'bulkfunctions')

    def test_bulk_changing_functions_work(self):
        # Exercises every bulk action of the year view in sequence:
        # course change, QLD flag, start year, year, active flag, delete.
        stuff = set_up_stuff()
        subject_area = create_subject_area()
        course1 = create_course()
        course1.subject_areas.add(subject_area)
        course2 = Course.objects.create(
            title='BA in Evil Plotting', short_title='Evil Plotting')
        subject_area2 = SubjectArea.objects.create(name='Evil Plotting')
        course2.subject_areas.add(subject_area2)
        self.user.staff.role = 'admin'
# (continuation of test_bulk_changing_functions_work)
self.user.staff.subject_areas.add(subject_area)
self.user.staff.save()
student1 = stuff[1]
student1.course = course1
student1.qld = True
student1.save()
student2 = stuff[2]
student2.course = course1
student2.qld = True
student2.save()
student3 = stuff[3]
student3.course = course1
student3.qld = True
student3.save()
stuff[4].delete()
stuff[5].delete()
# Set course
request = self.factory.post('/year_view/1/', data={
    'selected_student_id': [student2.student_id, student3.student_id],
    'modify': 'course_BA in Evil Plotting'
})
request.user = self.user
response = year_view(request, '1')
student1_out = Student.objects.get(student_id=student1.student_id)
student2_out = Student.objects.get(student_id=student2.student_id)
student3_out = Student.objects.get(student_id=student3.student_id)
self.assertEqual(student1_out.course, course1)
self.assertEqual(student2_out.course, course2)
self.assertEqual(student3_out.course, course2)
# Set QLD
request = self.factory.post('/year_view/1/', data={
    'selected_student_id': [student1.student_id, student2.student_id],
    'modify': 'qld_off'
})
request.user = self.user
response = year_view(request, '1')
student1_out = Student.objects.get(student_id=student1.student_id)
student2_out = Student.objects.get(student_id=student2.student_id)
student3_out = Student.objects.get(student_id=student3.student_id)
self.assertEqual(student1_out.qld, False)
self.assertEqual(student2_out.qld, False)
self.assertEqual(student3_out.qld, True)
# Set begin of studies
request = self.factory.post('/year_view/1/', data={
    'selected_student_id': [student1.student_id, student2.student_id],
    'modify': 'since_1900'
})
request.user = self.user
response = year_view(request, '1')
student1_out = Student.objects.get(student_id=student1.student_id)
student2_out = Student.objects.get(student_id=student2.student_id)
student3_out = Student.objects.get(student_id=student3.student_id)
self.assertEqual(student1_out.since, 1900)
self.assertEqual(student2_out.since, 1900)
self.assertEqual(student3_out.since, None) # Set Year request = self.factory.post('/year_view/1/', data={ 'selected_student_id': [student1.student_id, student2.student_id], 'modify': 'year_2' }) request.user = self.user response = year_view(request, '1') student1_out = Student.objects.get(student_id=student1.student_id) student2_out = Student.objects.get(student_id=student2.student_id) student3_out = Student.objects.get(student_id=student3.student_id) self.assertEqual(student1_out.year, 2) self.assertEqual(student2_out.year, 2) self.assertEqual(student3_out.year, 1) # Active request = self.factory.post('/year_view/1/', data={ 'selected_student_id': [student1.student_id, student2.student_id], 'modify': 'active_no' }) request.user = self.user response = year_view(request, '1') student1_out = Student.objects.get(student_id=student1.student_id) student2_out = Student.objects.get(student_id=student2.student_id) student3_out = Student.objects.get(student_id=student3.student_id) self.assertEqual(student1_out.active, False) self.assertEqual(student2_out.active, False) self.assertEqual(student3_out.active, True) # Delete request = self.factory.post('/year_view/1/', data={ 'selected_student_id': [student1.student_id, student2.student_id], 'modify': 'delete_yes' }) request.user = self.user response = year_view(request, '1') self.assertEqual(Student.objects.count(), 1) def test_deleting_student_deletes_everything(self): module = create_module() student = create_student() student.modules.add(module) performance = Performance.objects.create( module=module, student=student) assessment = Assessment.objects.create( module=module, title='Essay' ) result = AssessmentResult.objects.create(assessment=assessment) feedback = IndividualFeedback.objects.create( assessment_result=result, attempt='first' ) self.assertEqual(AssessmentResult.objects.count(), 1) self.assertEqual(IndividualFeedback.objects.count(), 1) performance.assessment_results.add(result) request = 
self.factory.post('/year_view/1/', data={ 'selected_student_id': [student.student_id], 'modify': 'delete_yes' }) request.user = self.user response = year_view(request, '1') self.assertEqual(AssessmentResult.objects.count(), 0) self.assertEqual(IndividualFeedback.objects.count(), 0) class CSVParsingTests(AdminUnitTest): """Tests for the CSV Parsing""" def test_csv_data_gets_parsed_properly(self): parsed_csvlist = ( 'bb42;Bunny;Bugs;1900;1;bb42@acme.edu;+112345678/////' + 'dd23;Duck;Daffy;1900;1;dd23@acme.edu;+123456789/////' + 'pp42;Pig;Porky;1899;2;pp42@acme.edu;+134567890/////' + 'test;wrong;entry;to;beignored' ) data = Data.objects.create(id='randomstring', value=parsed_csvlist) request = self.factory.post('/parse_csv/randomstring/', data={ 'column1': 'student_id', 'column2': 'last_name', 'column3': 'first_name', 'column4': 'since', 'column5': 'year', 'column6': 'email', 'column7': 'phone_number', 'exclude': '4' }) request.user = self.user parse_csv(request, data.id) self.assertEqual(Student.objects.count(), 3) student1 = Student.objects.get(student_id='bb42') student2 = Student.objects.get(student_id='dd23') student3 = Student.objects.get(student_id='pp42') self.assertEqual(student1.last_name, 'Bunny') self.assertEqual(student1.first_name, 'Bugs') self.assertEqual(student1.since, 1900) self.assertEqual(student1.email, 'bb42@acme.edu') self.assertEqual(student1.phone_number, '+112345678') class AssignTutorsTest(AdminUnitTest): """Tests for the assigning tutors view from an admin perspective""" def test_right_template_used(self): SubjectArea.objects.create(name="Cartoon Studies") response = self.client.get('/assign_tutors/cartoon-studies/1/') self.assertTemplateUsed(response, 'assign_tutors.html') def test_assign_tutors_view_shows_right_tutors(self): subject_area1 = SubjectArea.objects.create(name="Cartoon Studies") subject_area2 = SubjectArea.objects.create(name="Evil Plotting") user1 = User.objects.create_user( username='ef1', password='rabbitseason', 
            last_name='Fudd',
            first_name='Elmar'
        )
        staff1 = Staff.objects.create(user=user1, role='teacher')
        staff1.subject_areas.add(subject_area1)
        user2 = User.objects.create_user(
            username='ys2',
            password='squaredance',
            last_name='Sam',
            first_name='Yosemite'
        )
        staff2 = Staff.objects.create(user=user2, role='teacher')
        staff2.subject_areas.add(subject_area2)
        user3 = User.objects.create_user(
            username='mtm3',
            password='zapp',
            last_name='The Martian',
            first_name='Marvin'
        )
        staff3 = Staff.objects.create(user=user3, role='teacher')
        staff3.subject_areas.add(subject_area1)
        staff3.subject_areas.add(subject_area2)
        request = self.factory.get('/assign_tutors/cartoon-studies/1')
        request.user = self.user
        response = assign_tutors(request, 'cartoon-studies', '1')
        # NOTE(review): no explicit parser passed to BeautifulSoup — emits a
        # warning on newer bs4; consider features='html.parser'.
        soup = BeautifulSoup(response.content)
        table = str(soup.select('#teachers')[0])
        # staff1 and staff3 teach Cartoon Studies; staff2 does not.
        self.assertTrue(user1.last_name in table)
        self.assertFalse(user2.last_name in table)
        self.assertTrue(user3.last_name in table)

    def test_assign_tutors_view_shows_right_students(self):
        # Students qualify through their course's subject areas AND the
        # requested year; joint-honours courses (course3) count for both.
        subject_area1 = SubjectArea.objects.create(name="Cartoon Studies")
        subject_area2 = SubjectArea.objects.create(name="Evil Plotting")
        course1 = Course.objects.create(title='BA in Cartoon Studies')
        course1.subject_areas.add(subject_area1)
        course2 = Course.objects.create(title='BA in Evil Plotting')
        course2.subject_areas.add(subject_area2)
        course3 = Course.objects.create(
            title='BA in Cartoon Studies with Evil Plotting')
        course3.subject_areas.add(subject_area1, subject_area2)
        user1 = User.objects.create_user(
            username='ef1',
            password='rabbitseason',
            last_name='Fudd',
            first_name='Elmar'
        )
        staff1 = Staff.objects.create(user=user1, role='teacher')
        staff1.subject_areas.add(subject_area1)
        student1 = Student.objects.create(
            student_id='bb42',
            first_name='Bugs',
            last_name='Bunny',
            course=course1,
            year=1
        )
        student2 = Student.objects.create(
            student_id='dd23',
            first_name='Duck',
            last_name='Daffy',
            course=course2,
            year=1
        )
        student3 = Student.objects.create(
            student_id='pp23',
            first_name='Porky',
            last_name='Pig',
            course=course3,
            year=1
        )
        student4 = Student.objects.create(
            student_id='rr23',
            first_name='Road',
            last_name='Runner',
            course=course1,
            year=2
        )
        request = self.factory.get('/assign_tutors/cartoon-studies/1')
        request.user = self.user
        response = assign_tutors(request, 'cartoon-studies', '1')
        self.assertContains(response, 'Bunny')
        self.assertNotContains(response, 'Duck')      # wrong subject area
        self.assertContains(response, 'Pig')          # joint course counts
        self.assertNotContains(response, 'Runner')    # wrong year

    def test_tutors_can_be_assigned(self):
        # POSTing student_id -> teacher-username pairs sets each tutor FK;
        # unselected students keep tutor=None.
        subject_area = SubjectArea.objects.create(name="Cartoon Studies")
        course = Course.objects.create(title='BA in Cartoon Studies')
        course.subject_areas.add(subject_area)
        user1 = User.objects.create_user(
            username='ef1',
            password='rabbitseason',
            last_name='Fudd',
            first_name='Elmar'
        )
        staff1 = Staff.objects.create(user=user1, role='teacher')
        staff1.subject_areas.add(subject_area)
        user2 = User.objects.create_user(
            username='ys2',
            password='squaredance',
            last_name='Sam',
            first_name='Yosemite'
        )
        staff2 = Staff.objects.create(user=user2, role='teacher')
        staff2.subject_areas.add(subject_area)
        student1 = Student.objects.create(
            student_id='bb42',
            first_name='Bugs',
            last_name='Bunny',
            course=course,
            year=1
        )
        student2 = Student.objects.create(
            student_id='dd23',
            first_name='Duck',
            last_name='Daffy',
            course=course,
            year=1
        )
        student3 = Student.objects.create(
            student_id='pp23',
            first_name='Porky',
            last_name='Pig',
            course=course,
            year=1
        )
        student4 = Student.objects.create(
            student_id='rr23',
            first_name='Road',
            last_name='Runner',
            course=course,
            year=1
        )
        request = self.factory.post(
            '/assign_tutors/cartoon-studies/1',
            data={
                'bb42': 'ef1',
                'dd23': 'ys2',
                'pp23': 'ef1'
            }
        )
        request.user = self.user
        response = assign_tutors(request, 'cartoon-studies', '1')
        student1_out = Student.objects.get(student_id='bb42')
        self.assertEqual(student1_out.tutor, staff1)
        student2_out = Student.objects.get(student_id='dd23')
        self.assertEqual(student2_out.tutor, staff2)
        student3_out = Student.objects.get(student_id='pp23')
        self.assertEqual(student3_out.tutor, staff1)
        student4_out = Student.objects.get(student_id='rr23')
        self.assertEqual(student4_out.tutor, None)


class AllTuteeMeetingTest(TeacherUnitTest):
    """Tests about the function showing all tutee meetings"""

    def test_page_can_only_be_seen_by_pd(self):
        # Access requires the programme_director flag on the staff record.
        subject_area = create_subject_area()
        url = (
            '/all_tutee_meetings/' +
            subject_area.slug +
            '/1/'
        )
        request = self.factory.get(url)
        request.user = self.user
        response = all_tutee_meetings(request, 'cartoon-studies', '1')
        self.assertNotEqual(response.status_code, 200)
        self.user.staff.programme_director = True
        self.user.staff.save()
        request = self.factory.get(url)
        request.user = self.user
        response = all_tutee_meetings(request, 'cartoon-studies', '1')
        self.assertEqual(response.status_code, 200)

    def test_page_uses_right_template(self):
        subject_area = create_subject_area()
        url = (
            '/all_tutee_meetings/' +
            subject_area.slug +
            '/1/'
        )
        self.user.staff.programme_director = True
        self.user.staff.save()
        response = self.client.get(url)
        self.assertTemplateUsed(response, 'all_tutees.html')

    def test_students_in_the_right_year_show_up(self):
        # Requesting year 1 must list the year-1 student only.
        subject_area = create_subject_area()
        course = Course.objects.create(title='Cartoon Studies')
        course.subject_areas.add(subject_area)
        student1 = Student.objects.create(
            student_id='bb1',
            first_name='Bugs',
            last_name='Bunny',
            year=1,
            course=course
        )
        student2 = Student.objects.create(
            student_id='dd1',
            first_name='Duck',
            last_name='Daffy',
            year=2,
            course=course
        )
        url = (
            '/all_tutee_meetings/' +
            subject_area.slug +
            '/1/'
        )
        self.user.staff.programme_director = True
        self.user.staff.save()
        request = self.factory.get(url)
        request.user = self.user
        response = all_tutee_meetings(request, 'cartoon-studies', '1')
        self.assertContains(response, student1.get_absolute_url())
        self.assertNotContains(response, student2.get_absolute_url())

    def test_tutor_appears_on_page(self):
        subject_area = create_subject_area()
        course = Course.objects.create(title='Cartoon Studies')
        course.subject_areas.add(subject_area)
        teacher = create_teacher()
        student1 = Student.objects.create(
            student_id='bb1',
            first_name='Bugs',
            last_name='Bunny',
            year=1,
            course=course,
            tutor=teacher
        )
        url = (
            '/all_tutee_meetings/' +
            subject_area.slug +
            '/1/'
        )
        self.user.staff.programme_director = True
        self.user.staff.save()
        request = self.factory.get(url)
        request.user = self.user
        response = all_tutee_meetings(request, 'cartoon-studies', '1')
        self.assertContains(response, student1.get_absolute_url())
        self.assertContains(response, teacher.name())

    def test_tutor_meetings_appear(self):
        subject_area = create_subject_area()
        course = Course.objects.create(title='Cartoon Studies')
        course.subject_areas.add(subject_area)
        teacher = create_teacher()
        student1 = Student.objects.create(
            student_id='bb1',
            first_name='Bugs',
            last_name='Bunny',
            year=1,
            course=course,
            tutor=teacher
        )
        student2 = Student.objects.create(
            student_id='dd1',
            first_name='Duck',
            last_name='Daffy',
            year=1,
            course=course,
            tutor=teacher
        )
        date = datetime.date(1900, 1, 1)
        meeting1 = TuteeSession.objects.create(
            tutor=teacher,
            tutee=student1,
            date_of_meet=date,
            notes="Some Text"
        )
        url = (
            '/all_tutee_meetings/' +
            subject_area.slug +
            '/1/'
        )
        self.user.staff.programme_director = True
        self.user.staff.save()
        request = self.factory.get(url)
        request.user = self.user
        response = all_tutee_meetings(request, 'cartoon-studies', '1')
        # The meeting date is rendered as '1 Jan 1900' with a link.
        self.assertContains(response, '1 Jan 1900')
        self.assertContains(response, meeting1.get_absolute_url())


class MyTuteesTests(TeacherUnitTest):
    """Making sure that the my tutee view shows everything necessary"""

    def test_all_tutees_are_shown(self):
        # Only students whose tutor is the logged-in teacher appear.
        stuff = set_up_stuff()
        student1 = stuff[1]
        student2 = stuff[2]
        student3 = stuff[3]
        student4 = stuff[4]
        student5 = stuff[5]
        student1.tutor = self.user.staff
        student1.save()
        student2.tutor = self.user.staff
        student2.save()
        student3.tutor = self.user.staff
        student3.save()
        request = self.factory.get('/my_tutees/')
        request.user = self.user
        response = my_tutees(request)
        self.assertContains(response, student1.name())
        self.assertContains(response, student2.name())
        self.assertContains(response, student3.name())
        self.assertNotContains(response, student4.name())
        self.assertNotContains(response, student5.name())

    def test_all_tutee_meetings_are_shown(self):
        student = create_student()
        student.tutor = self.user.staff
        student.save()
        date1 = datetime.date(1900, 1, 1)
        date2 = datetime.date(1900, 1, 2)
        meeting1 = TuteeSession.objects.create(
            tutor=self.user.staff,
            tutee=student,
            date_of_meet=date1,
            notes='Text'
        )
        meeting2 = TuteeSession.objects.create(
            tutor=self.user.staff,
            tutee=student,
            date_of_meet=date2,
            notes='Text'
        )
        request = self.factory.get('/my_tutees/')
        request.user = self.user
        response = my_tutees(request)
        self.assertContains(response, '1 Jan 1900')
        self.assertContains(response, '2 Jan 1900')


class AddressNinesTest(TeacherUnitTest):
    """Tests the function that allows to change averages ending with 9"""

    def test_address_nines_uses_right_template(self):
        module = create_module()
        response = self.client.get(module.get_address_nines_url())
        self.assertTemplateUsed(response, 'address_nines.html')

    def test_address_nines_shows_all_averages_ending_with_nine(self):
        # Weighted averages: assessments worth 20/30/50 percent.
        stuff = set_up_stuff()
        module = stuff[0]
        assessment1 = Assessment.objects.create(
            module=module,
            title='Assessment 1',
            value=20
        )
        assessment2 = Assessment.objects.create(
            module=module,
            title='Assessment 2',
            value=30
        )
        assessment3 = Assessment.objects.create(
            module=module,
            title='Assessment 3',
            value=50
        )
        # Student 1 with average of 49
        student1 = stuff[1]
        performance1 = Performance.objects.get(module=module, student=student1)
        result1_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=50
        )
        performance1.assessment_results.add(result1_1)
        result1_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=48
        )
        performance1.assessment_results.add(result1_2)
        result1_3 = AssessmentResult.objects.create(
            assessment=assessment3,
            mark=50
        )
        performance1.assessment_results.add(result1_3)
        performance1.calculate_average()
        # Student 2 with 59 Average
        student2 = stuff[2]
        performance2 = Performance.objects.get(module=module, student=student2)
        result2_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=62
        )
        performance2.assessment_results.add(result2_1)
        result2_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=58
        )
        performance2.assessment_results.add(result2_2)
        result2_3 = AssessmentResult.objects.create(
            assessment=assessment3,
            mark=59
        )
        performance2.assessment_results.add(result2_3)
        performance2.calculate_average()
        # Student 3 with 60 Average
        student3 = stuff[3]
        performance3 = Performance.objects.get(module=module, student=student3)
        result3_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=60
        )
        performance3.assessment_results.add(result3_1)
        result3_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=60
        )
        performance3.assessment_results.add(result3_2)
        result3_3 = AssessmentResult.objects.create(
            assessment=assessment3,
            mark=60
        )
        performance3.assessment_results.add(result3_3)
        performance3.calculate_average()
        request = self.factory.get(module.get_address_nines_url())
        request.user = self.user
        response = address_nines(request, module.code, module.year)
        # Averages of 49 and 59 must be listed; 60 must not.
        self.assertContains(response, student1.short_name())
        self.assertContains(response, student2.short_name())
        self.assertNotContains(response, student3.short_name())

    def test_address_nines_shows_no_nines_found_message_when_no_nines(self):
        stuff = set_up_stuff()
        module = stuff[0]
        assessment1 = Assessment.objects.create(
            module=module,
            title='Assessment 1',
            value=20
        )
        assessment2 = Assessment.objects.create(
            module=module,
            title='Assessment 2',
            value=30
        )
        assessment3 = Assessment.objects.create(
            module=module,
            title='Assessment 3',
            value=50
        )
        # Student 1 with 40 average
        student1 = stuff[1]
        performance1 = Performance.objects.get(module=module, student=student1)
        result1_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=40
        )
        performance1.assessment_results.add(result1_1)
        result1_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=40
        )
        performance1.assessment_results.add(result1_2)
        result1_3 = AssessmentResult.objects.create(
            assessment=assessment3,
            mark=40
        )
        performance1.assessment_results.add(result1_3)
        performance1.calculate_average()
        # Student 2 with 55 Average
        student2 = stuff[2]
        performance2 = Performance.objects.get(module=module, student=student2)
        result2_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=55
        )
        performance2.assessment_results.add(result2_1)
        result2_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=55
        )
        performance2.assessment_results.add(result2_2)
        result2_3 = AssessmentResult.objects.create(
            assessment=assessment3,
            mark=55
        )
        performance2.assessment_results.add(result2_3)
        performance2.calculate_average()
        # Student 3 with 60 Average
        student3 = stuff[3]
        performance3 = Performance.objects.get(module=module, student=student3)
        result3_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=60
        )
        performance3.assessment_results.add(result3_1)
        result3_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=60
        )
        performance3.assessment_results.add(result3_2)
        result3_3 = AssessmentResult.objects.create(
            assessment=assessment3,
            mark=60
        )
        performance3.assessment_results.add(result3_3)
        performance3.calculate_average()
        request = self.factory.get(module.get_address_nines_url())
        request.user = self.user
        response = address_nines(request, module.code, module.year)
        self.assertNotContains(response, student1.short_name())
        self.assertNotContains(response, student2.short_name())
        self.assertNotContains(response, student3.short_name())
        self.assertContains(response, 'no averages ending with a 9')

    def test_address_nines_changes_marks(self):
        # POSTing corrected marks through the form recalculates averages:
        # 49 -> 50 and 59 -> 60.
        stuff = set_up_stuff()
        module = stuff[0]
        assessment1 = Assessment.objects.create(
            module=module,
            title='Assessment 1',
            value=20
        )
        assessment2 = Assessment.objects.create(
            module=module,
            title='Assessment 2',
            value=30
        )
        assessment3 = Assessment.objects.create(
            module=module,
            title='Assessment 3',
            value=50
        )
        # Student 1 with average of 49
        student1 = stuff[1]
        performance1 = Performance.objects.get(module=module, student=student1)
        result1_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=50
        )
        # Form field names follow 'mark_<assessment_slug>_<student_id>'.
        r1_1_field = 'mark_' + assessment1.slug + '_' + student1.student_id
        performance1.assessment_results.add(result1_1)
        result1_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=48
        )
        performance1.assessment_results.add(result1_2)
        r1_2_field = 'mark_' + assessment2.slug + '_' + student1.student_id
        result1_3 = AssessmentResult.objects.create(
            assessment=assessment3,
            mark=50
        )
        performance1.assessment_results.add(result1_3)
        r1_3_field = 'mark_' + assessment3.slug + '_' + student1.student_id
        performance1.calculate_average()
        # Student 2 with 59 Average
        student2 = stuff[2]
        performance2 = Performance.objects.get(module=module, student=student2)
        result2_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=62
        )
        performance2.assessment_results.add(result2_1)
        r2_1_field = 'mark_' + assessment1.slug + '_' + student2.student_id
        result2_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=58
        )
        performance2.assessment_results.add(result2_2)
        r2_2_field = 'mark_' + assessment2.slug + '_' + student2.student_id
        result2_3 = AssessmentResult.objects.create(
            assessment=assessment3,
            mark=59
        )
        performance2.assessment_results.add(result2_3)
        r2_3_field = 'mark_' + assessment3.slug + '_' + student2.student_id
        performance2.calculate_average()
        request = self.factory.post(
            module.get_address_nines_url(),
            data={
                r1_1_field: '50',
                r1_2_field: '49',
                r1_3_field: '50',
                r2_1_field: '63',
                r2_2_field: '58',
                r2_3_field: '59'
            }
        )
        request.user = self.user
        response = address_nines(request, module.code, module.year)
        performance_1_out = Performance.objects.get(
            student=student1,
            module=module
        )
        performance_2_out = Performance.objects.get(
            student=student2,
            module=module
        )
        self.assertEqual(performance_1_out.average, 50)
        self.assertEqual(performance_2_out.average, 60)

    def test_address_nines_templates_contains_correct_form_tags(self):
        stuff = set_up_stuff()
        module = stuff[0]
        assessment1 = Assessment.objects.create(
            module=module,
            title='Assessment 1',
            value=20
        )
        assessment2 = Assessment.objects.create(
            module=module,
            title='Assessment 2',
            value=30
        )
        assessment3 = Assessment.objects.create(
            module=module,
            title='Assessment 3',
            value=50
        )
        # Student 1 with average of 49
        student1 = stuff[1]
        performance1 = Performance.objects.get(module=module, student=student1)
        result1_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=50
        )
        # Expected rendered input-name attributes.
        r1_1_field = (
            'name="mark_' +
            assessment1.slug +
            '_' +
            student1.student_id +
            '"'
        )
        performance1.assessment_results.add(result1_1)
        result1_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=48
        )
        r1_2_field = (
            'name="mark_' +
            assessment2.slug +
            '_' +
            student1.student_id +
            '"'
        )
        performance1.assessment_results.add(result1_2)
        result1_3 = AssessmentResult.objects.create(
            assessment=assessment3,
            mark=50
        )
        r1_3_field = (
            'name="mark_' +
            assessment3.slug +
            '_' +
            student1.student_id +
            '"'
        )
        performance1.assessment_results.add(result1_3)
        performance1.calculate_average()
        request = self.factory.get(module.get_address_nines_url())
        request.user = self.user
        response = address_nines(request, module.code, module.year)
        self.assertContains(response, r1_1_field)
        self.assertContains(response, r1_2_field)
        self.assertContains(response, r1_3_field)


class EditExamIDsTest(AdminUnitTest):
    """Testing the function to manually edit Exam IDs"""

    def test_right_template_used(self):
        subject_area = SubjectArea.objects.create(name="Cartoon Studies")
        url = (
            '/edit_exam_ids/' +
            subject_area.slug +
            '/1/'
        )
        response = self.client.get(url)
        self.assertTemplateUsed(response, 'edit_exam_ids.html')

    def test_only_active_students_with_right_SA_and_year_appear_in_form(self):
        # Filter: active=True AND course in requested subject area AND year.
        subject_area1 = SubjectArea.objects.create(name="Cartoon Studies")
        subject_area2 = SubjectArea.objects.create(name="Evil Plotting")
        course1 = Course.objects.create(
            title="MA in Cartoon Studies",
            short_title="Cartoon Studies"
        )
        course1.subject_areas.add(subject_area1)
        course2 = Course.objects.create(
            title="MSc in Evil Plotting",
            short_title="Evil Plotting"
        )
        course2.subject_areas.add(subject_area2)
        course3 = Course.objects.create(
            title="MA in Cartoon Studies and Evil Plotting",
            short_title="Cartoon Studies/Evil Plotting"
        )
        course3.subject_areas.add(subject_area1)
        course3.subject_areas.add(subject_area2)
        stuff = set_up_stuff()
        student1 = stuff[1]
        student1.active = True
        student1.course = course1
        student1.year = 1
        student1.save()
        student2 = stuff[2]
        student2.active = False
        student2.course = course1
        student2.year = 1
        student2.save()
        student3 = stuff[3]
        student3.active = True
        student3.course = course2
        student3.year = 1
        student3.save()
        student4 = stuff[4]
        student4.active = True
        student4.course = course3
        student4.year = 1
        student4.save()
        student5 = stuff[5]
        student5.active = True
        student5.course = course3
        student5.year = 2
        student5.save()
        url = (
            '/edit_exam_ids/' +
            subject_area1.slug +
            '/1/'
        )
        request = self.factory.get(url)
        request.user = self.user
        response = edit_exam_ids(request, subject_area1.slug, '1')
        self.assertContains(response, student1.student_id)
        self.assertNotContains(response, student2.student_id)  # inactive
        self.assertNotContains(response, student3.student_id)  # wrong SA
        self.assertContains(response, student4.student_id)     # joint course
        self.assertNotContains(response, student5.student_id)  # wrong year

    def test_existing_exam_ids_are_shown(self):
        subject_area = SubjectArea.objects.create(name="Cartoon Studies")
        course = Course.objects.create(
            title="MA in Cartoon Studies",
            short_title="Cartoon Studies"
        )
        course.subject_areas.add(subject_area)
        stuff = set_up_stuff()
        student1 = stuff[1]
        student1.active = True
        student1.course = course
        student1.year = 1
        student1.exam_id = '1234'
        student1.save()
        student2 = stuff[2]
        student2.active = True
        student2.course = course
        student2.year = 1
        student2.exam_id = '56789ABC'
        student2.save()
        url = (
            '/edit_exam_ids/' +
            subject_area.slug +
            '/1/'
        )
        request = self.factory.get(url)
        request.user = self.user
        response = edit_exam_ids(request, subject_area.slug, '1')
        self.assertContains(response, '1234')
        self.assertContains(response, '56789ABC')

    def test_exam_ids_get_saved_properly(self):
        # Empty field must map to exam_id None, not ''.
        subject_area = SubjectArea.objects.create(name="Cartoon Studies")
        course = Course.objects.create(
            title="MA in Cartoon Studies",
            short_title="Cartoon Studies"
        )
        course.subject_areas.add(subject_area)
        stuff = set_up_stuff()
        student1 = stuff[1]
        student1.active = True
        student1.course = course
        student1.year = 1
        student1.save()
        student2 = stuff[2]
        student2.active = True
        student2.course = course
        student2.year = 1
        student2.save()
        student3 = stuff[3]
        student3.active = True
        student3.course = course
        student3.year = 1
        student3.save()
        url = (
            '/edit_exam_ids/' +
            subject_area.slug +
            '/1/'
        )
        request = self.factory.post(
            url,
            data={
                student1.student_id: '1234',
                student2.student_id: '56789E',
                student3.student_id: ''
            }
        )
        request.user = self.user
        response = edit_exam_ids(request, subject_area.slug, '1')
        student1_out = Student.objects.get(student_id=student1.student_id)
        student2_out = Student.objects.get(student_id=student2.student_id)
        student3_out = Student.objects.get(student_id=student3.student_id)
        self.assertEqual(student1_out.exam_id, '1234')
        self.assertEqual(student2_out.exam_id, '56789E')
        self.assertEqual(student3_out.exam_id, None)


class ConcessionsTest(AdminUnitTest):
    """Testing the concessions form"""

    def test_concessions_form_uses_right_template(self):
        module = create_module()
        response = self.client.get(module.get_concessions_url('first'))
        self.assertTemplateUsed(response, 'concessions.html')

    def test_all_active_students_appear_in_template(self):
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student2 = stuff[2]
        student3 = stuff[3]
        student3.active = False
        student3.save()
        request = self.factory.get(module.get_concessions_url('first'))
        request.user = self.user
        response = concessions(request, module.code, module.year, 'first')
        self.assertContains(response, student1.short_name())
        self.assertContains(response, student2.short_name())
        self.assertNotContains(response, student3.short_name())

    def test_correct_names_for_values_in_template(self):
        # Form fields are named '<student_id>_<assessment_slug>'.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student2 = stuff[2]
        assessment1 = Assessment.objects.create(
            module=module,
            title="Assessment 1"
        )
        assessment2 = Assessment.objects.create(
            module=module,
            title="Assessment 2"
        )
        performance1 = Performance.objects.get(module=module, student=student1)
        assessment_result_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=38,
            concessions='N'
        )
        assessment_result_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=38,
            concessions='G'
        )
        performance1.assessment_results.add(assessment_result_1)
        performance1.assessment_results.add(assessment_result_2)
        request = self.factory.get(module.get_concessions_url('first'))
        request.user = self.user
        response = concessions(request, module.code, module.year, 'first')
        tag_name_1_1 = (
            'name="' +
            student1.student_id +
            '_' +
            assessment1.slug +
            '"'
        )
        tag_name_1_2 = (
            'name="' +
            student1.student_id +
            '_' +
            assessment2.slug +
            '"'
        )
        self.assertContains(response, tag_name_1_1)
        self.assertContains(response, tag_name_1_2)
        tag_name_2_1 = (
            'name="' +
            student2.student_id +
            '_' +
            assessment1.slug +
            '"'
        )
        tag_name_2_2 = (
            'name="' +
            student2.student_id +
            '_' +
            assessment2.slug +
            '"'
        )
        self.assertContains(response, tag_name_2_1)
        self.assertContains(response, tag_name_2_2)

    def test_existing_concessions_are_displayed(self):
        # Checks which <option> inside each <select> carries 'selected'.
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        assessment1 = Assessment.objects.create(
            module=module,
            title="Assessment 1"
        )
        assessment2 = Assessment.objects.create(
            module=module,
            title="Assessment 2"
        )
        performance1 = Performance.objects.get(module=module, student=student1)
        assessment_result_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=38,
            concessions='N'
        )
        assessment_result_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=38,
            concessions='G'
        )
        performance1.assessment_results.add(assessment_result_1)
        performance1.assessment_results.add(assessment_result_2)
        request = self.factory.get(module.get_concessions_url('first'))
        request.user = self.user
        response = concessions(request, module.code, module.year, 'first')
        # NOTE(review): no explicit parser passed to BeautifulSoup — emits a
        # warning on newer bs4; consider features='html.parser'.
        soup = BeautifulSoup(response.content)
        tag_name_1_1 = (
            '#' +
            student1.student_id +
            '_' +
            assessment1.slug
        )
        select1 = str(soup.select(tag_name_1_1)[0])
        options1 = select1.split('<option')
        for part in options1:
            if 'value="N"' in part:
                option1 = part
        self.assertIn('selected', option1)
        tag_name_1_2 = (
            '#' +
            student1.student_id +
            '_' +
            assessment2.slug
        )
        select2 = str(soup.select(tag_name_1_2)[0])
        options2 = select2.split('<option')
        for part in options2:
            if 'value="N"' in part:
                option2 = part
        self.assertNotIn('selected', option2)
        for part in options2:
            if 'value="G"' in part:
                option2 = part
        self.assertIn('selected', option2)

    def test_submitting_the_form_saves_concessions(self):
        stuff = set_up_stuff()
        module = stuff[0]
        student1 = stuff[1]
        student2 = stuff[2]
        assessment1 = Assessment.objects.create(
            module=module,
            title="Assessment 1"
        )
        assessment2 = Assessment.objects.create(
            module=module,
            title="Assessment 2"
        )
        performance1 = Performance.objects.get(module=module, student=student1)
        assessment_result_1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=38,
            concessions='N'
        )
        assessment_result_2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=38,
            concessions='G'
        )
        performance1.assessment_results.add(assessment_result_1)
        performance1.assessment_results.add(assessment_result_2)
        tag_name_1_1 = (
            student1.student_id +
            '_' +
            assessment1.slug
        )
        tag_name_1_2 = (
            student1.student_id +
            '_' +
            assessment2.slug
        )
        tag_name_2_1 = (
            student2.student_id +
            '_' +
            assessment1.slug
        )
        tag_name_2_2 = (
            student2.student_id +
            '_' +
            assessment2.slug
        )
        request = self.factory.post(
            module.get_concessions_url('first'),
            data={
                tag_name_1_1: 'G',
                tag_name_1_2: 'P',
                tag_name_2_1: 'N',
                tag_name_2_2: 'G',
            }
        )
        request.user = self.user
        response = concessions(request, module.code, module.year, 'first')
        assessment_result_1_1_out = AssessmentResult.objects.get(
            assessment=assessment1,
            part_of=performance1
        )
        assessment_result_1_2_out = AssessmentResult.objects.get(
            assessment=assessment2,
            part_of=performance1
        )
        # student2 had no pre-existing results: the view must create them.
        performance2 = Performance.objects.get(module=module, student=student2)
        assessment_result_2_1_out = AssessmentResult.objects.get(
            assessment=assessment1,
            part_of=performance2
        )
        assessment_result_2_2_out = AssessmentResult.objects.get(
            assessment=assessment2,
            part_of=performance2
        )
        self.assertEqual(assessment_result_1_1_out.concessions, 'G')
        self.assertEqual(assessment_result_1_2_out.concessions, 'P')
        self.assertEqual(assessment_result_2_1_out.concessions, 'N')
        self.assertEqual(assessment_result_2_2_out.concessions, 'G')


class NextYearTest(MainAdminUnitTest):
    """Testing the switch to the next year with all its complications"""

    def populate_db_with_students(self):
        # Helper, not a test: returns a dict of students keyed by the
        # transition each one exercises (e.g. '1-2' = year 1 -> year 2,
        # 'spty' = second part-time year).
        subject_area_1 = SubjectArea.objects.create(name="Cartoon Studies")
        subject_area_2 = SubjectArea.objects.create(name="Evil Plotting")
        course_1 = Course.objects.create(
            title='BA in Cartoon Studies',
            short_title='Cartoon Studies',
        )
        course_1.subject_areas.add(subject_area_1)
        course_2 = Course.objects.create(
            title='BA in Evil Plotting',
            short_title='Evil Plotting',
        )
        course_2.subject_areas.add(subject_area_2)
        course_3 = Course.objects.create(
            title='BA in Cartoon Studies with Evil Plotting',
            short_title='Cartoon Studies / Evil Plotting',
        )
        course_3.subject_areas.add(subject_area_1)
        course_3.subject_areas.add(subject_area_2)
        students = {}
        student1_1 = Student.objects.create(
            first_name='Bugs',
            last_name='Bunny',
            student_id='bb23',
            year=1,
            course=course_1,
        )
        students['1-2'] = student1_1
        student1_2 = Student.objects.create(
            first_name='Daffy',
            last_name='Duck',
            student_id='dd42',
            year=1,
            is_part_time=True,
            course=course_1
        )
        students['1-spty'] = student1_2
        student1_3 = Student.objects.create(
            first_name='Silvester',
            last_name='Cat',
            student_id='sc23',
            year=1,
            is_part_time=True,
            second_part_time_year=True,
            course=course_1
        )
        students['spty-2'] = student1_3
        student1_4 = Student.objects.create(
            first_name='While E',
            last_name='Coyote',
            student_id='wec23',
            year=1,
            course=course_3
        )
        students['mixed_course'] = student1_4
        student2_1 = Student.objects.create(
            first_name='Tweety',
            last_name='Bird',
            student_id='tb23',
            year=2,
            course=course_1
        )
        students['2-3'] = student2_1
        student3_1 = Student.objects.create(
            first_name='Tasmanian',
            last_name='Devil',
            student_id='td23',
            year=3,
            course=course_1
        )
        students['3-4'] = student3_1
        student4_1 = Student.objects.create(
            first_name='Marvin',
            last_name='Martian',
            student_id='mm23',
            year=1,
            course=course_2
        )
        students['different_course'] = student4_1
        return students

    def test_enter_student_progression_uses_correct_template(self):
        students = self.populate_db_with_students()
        response = self.client.get(
            '/enter_student_progression/cartoon-studies/1/'
        )
        self.assertTemplateUsed(response, 'enter_student_progression.html')

    def test_enter_student_progression_shows_correct_students(self):
        # Only year-1 Cartoon Studies students (incl. the joint course).
        students = self.populate_db_with_students()
        request = self.factory.get(
            '/enter_student_progression/cartoon-studies/1/'
        )
        request.user = self.user
        response = enter_student_progression(
            request, 'cartoon-studies', '1')
        self.assertContains(response, students['1-2'].student_id)
        self.assertContains(response, students['1-spty'].student_id)
        self.assertContains(response, students['mixed_course'].student_id)
        self.assertNotContains(
            response, students['different_course'].student_id)
        self.assertNotContains(response, students['2-3'].student_id)
        self.assertNotContains(response, students['3-4'].student_id)

    def test_pass_and_proceed(self):
        # next_year='PP' (pass and proceed): the student's year increments
        # and the global current_year setting moves on by one.
        student = Student.objects.create(
            first_name="Bugs",
            last_name="Bunny",
            student_id="bb23",
            year=1,
            next_year='PP'
        )
        this_year = int(Setting.objects.get(name="current_year").value)
        next_year = str(this_year + 1)
        request = self.factory.get(reverse('proceed_to_next_year'))
        request.user = self.user
        response = proceed_to_next_year(request)
        student_out = Student.objects.first()
        self.assertEqual(student_out.year, 2)
        new_year = Setting.objects.get(name="current_year").value
        self.assertEqual(new_year, next_year)

    def test_pass_and_proceed_for_part_time_student(self):
        # Part-time students take two calendar years per academic year:
        # first half toggles second_part_time_year, second half advances.
        student1 = Student.objects.create(
            first_name="Bugs",
            last_name="Bunny",
            student_id="bb23",
            year=1,
            is_part_time=True,
            next_year='PP'
        )
        student2 = Student.objects.create(
            first_name="Daffy",
            last_name="Duck",
            student_id="dd23",
            year=1,
            is_part_time=True,
            second_part_time_year=True,
            next_year='PP'
        )
        request = self.factory.get(reverse('proceed_to_next_year'))
        request.user = self.user
        response = proceed_to_next_year(request)
        student1_out = Student.objects.get(first_name="Bugs")
        student2_out = Student.objects.get(first_name="Daffy")
        self.assertEqual(student1_out.year, 1)
        self.assertTrue(student1_out.second_part_time_year)
        self.assertEqual(student2_out.year, 2)
        self.assertFalse(student2_out.second_part_time_year)

    def test_pass_and_proceed_with_qld_resit(self):
        # next_year='PQ': proceed, but failed foundational assessments must
        # be resat for QLD purposes; a note is written onto the student.
        student = Student.objects.create(
            first_name="Bugs",
            last_name="Bunny",
            student_id="bb23",
            year=1,
            qld=True,
            next_year='PQ'
        )
        this_year = int(Setting.objects.get(name="current_year").value)
        module = Module.objects.create(
            title="Carrot Eating",
            code="CE23",
            year=this_year,
            foundational=True
        )
        assessment1 = Assessment.objects.create(
            title="Essay",
            value=20
        )
        assessment2 = Assessment.objects.create(
            title="Exam",
            value=80
        )
        result1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=38,
            resit_mark=38
        )
        result2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=80,
        )
        performance = Performance.objects.create(
            student=student,
            module=module,
            belongs_to_year=1
        )
        performance.assessment_results.add(result1)
        performance.assessment_results.add(result2)
        self.assertEqual(performance.qld_failures_after_resit(), [result1])
        request = self.factory.get(reverse('proceed_to_next_year'))
        request.user = self.user
        response = proceed_to_next_year(request)
        student_out = Student.objects.first()
        self.assertEqual(student_out.year, 2)
        comment_str = (
            'In Year 2, Bugs will have to resit Carrot Eating ' +
            '(Essay) for QLD purposes'
        )
        self.assertEqual(student_out.notes, comment_str)

    def test_pass_and_proceed_with_trailed_resits(self):
        # next_year='PT': proceed while trailing failed assessments into
        # the next year; both failures are listed in the note.
        student = Student.objects.create(
            first_name="Bugs",
            last_name="Bunny",
            student_id="bb23",
            year=1,
            qld=True,
            next_year='PT'
        )
        this_year = int(Setting.objects.get(name="current_year").value)
        module = Module.objects.create(
            title="Carrot Eating",
            code="CE23",
            year=this_year,
            foundational=True
        )
        assessment1 = Assessment.objects.create(
            title="Essay",
            value=20
        )
        assessment2 = Assessment.objects.create(
            title="Exam",
            value=80
        )
        result1 = AssessmentResult.objects.create(
            assessment=assessment1,
            mark=38,
        )
        result2 = AssessmentResult.objects.create(
            assessment=assessment2,
            mark=35,
        )
        performance = Performance.objects.create(
            student=student,
            module=module,
            belongs_to_year=1
        )
        performance.assessment_results.add(result1)
        performance.assessment_results.add(result2)
        performance.calculate_average()
        request = self.factory.get(reverse('proceed_to_next_year'))
        request.user = self.user
        response = proceed_to_next_year(request)
        student_out = Student.objects.first()
        self.assertEqual(student_out.year, 2)
        comment_str = (
            'In Year 2, Bugs will have to resit Carrot Eating ' +
            '(Essay); Carrot Eating (Exam) (trailed)'
        )
        self.assertEqual(student_out.notes, comment_str)

    def test_pass_and_proceed_with_compensation(self):
        # (test continues past the end of this view)
        student = Student.objects.create(
            first_name="Bugs",
            last_name="Bunny",
            student_id="bb23",
            year=1,
qld=True, next_year='PC' ) this_year = int(Setting.objects.get(name="current_year").value) module = Module.objects.create( title="Carrot Eating", code="CE23", year=this_year, foundational=True ) assessment1 = Assessment.objects.create( title="Essay", value=20 ) assessment2 = Assessment.objects.create( title="Exam", value=80 ) result1 = AssessmentResult.objects.create( assessment=assessment1, mark=38, ) result2 = AssessmentResult.objects.create( assessment=assessment2, mark=35, ) performance = Performance.objects.create( student=student, module=module, belongs_to_year=1 ) performance.assessment_results.add(result1) performance.assessment_results.add(result2) performance.calculate_average() request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 2) comment_str = 'Failure in %s (%s) has been compensated' %( module.title, performance.real_average) self.assertEqual(student_out.notes, comment_str) def test_repeat_year_works(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=1, next_year='R' ) this_year = int(Setting.objects.get(name="current_year").value) next_year = str(this_year + 1) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 1) new_year = Setting.objects.get(name="current_year").value self.assertEqual(new_year, next_year) self.assertEqual(student_out.notes, 'Repeated Year 1') def test_repeat_year_absj_works(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=1, next_year='ABSJ' ) this_year = int(Setting.objects.get(name="current_year").value) next_year = str(this_year + 1) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = 
proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 1) new_year = Setting.objects.get(name="current_year").value self.assertEqual(new_year, next_year) self.assertEqual(student_out.notes, 'Repeated Year 1 ABSJ') def test_graduate_with_first(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=3, next_year='1' ) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 9) self.assertTrue(student_out.active) self.assertEqual(student_out.achieved_degree, 1) def test_graduate_with_21(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=3, next_year='21' ) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 9) self.assertTrue(student_out.active) self.assertEqual(student_out.achieved_degree, 21) def test_graduate_with_22(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=3, next_year='22' ) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 9) self.assertTrue(student_out.active) self.assertEqual(student_out.achieved_degree, 22) def test_graduate_with_3rd(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=3, next_year='3' ) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 9) self.assertTrue(student_out.active) 
self.assertEqual(student_out.achieved_degree, 3) def test_graduate_with_cert_he(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=3, next_year='C' ) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 9) self.assertTrue(student_out.active) self.assertEqual(student_out.achieved_degree, 7) def test_graduate_with_dipl_he(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=3, next_year='D' ) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 9) self.assertTrue(student_out.active) self.assertEqual(student_out.achieved_degree, 6) def test_graduate_with_ordinary_degree(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=3, next_year='O' ) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 9) self.assertTrue(student_out.active) self.assertEqual(student_out.achieved_degree, 5) def test_withdraw_student(self): student = Student.objects.create( first_name="Bugs", last_name="Bunny", student_id="bb23", year=3, next_year='WD' ) request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_out = Student.objects.first() self.assertEqual(student_out.year, 9) self.assertTrue(student_out.active) self.assertEqual(student_out.achieved_degree, 8) def test_proceed_to_next_year_with_multiple_students(self): students = self.populate_db_with_students() for student in students: students[student].next_year = 'PP' 
students[student].save() students['3-4'].next_year = '1' students['3-4'].save() request = self.factory.get(reverse('proceed_to_next_year')) request.user = self.user response = proceed_to_next_year(request) student_1_2 = Student.objects.get( student_id=students['1-2'].student_id) self.assertEqual(student_1_2.year, 2) student_1_spty = Student.objects.get( student_id=students['1-spty'].student_id) self.assertEqual(student_1_spty.year, 1) self.assertTrue(student_1_spty.second_part_time_year) student_spty_2 = Student.objects.get( student_id=students['spty-2'].student_id) self.assertEqual(student_spty_2.year, 2) self.assertFalse(student_spty_2.second_part_time_year) student_2_3 = Student.objects.get( student_id=students['2-3'].student_id) self.assertEqual(student_2_3.year, 3) student_3_4 = Student.objects.get( student_id=students['3-4'].student_id) self.assertEqual(student_3_4.year, 9) self.assertEqual(student_3_4.achieved_degree, 1)
tobi2006/nomosdb
main/tests/test_views.py
Python
gpl-3.0
145,259
from logging import getLogger
from abstract_step import AbstractStep
import os

logger = getLogger('uap_logger')


class GffreadExtractTranscripts(AbstractStep):
    '''
    extract transcripts from gtf
    http://ccb.jhu.edu/software/stringtie/gff.shtml
    write a fasta file with spliced exons for each GFF transcript
    gffread -w transcripts.fa -g /path/to/genome.fa transcripts.gtf
    '''

    def __init__(self, pipeline):
        super(GffreadExtractTranscripts, self).__init__(pipeline)

        # gffread itself is single-threaded.
        self.set_cores(1)

        # Inputs: genome fasta and (optionally) an annotation from an
        # upstream step; outputs: the transcript fasta plus tool logs.
        self.add_connection('in/fasta')
        self.add_connection('in/anno')
        self.add_connection('out/fasta')
        self.add_connection('out/log_stderr')
        self.add_connection('out/log_stdout')

        self.require_tool('gffread')

        # 'gtf' overrides the in/anno connection when set in the config.
        self.add_option('gtf', str, optional=True, default=None,
                        description="path to gtf file")
        # Mandatory name of the produced transcriptome fasta; also used
        # as the run id below.
        self.add_option(
            'output-fasta-name',
            str,
            optional=False,
            default=None,
            description="name of the outputfile trancriptom myfasta.fa")

    def runs(self, run_ids_connections_files):
        """Declare a single run that builds one gffread command line:
        gffread -w <output-fasta-name> -g <genome.fa> <annotation.gtf>.
        """
        # The output fasta name doubles as the run id.
        run_id = self.get_option('output-fasta-name')
        # dependenency files (r for required)
        rfiles = []
        with self.declare_run(run_id) as run:
            cmd = [self.get_tool('gffread'), '-w']
            cmd.append(self.get_option('output-fasta-name'))

            # Pick up the genome fasta from whichever upstream run
            # provides an 'in/fasta' connection.
            # NOTE(review): 'continue' is the last statement of the loop
            # body, so it is a no-op — presumably 'break' (stop after the
            # first match) was intended; TODO confirm against uap's
            # connection semantics.
            for __, connection in run_ids_connections_files.items():
                if 'in/fasta' in connection:
                    cmd.append('-g')
                    cmd.append(connection['in/fasta'][0])
                    rfiles.append(connection['in/fasta'][0])
                    continue

            # Annotation source: config option wins over the in/anno
            # connection.
            if self.is_option_set_in_config('gtf'):
                cmd.append(os.path.abspath(self.get_option('gtf')))
            else:
                # Same no-op 'continue' pattern as above — TODO confirm.
                for __, connection in run_ids_connections_files.items():
                    if 'in/anno' in connection:
                        cmd.append(connection['in/anno'][0])
                        rfiles.append(connection['in/anno'][0])
                        continue

            # Register stderr/stdout logs and the fasta as run outputs,
            # all depending on the collected input files.
            stderr_file = "%s-gffread_extract_transcripts-log_stderr.txt" % (
                run_id)
            log_stderr = run.add_output_file("log_stderr", stderr_file,
                                             rfiles)
            stdout_file = "%s-gffread_extract_transcripts-log_stdout.txt" % (
                run_id)
            log_stdout = run.add_output_file("log_stdout", stdout_file,
                                             rfiles)

            run.add_output_file("fasta",
                                self.get_option('output-fasta-name'), rfiles)

            exec_group = run.new_exec_group()
            exec_group.add_command(cmd, stdout_path=log_stdout,
                                   stderr_path=log_stderr)
kmpf/uap
include/steps/gffread_extract_transcripts.py
Python
gpl-3.0
2,927
import re
import sys
import socket

from src.config import *


class IRC:
    """Thin wrapper around a Twitch IRC socket.

    Errors are never raised to the caller; they are reported as
    ('text', 'BG_error') tuples on the queue passed to __init__.

    NOTE(review): some sends pass str and others pass encoded bytes to
    socket.send — this looks like Python 2 era code; under Python 3 the
    plain-str sends would raise TypeError. TODO confirm target runtime.
    """

    def __init__(self, queue):
        # Queue consumed by the GUI/background thread for log messages.
        self.queue = queue

    def end_connection(self):
        """Send a goodbye message through irc and close the socket."""
        try:
            self.send_custom_message('goodbye')
            channel = get_channel()
            if channel:
                self.sock.send('PART {0}\r\n'.format(channel))
            self.sock.close()
        except Exception:
            self.queue.put(("{0}".format(sys.exc_info()[0]), 'BG_error'))
            self.queue.put(("irc.end_connection() - Could not close socket!", 'BG_error'))

    def check_for_message(self, data, queue):
        """Parse data from twitch into a nice dictionary.

        Returns a dict with at least 'type' (and 'message' for chat
        lines, plus one key per IRCv3 tag), or False when the data is
        not a parsable message.
        """
        try:
            data = data.decode('utf-8').split(" :")
        except UnicodeDecodeError:
            # TODO - This generally means the message is spam, so /timeout the user
            queue.put(("check_for_message() - Can't decode chat message!", 'BG_error'))
            return False
        # More than three " :"-separated parts: not a simple tagged line.
        if len(data) > 3:
            return False
        if len(data) > 1 and len(data[1].split(' ')) > 1:
            # Second word of the middle part is the IRC command
            # (PRIVMSG, USERNOTICE, ...).
            msg_type = data[1].split(' ')[1]
            msg_data = { 'type': msg_type }
            # Leading '@' is stripped; tags come as 'key=value' pairs
            # separated by ';'.
            params = data[0][1:].split(';')
            for param in params:
                p = param.split('=')
                # NOTE - This gave an index error on something, possibly when the stream ended in a raid
                msg_data[p[0]] = p[1]
            if len(data) > 2:
                # Trailing '\r\n' is dropped from the chat text.
                msg_data['message'] = data[2][:-2]
            return msg_data
        else:
            print(data)
            return False

    def check_for_ping(self, data):
        """If given data starts with PING, send PONG + rest of the data back."""
        if data.startswith('PING'):
            try:
                self.sock.send('PONG {0}\r\n'.format(data[5:]))
            except Exception:
                self.queue.put(("{0}".format(sys.exc_info()[0]), 'BG_error'))
                self.queue.put(("irc.check_for_ping() - Could not respond with PONG!", 'BG_error'))

    def check_login_status(self, data):
        """Return false if given data says login was unsuccessful.
        True otherwise."""
        if re.match(r'^:(tmi\.twitch\.tv) NOTICE \* :Login unsuccessful\r\n$', data):
            return False
        return True

    def send_message(self, message):
        """Try to send given message as PRIVMSG through the irc socket."""
        # NOTE(review): '== None' should idiomatically be 'is None';
        # behavior is the same for None, kept byte-identical here.
        if message == None:
            return
        try:
            channel = get_channel()
            if channel:
                # NOTE(review): formatting the *encoded* message into a
                # str embeds a bytes repr on Python 3 — another sign this
                # targets Python 2; TODO confirm.
                self.sock.send('PRIVMSG {0} : {1}\n'.format(channel, message.encode('utf-8')))
        except Exception:
            self.queue.put(("{0}".format(sys.exc_info()[0]), 'BG_error'))
            self.queue.put(("irc.send_message() - Could not send message!", 'BG_error'))

    def send_custom_message(self, message, data=None):
        """Try to send given message (if defined in the config)."""
        config = get_config()
        try:
            # Only send when the message is present AND enabled; a
            # missing key lands in the KeyError handler below.
            if config['messages'][message]['enabled']:
                msg = self.format_custom_message(message, config['messages'][message]['msg'], data)
                self.queue.put(("[{0}]: {1}".format(get_botname(), msg), 'BG_chat'))
                self.send_message(msg)
        except KeyError:
            self.queue.put(("irc.send_custom_message() - Could not send message '{0}'!".format(message), 'BG_error'))

    def format_custom_message(self, message, text, data):
        """Add relevant data to custom bot messages.

        'data' is a positional sequence whose expected length depends on
        the message kind; unmatched kinds return 'text' unchanged.
        """
        if message == "cheer" and len(data) == 2:
            text = text.format(user=data[0], bits=data[1])
        elif message == "sub" and len(data) == 5:
            text = text.format(user=data[0], streak=data[1], tier=data[2], plan=data[3], count=data[4])
        elif message == "raid" and len(data) == 2:
            text = text.format(user=data[0], raiders=data[1])
        return text

    def get_socket_object(self):
        """Connect and join irc channels as setup in the config.
        Return None or the socket."""
        config = get_config()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # 10s timeout only for the initial connect; cleared afterwards
        # so recv() blocks indefinitely.
        sock.settimeout(10)
        self.sock = sock
        if not self.connect_socket(config):
            return None
        sock.settimeout(None)
        channel = self.login_socket(config)
        if not channel:
            return None
        # Request Twitch IRCv3 capabilities (tags carry bits/sub info).
        self.sock.send('CAP REQ :twitch.tv/tags\r\n'.encode('utf-8'))
        self.sock.send('CAP REQ :twitch.tv/commands\r\n'.encode('utf-8'))
        self.sock.send('JOIN {0}\r\n'.format(channel))
        self.send_custom_message('greeting')
        return sock

    def connect_socket(self, config):
        """Connect our socket as defined in the config. Return true on success."""
        try:
            self.sock.connect((config['irc']['server'], config['irc']['port']))
            return True
        except KeyError:
            self.queue.put(("irc.connect_socket() - IRC config is corrupted!", 'BG_error'))
        except Exception:
            self.queue.put(("{0}".format(sys.exc_info()[0]), 'BG_error'))
            self.queue.put(("irc.connect_socket() - Cannot connect to server!", 'BG_error'))
        return False

    def login_socket(self, config):
        """Login to the IRC channel as defined in the config.
        Return the channel name joined."""
        try:
            self.sock.send('USER {0}\r\n'.format(config['irc']['username']))
            self.sock.send('PASS {0}\r\n'.format(config['irc']['oauth_password']))
            self.sock.send('NICK {0}\r\n'.format(config['irc']['username']))
            channel = config['irc']['channel']
        except KeyError:
            self.queue.put(("irc.login_socket() - IRC config is corrupted!", 'BG_error'))
            return False
        # Twitch answers the login attempt immediately; 1 KiB is enough
        # for the NOTICE line checked by check_login_status().
        if self.check_login_status(self.sock.recv(1024)):
            self.queue.put(("Login successful, joining channel {0}".format(channel), 'BG_success'))
        else:
            self.queue.put(("Login failed (possibly invalid oauth token)", 'BG_error'))
            return False
        return channel
Shakajiub/RoboJiub
src/irc.py
Python
gpl-3.0
6,161
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-paint
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
Utilities for extracting text from generated classes.
"""


def is_string(txt):
    """Return True if *txt* is a text string, False otherwise."""
    # The original body checked isinstance(txt, str) twice, the second
    # time inside a try/except NameError — a dead leftover from the
    # Python 2 basestring/unicode fallback.  On Python 3 a single
    # isinstance check is the whole test.
    return isinstance(txt, str)


def description(obj):
    """Return the stripped description text of *obj*, or None for None."""
    if obj is None:
        return None
    return description_bit(obj).strip()


def description_bit(obj):
    """Recursively flatten a generated doc node (or plain string) to text.

    Nodes are walked through their 'content'/'content_' child lists or
    their 'value' attribute; bare strings are returned as-is.  A node
    named 'para' gets a trailing blank line.

    Raises Exception when *obj* is neither a string nor node-like.
    """
    if hasattr(obj, 'content'):
        result = ''.join(description_bit(item) for item in obj.content)
    elif hasattr(obj, 'content_'):
        result = ''.join(description_bit(item) for item in obj.content_)
    elif hasattr(obj, 'value'):
        result = description_bit(obj.value)
    elif is_string(obj):
        return obj
    else:
        raise Exception('Expecting a string or something with content, content_ or value attribute')
    # If this bit is a paragraph then add some line breaks.
    if hasattr(obj, 'name') and obj.name == 'para':
        result += "\n\n"
    return result
drmpeg/gr-paint
docs/doxygen/doxyxml/text.py
Python
gpl-3.0
1,255
import numpy as np


def analytical_gradients(network, X, Y):
    """Run one forward + backward pass and return the network's
    gradient vector in unfolded (flat) form."""
    print("Calculating analytical gradients...")
    print("Forward pass:", end=" ")
    outputs = network.predict(X)
    print("done! Backward pass:", end=" ")
    # Seed backpropagation with the cost derivative at the outputs.
    error = network.cost.derivative(outputs, Y)
    network.backpropagate(error)
    print("done!")
    return network.get_gradients(unfold=True)


def numerical_gradients(network, X, Y, epsilon):
    """Estimate d(cost)/d(weight) for every weight by central finite
    differences with step *epsilon*; returns a flat gradient vector.

    The network's weights are restored to their original values before
    returning."""
    flat_weights = network.layers.get_weights(unfold=True)
    grad_estimate = np.zeros_like(flat_weights)
    shift = np.zeros_like(flat_weights)
    total = flat_weights.size
    width = len(str(total))

    def cost_at(weight_vector):
        # Load the given weights into the network and evaluate the cost.
        network.layers.set_weights(weight_vector, fold=True)
        return network.cost(network.predict(X), Y)

    print("Calculating numerical gradients...")
    for index in range(total):
        print("\r{0:>{1}} / {2}".format(index + 1, width, total), end=" ")
        # Perturb one coordinate at a time: f(w + e_i*eps) - f(w - e_i*eps).
        shift[index] += epsilon
        grad_estimate[index] = cost_at(flat_weights + shift) - cost_at(flat_weights - shift)
        shift[index] = 0.
    grad_estimate /= 2. * epsilon
    network.layers.set_weights(flat_weights, fold=True)
    print("Done!")
    return grad_estimate
csxeba/brainforge
brainforge/gradientcheck/raw_gradients.py
Python
gpl-3.0
1,211
#!/usr/bin/env python
#
# -*- coding: utf-8 -*-
#
# gui_helper.py -
# Copyright 2013 Tomas Hozza <thozza@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tomas Hozza <thozza@gmail.com>

import os
from datetime import date

from gi.repository import Gtk, Gdk


class GuiHelper(object):
    """Static helpers for common GTK3 chores: message dialogs, color
    handling and swapping widgets inside a container."""

    # Symbolic dialog kinds consumed by create_msg_dialog()/show_msg_dialog().
    (DIALOG_TYPE_ERROR, DIALOG_TYPE_WARNING, DIALOG_TYPE_INFO) = range(3)

    @staticmethod
    def enable_item(widget, enable=True):
        """Enable or grey out *widget*."""
        widget.set_sensitive(enable)

    @staticmethod
    def calendar_get_date(calendar):
        """Return the selection of a Gtk.Calendar as a datetime.date.

        Gtk.Calendar months are 0-based while datetime months are
        1-based, hence the +1.
        """
        year, month, day = calendar.get_date()
        return date(year, month + 1, day)

    @staticmethod
    def create_msg_dialog(type=DIALOG_TYPE_INFO, message="message",
                          buttons=Gtk.ButtonsType.CLOSE, parent=None,
                          sec_message=None):
        """Build (but do not show) a Gtk.MessageDialog of the given kind.

        NOTE(review): the parameter 'type' shadows the builtin; renaming
        would break keyword callers, so it is kept.
        """
        if type == GuiHelper.DIALOG_TYPE_INFO:
            dlg = Gtk.MessageDialog(parent,
                                    Gtk.DialogFlags.DESTROY_WITH_PARENT,
                                    Gtk.MessageType.INFO,
                                    buttons,
                                    message)
        elif type == GuiHelper.DIALOG_TYPE_WARNING:
            dlg = Gtk.MessageDialog(parent,
                                    Gtk.DialogFlags.DESTROY_WITH_PARENT,
                                    Gtk.MessageType.WARNING,
                                    buttons,
                                    message)
        else:
            # Any other value falls through to an error dialog.
            dlg = Gtk.MessageDialog(parent,
                                    Gtk.DialogFlags.DESTROY_WITH_PARENT,
                                    Gtk.MessageType.ERROR,
                                    buttons,
                                    message)
        if sec_message is not None:
            dlg.format_secondary_text(sec_message)
        return dlg

    @staticmethod
    def show_msg_dialog(type=DIALOG_TYPE_INFO, message="message",
                        buttons=Gtk.ButtonsType.CLOSE, parent=None,
                        sec_message=None):
        """Create a dialog, run it modally, hide it, and return the
        response id chosen by the user."""
        dlg = GuiHelper.create_msg_dialog(type, message, buttons,
                                          parent, sec_message)
        response = dlg.run()
        dlg.hide()
        return response

    @staticmethod
    def show_info_dialog(message="info", buttons=Gtk.ButtonsType.CLOSE,
                         parent=None, sec_message=None):
        """Convenience wrapper: modal info dialog."""
        return GuiHelper.show_msg_dialog(GuiHelper.DIALOG_TYPE_INFO,
                                         message, buttons, parent,
                                         sec_message)

    @staticmethod
    def show_warning_dialog(message="warning", buttons=Gtk.ButtonsType.CLOSE,
                            parent=None, sec_message=None):
        """Convenience wrapper: modal warning dialog."""
        return GuiHelper.show_msg_dialog(GuiHelper.DIALOG_TYPE_WARNING,
                                         message, buttons, parent,
                                         sec_message)

    @staticmethod
    def show_error_dialog(message="Error", buttons=Gtk.ButtonsType.CLOSE,
                          parent=None, sec_message=None):
        """Convenience wrapper: modal error dialog."""
        return GuiHelper.show_msg_dialog(GuiHelper.DIALOG_TYPE_ERROR,
                                         message, buttons, parent,
                                         sec_message)

    @staticmethod
    def get_RGBA_color(color_string=None):
        """Parse *color_string* (e.g. '#rrggbb' or a named color) into a
        Gdk.RGBA; return None when unset or unparsable."""
        if color_string:
            color = Gdk.RGBA()
            if Gdk.RGBA.parse(color, color_string):
                return color
        return None

    @staticmethod
    def widget_override_color(widget, color_string=None):
        """Override the widget's normal-state foreground color, or reset
        it when *color_string* is falsy."""
        if color_string:
            widget.override_color(Gtk.StateFlags.NORMAL,
                                  GuiHelper.get_RGBA_color(color_string))
        else:
            widget.override_color(Gtk.StateFlags.NORMAL, None)

    @staticmethod
    def replace_widget(current, new):
        """
        Replace one widget with another, carrying over all child
        packing properties.
        'current' has to be inside a container (e.g. gtk.VBox).
        """
        container = current.get_parent()
        assert container  # is "current" inside a container widget?

        # stolen from gazpacho code (widgets/base/base.py):
        # save all child properties before removal, then restore them
        # on the replacement widget.
        props = {}
        for pspec in Gtk.ContainerClass.list_child_properties(container):
            props[pspec.name] = container.child_get_property(current, pspec.name)
        Gtk.Container.remove(container, current)
        container.add(new)
        for name, value in props.items():
            container.child_set_property(new, name, value)

    @staticmethod
    def replace_widget2(cur, replace):
        """replace cur widget with another in a container keeping child
        properties"""
        con = cur.get_parent()
        # Remember the slot index and packing flags of the old widget.
        pos = con.child_get_property(cur, "position", "")
        pak = con.query_child_packing(cur)
        con.remove(cur)
        # Detach the replacement from any previous parent first.
        if replace.get_parent():
            replace.get_parent().remove(replace)
        #con.add_with_properties(replace, "position", pos)
        con.add(replace)
        con.child_set_property(replace, "position", pos)
        con.set_child_packing(replace, *pak)
thozza/energy-track
energytrack/gui/gui_helper.py
Python
gpl-3.0
6,070
from django.contrib.sitemaps import Sitemap

from .models import BlogEntry


class BlogEntrySitemap(Sitemap):
    """Sitemap section covering every visible blog entry on the
    current site, served over https."""

    changefreq = "yearly"
    priority = 0.6
    protocol = 'https'

    def items(self):
        """All entries of the current site that are marked visible."""
        visible_entries = BlogEntry.on_site.filter(is_visible=True)
        return visible_entries

    def lastmod(self, item):
        """Report the entry's modification timestamp as last-modified."""
        return item.modification
nim65s/MarkDownBlog
dmdb/sitemaps.py
Python
gpl-3.0
320
from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \ cpython_only import io import re from re import Scanner import sys import string import traceback from weakref import proxy # Misc tests from Tim Peters' re.doc # WARNING: Don't change details in these tests if you don't know # what you're doing. Some of these tests were carefully modeled to # cover most of the code. import unittest class ReTests(unittest.TestCase): def test_keep_buffer(self): # See bug 14212 b = bytearray(b'x') it = re.finditer(b'a', b) with self.assertRaises(BufferError): b.extend(b'x'*400) list(it) del it gc_collect() b.extend(b'x'*400) def test_weakref(self): s = 'QabbbcR' x = re.compile('ab+c') y = proxy(x) self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR')) def test_search_star_plus(self): self.assertEqual(re.search('x*', 'axx').span(0), (0, 0)) self.assertEqual(re.search('x*', 'axx').span(), (0, 0)) self.assertEqual(re.search('x+', 'axx').span(0), (1, 3)) self.assertEqual(re.search('x+', 'axx').span(), (1, 3)) self.assertEqual(re.search('x', 'aaa'), None) self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0)) self.assertEqual(re.match('a*', 'xxx').span(), (0, 0)) self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3)) self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3)) self.assertEqual(re.match('a+', 'xxx'), None) def bump_num(self, matchobj): int_value = int(matchobj.group(0)) return str(int_value + 1) def test_basic_re_sub(self): self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x') self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'), '9.3 -3 24x100y') self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3), '9.3 -3 23x99y') self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n') self.assertEqual(re.sub('.', r"\n", 'x'), '\n') s = r"\1\1" self.assertEqual(re.sub('(.)', s, 'x'), 'xx') self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s) self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s) 
self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx') self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx') self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx') self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx') self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'), '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D') self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a') self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7))) self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest') def test_bug_449964(self): # fails for group followed by other escape self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'), 'xx\bxx\b') def test_bug_449000(self): # Test for sub() on escaped characters self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'), 'abc\ndef\n') def test_bug_1661(self): # Verify that flags do not get silently ignored with compiled patterns pattern = re.compile('.') self.assertRaises(ValueError, re.match, pattern, 'A', re.I) self.assertRaises(ValueError, re.search, pattern, 'A', re.I) self.assertRaises(ValueError, re.findall, pattern, 'A', re.I) self.assertRaises(ValueError, re.compile, pattern, re.I) def test_bug_3629(self): # A regex that triggered a bug in the sre-code validator re.compile("(?P<quote>)(?(quote))") def test_sub_template_numeric_escape(self): # bug 776311 and friends self.assertEqual(re.sub('x', r'\0', 'x'), '\0') self.assertEqual(re.sub('x', r'\000', 'x'), '\000') self.assertEqual(re.sub('x', r'\001', 'x'), '\001') self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8') self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9') self.assertEqual(re.sub('x', r'\111', 'x'), '\111') 
self.assertEqual(re.sub('x', r'\117', 'x'), '\117') self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111') self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1') self.assertEqual(re.sub('x', r'\00', 'x'), '\x00') self.assertEqual(re.sub('x', r'\07', 'x'), '\x07') self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8') self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9') self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a') self.assertEqual(re.sub('x', r'\400', 'x'), '\0') self.assertEqual(re.sub('x', r'\777', 'x'), '\377') self.assertRaises(re.error, re.sub, 'x', r'\1', 'x') self.assertRaises(re.error, re.sub, 'x', r'\8', 'x') self.assertRaises(re.error, re.sub, 'x', r'\9', 'x') self.assertRaises(re.error, re.sub, 'x', r'\11', 'x') self.assertRaises(re.error, re.sub, 'x', r'\18', 'x') self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x') self.assertRaises(re.error, re.sub, 'x', r'\90', 'x') self.assertRaises(re.error, re.sub, 'x', r'\99', 'x') self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8' self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x') self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1' self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0' # in python2.3 (etc), these loop endlessly in sre_parser.py self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x') self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'), 'xz8') self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'), 'xza') def test_qualified_re_sub(self): self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb') self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa') def test_bug_114660(self): self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'), 'hello there') def test_bug_462270(self): # Test for empty sub() behaviour, see SF bug #462270 self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-') self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d') def test_symbolic_groups(self): 
re.compile('(?P<a>x)(?P=a)(?(a)y)') re.compile('(?P<a1>x)(?P=a1)(?(a1)y)') self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)') self.assertRaises(re.error, re.compile, '(?Px)') self.assertRaises(re.error, re.compile, '(?P=)') self.assertRaises(re.error, re.compile, '(?P=1)') self.assertRaises(re.error, re.compile, '(?P=a)') self.assertRaises(re.error, re.compile, '(?P=a1)') self.assertRaises(re.error, re.compile, '(?P=a.)') self.assertRaises(re.error, re.compile, '(?P<)') self.assertRaises(re.error, re.compile, '(?P<>)') self.assertRaises(re.error, re.compile, '(?P<1>)') self.assertRaises(re.error, re.compile, '(?P<a.>)') self.assertRaises(re.error, re.compile, '(?())') self.assertRaises(re.error, re.compile, '(?(a))') self.assertRaises(re.error, re.compile, '(?(1a))') self.assertRaises(re.error, re.compile, '(?(a.))') def test_symbolic_refs(self): self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx') self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx') self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx') def test_re_subn(self): self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2)) self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1)) self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0)) self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4)) self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2)) def test_re_split(self): self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c']) self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 
'c']) self.assertEqual(re.split("(:*)", ":a:b::c"), ['', ':', 'a', ':', 'b', '::', 'c']) self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c']) self.assertEqual(re.split("(:)*", ":a:b::c"), ['', ':', 'a', ':', 'b', ':', 'c']) self.assertEqual(re.split("([b:]+)", ":a:b::c"), ['', ':', 'a', ':b::', 'c']) self.assertEqual(re.split("(b)|(:+)", ":a:b::c"), ['', None, ':', 'a', None, ':', '', 'b', None, '', None, '::', 'c']) self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"), ['', 'a', '', '', 'c']) def test_qualified_re_split(self): self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c']) self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d']) self.assertEqual(re.split("(:)", ":a:b::c", 2), ['', ':', 'a', ':', 'b::c']) self.assertEqual(re.split("(:*)", ":a:b::c", 2), ['', ':', 'a', ':', 'b::c']) def test_re_findall(self): self.assertEqual(re.findall(":+", "abc"), []) self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"]) self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"]) self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""), (":", ":"), (":", "::")]) def test_bug_117612(self): self.assertEqual(re.findall(r"(a|(b))", "aba"), [("a", ""),("b", "b"),("a", "")]) def test_re_match(self): self.assertEqual(re.match('a', 'a').groups(), ()) self.assertEqual(re.match('(a)', 'a').groups(), ('a',)) self.assertEqual(re.match(r'(a)', 'a').group(0), 'a') self.assertEqual(re.match(r'(a)', 'a').group(1), 'a') self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a')) pat = re.compile('((a)|(b))(c)?') self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None)) self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None)) self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c')) self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c')) self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c')) # A single group m = re.match('(a)', 'a') self.assertEqual(m.group(0), 'a') 
self.assertEqual(m.group(0), 'a') self.assertEqual(m.group(1), 'a') self.assertEqual(m.group(1, 1), ('a', 'a')) pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?') self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None)) self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'), (None, 'b', None)) self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c')) def test_re_groupref_exists(self): self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(), ('(', 'a')) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(), (None, 'a')) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None) self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(), ('a', 'b')) self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(), (None, 'd')) self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(), (None, 'd')) self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(), ('a', '')) # Tests for bug #1177831: exercise groups other than the first group p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))') self.assertEqual(p.match('abc').groups(), ('a', 'b', 'c')) self.assertEqual(p.match('ad').groups(), ('a', None, 'd')) self.assertEqual(p.match('abd'), None) self.assertEqual(p.match('ac'), None) def test_re_groupref(self): self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(), ('|', 'a')) self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(), (None, 'a')) self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None) self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None) self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(), ('a', 'a')) self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(), (None, None)) def test_groupdict(self): self.assertEqual(re.match('(?P<first>first) (?P<second>second)', 'first second').groupdict(), {'first':'first', 'second':'second'}) def test_expand(self): 
self.assertEqual(re.match("(?P<first>first) (?P<second>second)", "first second") .expand(r"\2 \1 \g<second> \g<first>"), "second first second first") def test_repeat_minmax(self): self.assertEqual(re.match("^(\w){1}$", "abc"), None) self.assertEqual(re.match("^(\w){1}?$", "abc"), None) self.assertEqual(re.match("^(\w){1,2}$", "abc"), None) self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None) self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c") self.assertEqual(re.match("^x{1}$", "xxx"), None) self.assertEqual(re.match("^x{1}?$", "xxx"), None) self.assertEqual(re.match("^x{1,2}$", "xxx"), None) self.assertEqual(re.match("^x{1,2}?$", "xxx"), None) self.assertNotEqual(re.match("^x{3}$", "xxx"), None) self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None) self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None) self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) self.assertNotEqual(re.match("^x{3}?$", "xxx"), None) self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None) self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None) self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None) self.assertEqual(re.match("^x{}$", "xxx"), None) self.assertNotEqual(re.match("^x{}$", "x{}"), None) def test_getattr(self): self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)") self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U) self.assertEqual(re.compile("(?i)(a)(b)").groups, 2) self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {}) self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex, {'first': 1, 
'other': 2}) self.assertEqual(re.match("(a)", "a").pos, 0) self.assertEqual(re.match("(a)", "a").endpos, 1) self.assertEqual(re.match("(a)", "a").string, "a") self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1))) self.assertNotEqual(re.match("(a)", "a").re, None) def test_special_escapes(self): self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx").group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd").group(1), "bx") self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd", re.LOCALE).group(1), "bx") self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd", re.UNICODE).group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) self.assertEqual(re.search(r"\b(b.)\b", "abcd abc bcd bx").group(1), "bx") self.assertEqual(re.search(r"\B(b.)\B", "abc bcd bc abxd").group(1), "bx") self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc") self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None) self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a").group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a", re.LOCALE).group(0), "1aa! a") self.assertEqual(re.search(r"\d\D\w\W\s\S", "1aa! a", re.UNICODE).group(0), "1aa! a") def test_string_boundaries(self): # See http://bugs.python.org/issue10713 self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1), "abc") # There's a word boundary at the start of a string. self.assertTrue(re.match(r"\b", "abc")) # A non-empty string includes a non-boundary zero-length match. 
self.assertTrue(re.search(r"\B", "abc")) # There is no non-boundary match at the start of a string. self.assertFalse(re.match(r"\B", "abc")) # However, an empty string contains no word boundaries, and also no # non-boundaries. self.assertEqual(re.search(r"\B", ""), None) # This one is questionable and different from the perlre behaviour, # but describes current behavior. self.assertEqual(re.search(r"\b", ""), None) # A single word-character string has two boundaries, but no # non-boundary gaps. self.assertEqual(len(re.findall(r"\b", "a")), 2) self.assertEqual(len(re.findall(r"\B", "a")), 0) # If there are no words, there are no boundaries self.assertEqual(len(re.findall(r"\b", " ")), 0) self.assertEqual(len(re.findall(r"\b", " ")), 0) # Can match around the whitespace. self.assertEqual(len(re.findall(r"\B", " ")), 2) def test_bigcharset(self): self.assertEqual(re.match("([\u2222\u2223])", "\u2222").group(1), "\u2222") self.assertEqual(re.match("([\u2222\u2223])", "\u2222", re.UNICODE).group(1), "\u2222") def test_big_codesize(self): # Issue #1160 r = re.compile('|'.join(('%d'%x for x in range(10000)))) self.assertIsNotNone(r.match('1000')) self.assertIsNotNone(r.match('9999')) def test_anyall(self): self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0), "a\nb") self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0), "a\n\nb") def test_non_consuming(self): self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a") self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a") self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a") self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a") self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a") self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a") self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a") 
self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a") self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a") def test_ignore_case(self): self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b") self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb") self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b") self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb") self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a") self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa") self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a") self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa") def test_category(self): self.assertEqual(re.match(r"(\s)", " ").group(1), " ") def test_getlower(self): import _sre self.assertEqual(_sre.getlower(ord('A'), 0), ord('a')) self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a')) self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a')) self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC") def test_not_literal(self): self.assertEqual(re.search("\s([^a])", " b").group(1), "b") self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb") def test_search_coverage(self): self.assertEqual(re.search("\s(b)", " b").group(1), "b") self.assertEqual(re.search("a\s", "a ").group(0), "a ") def assertMatch(self, pattern, text, match=None, span=None, matcher=re.match): if match is None and span is None: # the pattern matches the whole text match = text span = (0, len(text)) elif match is None or span is None: raise ValueError('If match is not None, span should be specified ' '(and vice versa).') m = matcher(pattern, text) self.assertTrue(m) self.assertEqual(m.group(), match) 
self.assertEqual(m.span(), span) def test_re_escape(self): alnum_chars = string.ascii_letters + string.digits + '_' p = ''.join(chr(i) for i in range(256)) for c in p: if c in alnum_chars: self.assertEqual(re.escape(c), c) elif c == '\x00': self.assertEqual(re.escape(c), '\\000') else: self.assertEqual(re.escape(c), '\\' + c) self.assertMatch(re.escape(c), c) self.assertMatch(re.escape(p), p) def test_re_escape_byte(self): alnum_chars = (string.ascii_letters + string.digits + '_').encode('ascii') p = bytes(range(256)) for i in p: b = bytes([i]) if b in alnum_chars: self.assertEqual(re.escape(b), b) elif i == 0: self.assertEqual(re.escape(b), b'\\000') else: self.assertEqual(re.escape(b), b'\\' + b) self.assertMatch(re.escape(b), b) self.assertMatch(re.escape(p), p) def test_re_escape_non_ascii(self): s = 'xxx\u2620\u2620\u2620xxx' s_escaped = re.escape(s) self.assertEqual(s_escaped, 'xxx\\\u2620\\\u2620\\\u2620xxx') self.assertMatch(s_escaped, s) self.assertMatch('.%s+.' % re.escape('\u2620'), s, 'x\u2620\u2620\u2620x', (2, 7), re.search) def test_re_escape_non_ascii_bytes(self): b = 'y\u2620y\u2620y'.encode('utf-8') b_escaped = re.escape(b) self.assertEqual(b_escaped, b'y\\\xe2\\\x98\\\xa0y\\\xe2\\\x98\\\xa0y') self.assertMatch(b_escaped, b) res = re.findall(re.escape('\u2620'.encode('utf-8')), b) self.assertEqual(len(res), 2) def pickle_test(self, pickle): oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)') s = pickle.dumps(oldpat) newpat = pickle.loads(s) self.assertEqual(oldpat, newpat) def test_constants(self): self.assertEqual(re.I, re.IGNORECASE) self.assertEqual(re.L, re.LOCALE) self.assertEqual(re.M, re.MULTILINE) self.assertEqual(re.S, re.DOTALL) self.assertEqual(re.X, re.VERBOSE) def test_flags(self): for flag in [re.I, re.M, re.X, re.S, re.L]: self.assertNotEqual(re.compile('^pattern$', flag), None) def test_sre_character_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: 
self.assertIsNotNone(re.match(r"\%03o" % i, chr(i))) self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8")) self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i))) self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z")) if i < 0x10000: self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i))) self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z")) self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i))) self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z")) self.assertIsNotNone(re.match(r"\0", "\000")) self.assertIsNotNone(re.match(r"\08", "\0008")) self.assertIsNotNone(re.match(r"\01", "\001")) self.assertIsNotNone(re.match(r"\018", "\0018")) self.assertIsNotNone(re.match(r"\567", chr(0o167))) self.assertRaises(re.error, re.match, r"\911", "") self.assertRaises(re.error, re.match, r"\x1", "") self.assertRaises(re.error, re.match, r"\x1z", "") self.assertRaises(re.error, re.match, r"\u123", "") self.assertRaises(re.error, re.match, r"\u123z", "") self.assertRaises(re.error, re.match, r"\U0001234", "") self.assertRaises(re.error, re.match, r"\U0001234z", "") self.assertRaises(re.error, re.match, r"\U00110000", "") def test_sre_character_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]: if i < 256: self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i))) if i < 0x10000: 
self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i))) self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0")) self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z")) self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e")) self.assertRaises(re.error, re.match, r"[\911]", "") self.assertRaises(re.error, re.match, r"[\x1z]", "") self.assertRaises(re.error, re.match, r"[\u123z]", "") self.assertRaises(re.error, re.match, r"[\U0001234z]", "") self.assertRaises(re.error, re.match, r"[\U00110000]", "") def test_sre_byte_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: self.assertIsNotNone(re.match((r"\%03o" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0")) self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8")) self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0")) self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z")) self.assertIsNotNone(re.match(br"\u", b'u')) self.assertIsNotNone(re.match(br"\U", b'U')) self.assertIsNotNone(re.match(br"\0", b"\000")) self.assertIsNotNone(re.match(br"\08", b"\0008")) self.assertIsNotNone(re.match(br"\01", b"\001")) self.assertIsNotNone(re.match(br"\018", b"\0018")) self.assertIsNotNone(re.match(br"\567", bytes([0o167]))) self.assertRaises(re.error, re.match, br"\911", b"") self.assertRaises(re.error, re.match, br"\x1", b"") self.assertRaises(re.error, re.match, br"\x1z", b"") def test_sre_byte_class_literals(self): for i in [0, 8, 16, 32, 64, 127, 128, 255]: self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), 
bytes([i]))) self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i]))) self.assertIsNotNone(re.match(br"[\u]", b'u')) self.assertIsNotNone(re.match(br"[\U]", b'U')) self.assertRaises(re.error, re.match, br"[\911]", "") self.assertRaises(re.error, re.match, br"[\x1z]", "") def test_bug_113254(self): self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1) self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1) self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1)) def test_bug_527371(self): # bug described in patches 527371/672491 self.assertEqual(re.match(r'(a)?a','a').lastindex, None) self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1) self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a') self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a') self.assertEqual(re.match("((a))", "a").lastindex, 1) def test_bug_545855(self): # bug 545855 -- This pattern failed to cause a compile error as it # should, instead provoking a TypeError. self.assertRaises(re.error, re.compile, 'foo[a-') def test_bug_418626(self): # bugs 418626 at al. -- Testing Greg Chapman's addition of op code # SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of # pattern '*?' on a long string. self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001) self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0), 20003) self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001) # non-simple '*?' still used to hit the recursion limit, before the # non-recursive scheme was implemented. 
self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001) def test_bug_612074(self): pat="["+re.escape("\u2039")+"]" self.assertEqual(re.compile(pat) and 1, 1) def test_stack_overflow(self): # nasty cases that used to overflow the straightforward recursive # implementation of repeated groups. self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x') self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x') self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x') def test_unlimited_zero_width_repeat(self): # Issue #9669 self.assertIsNone(re.match(r'(?:a?)*y', 'z')) self.assertIsNone(re.match(r'(?:a?)+y', 'z')) self.assertIsNone(re.match(r'(?:a?){2,}y', 'z')) self.assertIsNone(re.match(r'(?:a?)*?y', 'z')) self.assertIsNone(re.match(r'(?:a?)+?y', 'z')) self.assertIsNone(re.match(r'(?:a?){2,}?y', 'z')) def test_scanner(self): def s_ident(scanner, token): return token def s_operator(scanner, token): return "op%s" % token def s_float(scanner, token): return float(token) def s_int(scanner, token): return int(token) scanner = Scanner([ (r"[a-zA-Z_]\w*", s_ident), (r"\d+\.\d*", s_float), (r"\d+", s_int), (r"=|\+|-|\*|/", s_operator), (r"\s+", None), ]) self.assertNotEqual(scanner.scanner.scanner("").pattern, None) self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"), (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5, 'op+', 'bar'], '')) def test_bug_448951(self): # bug 448951 (similar to 429357, but with single char match) # (Also test greedy matches.) 
for op in '','?','*': self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(), (None, None)) self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(), ('a:', 'a')) def test_bug_725106(self): # capturing groups in alternatives in repeats self.assertEqual(re.match('^((a)|b)*', 'abc').groups(), ('b', 'a')) self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(), ('c', 'b')) self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(), ('b', None)) self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(), ('b', None)) self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(), ('b', 'a')) self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(), ('c', 'b')) self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(), ('b', None)) self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(), ('b', None)) def test_bug_725149(self): # mark_stack_base restoring before restoring marks self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(), ('a', None)) self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(), ('a', None, None)) def test_bug_764548(self): # bug 764548, re.compile() barfs on str/unicode subclasses class my_unicode(str): pass pat = re.compile(my_unicode("abc")) self.assertEqual(pat.match("xyz"), None) def test_finditer(self): iter = re.finditer(r":+", "a:b::c:::d") self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", 1, 10) self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", pos=1, endpos=10) self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", endpos=10, pos=1) self.assertEqual([item.group(0) for item in iter], [":", "::", ":::"]) pat = re.compile(r":+") iter = pat.finditer("a:b::c:::d", pos=3, endpos=8) self.assertEqual([item.group(0) for item in iter], ["::", "::"]) def test_bug_926075(self): 
self.assertTrue(re.compile('bug_926075') is not re.compile(b'bug_926075')) def test_bug_931848(self): pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"') self.assertEqual(re.compile(pattern).split("a.b.c"), ['a','b','c']) def test_bug_581080(self): iter = re.finditer(r"\s", "a b") self.assertEqual(next(iter).span(), (1,2)) self.assertRaises(StopIteration, next, iter) scanner = re.compile(r"\s").scanner("a b") self.assertEqual(scanner.search().span(), (1, 2)) self.assertEqual(scanner.search(), None) def test_bug_817234(self): iter = re.finditer(r".*", "asdf") self.assertEqual(next(iter).span(), (0, 4)) self.assertEqual(next(iter).span(), (4, 4)) self.assertRaises(StopIteration, next, iter) def test_bug_6561(self): # '\d' should match characters in Unicode category 'Nd' # (Number, Decimal Digit), but not those in 'Nl' (Number, # Letter) or 'No' (Number, Other). decimal_digits = [ '\u0037', # '\N{DIGIT SEVEN}', category 'Nd' '\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd' '\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd' ] for x in decimal_digits: self.assertEqual(re.match('^\d$', x).group(0), x) not_decimal_digits = [ '\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl' '\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl' '\u2082', # '\N{SUBSCRIPT TWO}', category 'No' '\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No' ] for x in not_decimal_digits: self.assertIsNone(re.match('^\d$', x)) def test_empty_array(self): # SF buf 1647541 import array for typecode in 'bBuhHiIlLfd': a = array.array(typecode) self.assertEqual(re.compile(b"bla").match(a), None) self.assertEqual(re.compile(b"").match(a).groups(), ()) def test_inline_flags(self): # Bug #1700 upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Bellow lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Bellow p = re.compile(upper_char, re.I | re.U) q = p.match(lower_char) self.assertNotEqual(q, None) p = re.compile(lower_char, re.I | re.U) q = p.match(upper_char) self.assertNotEqual(q, None) p 
= re.compile('(?i)' + upper_char, re.U) q = p.match(lower_char) self.assertNotEqual(q, None) p = re.compile('(?i)' + lower_char, re.U) q = p.match(upper_char) self.assertNotEqual(q, None) p = re.compile('(?iu)' + upper_char) q = p.match(lower_char) self.assertNotEqual(q, None) p = re.compile('(?iu)' + lower_char) q = p.match(upper_char) self.assertNotEqual(q, None) def test_dollar_matches_twice(self): "$ matches the end of string, and just before the terminating \n" pattern = re.compile('$') self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#') self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#') self.assertEqual(pattern.sub('#', '\n'), '#\n#') pattern = re.compile('$', re.MULTILINE) self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' ) self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#') self.assertEqual(pattern.sub('#', '\n'), '#\n#') def test_bytes_str_mixing(self): # Mixing str and bytes is disallowed pat = re.compile('.') bpat = re.compile(b'.') self.assertRaises(TypeError, pat.match, b'b') self.assertRaises(TypeError, bpat.match, 'b') self.assertRaises(TypeError, pat.sub, b'b', 'c') self.assertRaises(TypeError, pat.sub, 'b', b'c') self.assertRaises(TypeError, pat.sub, b'b', b'c') self.assertRaises(TypeError, bpat.sub, b'b', 'c') self.assertRaises(TypeError, bpat.sub, 'b', b'c') self.assertRaises(TypeError, bpat.sub, 'b', 'c') def test_ascii_and_unicode_flag(self): # String patterns for flags in (0, re.UNICODE): pat = re.compile('\xc0', flags | re.IGNORECASE) self.assertNotEqual(pat.match('\xe0'), None) pat = re.compile('\w', flags) self.assertNotEqual(pat.match('\xe0'), None) pat = re.compile('\xc0', re.ASCII | re.IGNORECASE) self.assertEqual(pat.match('\xe0'), None) pat = re.compile('(?a)\xc0', re.IGNORECASE) self.assertEqual(pat.match('\xe0'), None) pat = re.compile('\w', re.ASCII) self.assertEqual(pat.match('\xe0'), None) pat = re.compile('(?a)\w') self.assertEqual(pat.match('\xe0'), None) # Bytes patterns for flags in (0, re.ASCII): 
pat = re.compile(b'\xc0', re.IGNORECASE) self.assertEqual(pat.match(b'\xe0'), None) pat = re.compile(b'\w') self.assertEqual(pat.match(b'\xe0'), None) # Incompatibilities self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE) self.assertRaises(ValueError, re.compile, b'(?u)\w') self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII) self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII) self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE) self.assertRaises(ValueError, re.compile, '(?au)\w') def test_bug_6509(self): # Replacement strings of both types must parse properly. # all strings pat = re.compile('a(\w)') self.assertEqual(pat.sub('b\\1', 'ac'), 'bc') pat = re.compile('a(.)') self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234') pat = re.compile('..') self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str') # all bytes pat = re.compile(b'a(\w)') self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc') pat = re.compile(b'a(.)') self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD') pat = re.compile(b'..') self.assertEqual(pat.sub(lambda m: b'bytes', b'a5'), b'bytes') def test_dealloc(self): # issue 3299: check for segfault in debug build import _sre # the overflow limit is different on wide and narrow builds and it # depends on the definition of SRE_CODE (see sre.h). # 2**128 should be big enough to overflow on both. For smaller values # a RuntimeError is raised instead of OverflowError. 
long_overflow = 2**128 self.assertRaises(TypeError, re.finditer, "a", {}) self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow]) self.assertRaises(TypeError, _sre.compile, {}, 0, []) def test_search_dot_unicode(self): self.assertIsNotNone(re.search("123.*-", '123abc-')) self.assertIsNotNone(re.search("123.*-", '123\xe9-')) self.assertIsNotNone(re.search("123.*-", '123\u20ac-')) self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-')) self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-')) def test_compile(self): # Test return value when given string and pattern as parameter pattern = re.compile('random pattern') self.assertIsInstance(pattern, re._pattern_type) same_pattern = re.compile(pattern) self.assertIsInstance(same_pattern, re._pattern_type) self.assertIs(same_pattern, pattern) # Test behaviour when not given a string or pattern as parameter self.assertRaises(TypeError, re.compile, 0) def test_bug_13899(self): # Issue #13899: re pattern r"[\A]" should work like "A" but matches # nothing. Ditto B and Z. self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'), ['A', 'B', '\b', 'C', 'Z']) @bigmemtest(size=_2G, memuse=1) def test_large_search(self, size): # Issue #10182: indices were 32-bit-truncated. s = 'a' * size m = re.search('$', s) self.assertIsNotNone(m) self.assertEqual(m.start(), size) self.assertEqual(m.end(), size) # The huge memuse is because of re.sub() using a list and a join() # to create the replacement result. @bigmemtest(size=_2G, memuse=16 + 2) def test_large_subn(self, size): # Issue #10182: indices were 32-bit-truncated. s = 'a' * size r, n = re.subn('', '', s) self.assertEqual(r, s) self.assertEqual(n, size + 1) def test_bug_16688(self): # Issue 16688: Backreferences make case-insensitive regex fail on # non-ASCII strings. 
self.assertEqual(re.findall(r"(?i)(a)\1", "aa \u0100"), ['a']) self.assertEqual(re.match(r"(?s).{1,3}", "\u0100\u0100").span(), (0, 2)) def test_repeat_minmax_overflow(self): # Issue #13169 string = "x" * 100000 self.assertEqual(re.match(r".{65535}", string).span(), (0, 65535)) self.assertEqual(re.match(r".{,65535}", string).span(), (0, 65535)) self.assertEqual(re.match(r".{65535,}?", string).span(), (0, 65535)) self.assertEqual(re.match(r".{65536}", string).span(), (0, 65536)) self.assertEqual(re.match(r".{,65536}", string).span(), (0, 65536)) self.assertEqual(re.match(r".{65536,}?", string).span(), (0, 65536)) # 2**128 should be big enough to overflow both SRE_CODE and Py_ssize_t. self.assertRaises(OverflowError, re.compile, r".{%d}" % 2**128) self.assertRaises(OverflowError, re.compile, r".{,%d}" % 2**128) self.assertRaises(OverflowError, re.compile, r".{%d,}?" % 2**128) self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128)) @cpython_only def test_repeat_minmax_overflow_maxrepeat(self): try: from _sre import MAXREPEAT except ImportError: self.skipTest('requires _sre.MAXREPEAT constant') string = "x" * 100000 self.assertIsNone(re.match(r".{%d}" % (MAXREPEAT - 1), string)) self.assertEqual(re.match(r".{,%d}" % (MAXREPEAT - 1), string).span(), (0, 100000)) self.assertIsNone(re.match(r".{%d,}?" % (MAXREPEAT - 1), string)) self.assertRaises(OverflowError, re.compile, r".{%d}" % MAXREPEAT) self.assertRaises(OverflowError, re.compile, r".{,%d}" % MAXREPEAT) self.assertRaises(OverflowError, re.compile, r".{%d,}?" 
% MAXREPEAT) def run_re_tests(): from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR if verbose: print('Running re_tests test suite') else: # To save time, only run the first and last 10 tests #tests = tests[:10] + tests[-10:] pass for t in tests: sys.stdout.flush() pattern = s = outcome = repl = expected = None if len(t) == 5: pattern, s, outcome, repl, expected = t elif len(t) == 3: pattern, s, outcome = t else: raise ValueError('Test tuples should have 3 or 5 fields', t) try: obj = re.compile(pattern) except re.error: if outcome == SYNTAX_ERROR: pass # Expected a syntax error else: print('=== Syntax error:', t) except KeyboardInterrupt: raise KeyboardInterrupt except: print('*** Unexpected error ***', t) if verbose: traceback.print_exc(file=sys.stdout) else: try: result = obj.search(s) except re.error as msg: print('=== Unexpected exception', t, repr(msg)) if outcome == SYNTAX_ERROR: # This should have been a syntax error; forget it. pass elif outcome == FAIL: if result is None: pass # No match, as expected else: print('=== Succeeded incorrectly', t) elif outcome == SUCCEED: if result is not None: # Matched, as expected, so now we compute the # result string and compare it to our expected result. start, end = result.span(0) vardict={'found': result.group(0), 'groups': result.group(), 'flags': result.re.flags} for i in range(1, 100): try: gi = result.group(i) # Special hack because else the string concat fails: if gi is None: gi = "None" except IndexError: gi = "Error" vardict['g%d' % i] = gi for i in result.re.groupindex.keys(): try: gi = result.group(i) if gi is None: gi = "None" except IndexError: gi = "Error" vardict[i] = gi repl = eval(repl, vardict) if repl != expected: print('=== grouping error', t, end=' ') print(repr(repl) + ' should be ' + repr(expected)) else: print('=== Failed incorrectly', t) # Try the match with both pattern and string converted to # bytes, and check that it still succeeds. 
try: bpat = bytes(pattern, "ascii") bs = bytes(s, "ascii") except UnicodeEncodeError: # skip non-ascii tests pass else: try: bpat = re.compile(bpat) except Exception: print('=== Fails on bytes pattern compile', t) if verbose: traceback.print_exc(file=sys.stdout) else: bytes_result = bpat.search(bs) if bytes_result is None: print('=== Fails on bytes pattern match', t) # Try the match with the search area limited to the extent # of the match and see if it still succeeds. \B will # break (because it won't match at the end or start of a # string), so we'll ignore patterns that feature it. if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \ and result is not None: obj = re.compile(pattern) result = obj.search(s, result.start(0), result.end(0) + 1) if result is None: print('=== Failed on range-limited match', t) # Try the match with IGNORECASE enabled, and check that it # still succeeds. obj = re.compile(pattern, re.IGNORECASE) result = obj.search(s) if result is None: print('=== Fails on case-insensitive match', t) # Try the match with LOCALE enabled, and check that it # still succeeds. if '(?u)' not in pattern: obj = re.compile(pattern, re.LOCALE) result = obj.search(s) if result is None: print('=== Fails on locale-sensitive match', t) # Try the match with UNICODE locale enabled, and check # that it still succeeds. obj = re.compile(pattern, re.UNICODE) result = obj.search(s) if result is None: print('=== Fails on unicode-sensitive match', t) def test_main(): run_unittest(ReTests) run_re_tests() if __name__ == "__main__": test_main()
mancoast/CPythonPyc_test
fail/331_test_re.py
Python
gpl-3.0
54,426
# -*- coding: utf-8 -*- import sys from argparse import ArgumentParser from DatabaseLogin import DatabaseLogin from GlobalInstaller import GlobalInstaller from PyQt5 import QtWidgets from Ui_MainWindow import Ui_MainWindow # import damit Installer funktioniert. auch wenn diese nicht hier benoetigt werden. from PyQt5 import QtCore, QtGui import cx_Oracle import json import base64 import urllib from Crypto.Cipher import AES from chardet import UniversalDetector def get_parser(): parser = ArgumentParser() # Parameter, welche die Gui Initalisierung Regeln. parser.add_argument('--inst_synonym', action='store_true', default=False, help=r"Setzt Flag für die Installation von Synonymen.") parser.add_argument('--inst_sequence', action='store_true', default=False, help=r"Setzt Flag für die Installation von Sequenzen.") parser.add_argument('--inst_tab_save', action='store_true', default=False, help=r"Setzt Flag für die Installation von Tab Save Tabellen.") parser.add_argument('--inst_tab', action='store_false', default=True, help=r"Entfernt Flag für die Installation von Tab Tabellen.") parser.add_argument('--inst_view', action='store_false', default=True, help=r"Entfernt Flag für die Installation von Views.") parser.add_argument('--inst_package', action='store_false', default=True, help=r"Entfernt Flag für die Installation von Packages.") parser.add_argument('--inst_sql', action='store_false', default=True, help=r"Entfernt Flag für die Installation von Sqls.") # Erweiterte Parameter, welche die Gui Initalisierung Regeln. 
parser.add_argument('--username', default=r"", help=r"Benutzername der Datenbank Verbindung.") parser.add_argument('--password', default=r"", help=r"Passwort der Datenbank Verbindung.") parser.add_argument('--connection', default=r"", help=r"Connection der Datenbank Verbindung.") parser.add_argument('--svnBasePath', default=r"", help=r"Schreibt Pfad in SVN Basis Pfad.") parser.add_argument('--svnKndPath', default=r"", help=r"Schreibt Pfad in SVN Kassen Pfad.") parser.add_argument('--installationPath', default=r"", help=r"Schreibt Pfad in Installation Pfad.") parser.add_argument('--global_defines_file', default=r"", help=r"Pfad zu einem TAB seperierten File wo die Defines vordefiniert sind.") # jsonl_parameters ueberschreibt alle anderen Parameter. parser.add_argument('--jsonl_parameters', type=str, default=r'', help=(r"Übergabe von allen Parameter in einem JSONL Format." "Dieses Format überschreibt alle Parameter.")) # Parameter welche eine blinde Installation ohne Gui zulassen. Dazu muss showGui Paramter zwingend False sein. parser.add_argument('--hideGui', action='store_true', default=False, help=r"Startet DB Installer ohne GUI.") parser.add_argument('--clean_installation_path', action='store_true', default=False, help=r"Führt Aktion Installationspfad Bereinigen durch. Nur in Kombi-nation von Parameter –-hideGui oder --json_file_path.") parser.add_argument('--copy_all_data_to_installation', action='store_true', default=False, help=r"Führt Aktion Dateien ab Pfade Laden durch. Nur in Kombination von Parameter -–hideGui oder --json_file_path.") parser.add_argument('--install_objects', action='store_true', default=False, help=r"Führt Aktion Objekte installieren durch. Nur in Kombination von Parameter –-hideGui oder --json_file_path.") parser.add_argument('--json_file_path', default=r"", help=(r"Übergabe eines Parameter Files in Jsonl Format." 
"Zusammen mit den Argumenten für die Aktionen kann damit eine ganze Kette von " "Arbeiten mit einem einzigen Aufruf erledigt werden. " "Arbeiten in einem Jsonl File sind immer ohne Gui " "und schreiben Debug Informationen auf die Konsole.")) return parser # # Main Programm. All starts at this point. # if __name__ == "__main__": parser = get_parser() args = parser.parse_args() dbLogin = DatabaseLogin(userName=args.username, passWord=args.password, connection=args.connection) dbLogin.testConnection(printInfo=False) globalInstaller = GlobalInstaller(dbLogin=dbLogin, svnBasePath=args.svnBasePath, svnKndPath=args.svnKndPath, installationPath=args.installationPath, flag_synonym=args.inst_synonym, flag_sequence=args.inst_sequence, flag_tab_save=args.inst_tab_save, flag_tab=args.inst_tab, flag_view=args.inst_view, flag_package=args.inst_package, flag_sql=args.inst_sql, global_defines_file=args.global_defines_file, jsonl_parameters=args.jsonl_parameters ) if len(args.json_file_path) > 0: globalInstaller.workJsonlFile(json_file_path=args.json_file_path, cleanInstallationPath=args.clean_installation_path, copy_all_data_to_installation=args.copy_all_data_to_installation, install_objects=args.install_objects) elif args.hideGui: # Calls function without gui. # used in command line only. if args.clean_installation_path: globalInstaller.cleanInstallationPath() if args.copy_all_data_to_installation: globalInstaller.readInstallationObjectFromPath() globalInstaller.copyAllData2InstallationPath() if args.install_objects: globalInstaller.installAllObjects2Database() else: # Default Obption starts Gui app = QtWidgets.QApplication(sys.argv) MainWindow = QtWidgets.QMainWindow() ui = Ui_MainWindow() ui.setupUi(MainWindow) ui.connect_user_isgnals() ui.set_user_variables(globalInstaller=globalInstaller) MainWindow.show() sys.exit(app.exec_())
handbaggerli/DbInstaller
Python/DbInstaller.py
Python
gpl-3.0
6,513
# This file is part of Tryton. The COPYRIGHT file at the top level of # this repository contains the full copyright notices and license terms. import re import heapq from sql import Null from sql.aggregate import Max from sql.conditionals import Case from collections import defaultdict try: import simplejson as json except ImportError: import json from ..model import ModelView, ModelSQL, fields, Unique from ..report import Report from ..wizard import Wizard, StateView, StateAction, Button from ..transaction import Transaction from ..cache import Cache from ..pool import Pool from ..pyson import Bool, Eval from ..rpc import RPC from .. import backend from ..protocols.jsonrpc import JSONDecoder, JSONEncoder from ..tools import is_instance_method try: from ..tools.StringMatcher import StringMatcher except ImportError: from difflib import SequenceMatcher as StringMatcher __all__ = [ 'Model', 'ModelField', 'ModelAccess', 'ModelFieldAccess', 'ModelButton', 'ModelData', 'PrintModelGraphStart', 'PrintModelGraph', 'ModelGraph', ] IDENTIFIER = re.compile(r'^[a-zA-z_][a-zA-Z0-9_]*$') class Model(ModelSQL, ModelView): "Model" __name__ = 'ir.model' _order_name = 'model' name = fields.Char('Model Description', translate=True, loading='lazy', states={ 'readonly': Bool(Eval('module')), }, depends=['module']) model = fields.Char('Model Name', required=True, states={ 'readonly': Bool(Eval('module')), }, depends=['module']) info = fields.Text('Information', states={ 'readonly': Bool(Eval('module')), }, depends=['module']) module = fields.Char('Module', help="Module in which this model is defined", readonly=True) global_search_p = fields.Boolean('Global Search') fields = fields.One2Many('ir.model.field', 'model', 'Fields', required=True) @classmethod def __setup__(cls): super(Model, cls).__setup__() table = cls.__table__() cls._sql_constraints += [ ('model_uniq', Unique(table, table.model), 'The model must be unique!'), ] cls._error_messages.update({ 'invalid_module': ('Module name 
"%s" is not a valid python ' 'identifier.'), }) cls._order.insert(0, ('model', 'ASC')) cls.__rpc__.update({ 'list_models': RPC(), 'list_history': RPC(), 'global_search': RPC(), }) @classmethod def register(cls, model, module_name): pool = Pool() Property = pool.get('ir.property') cursor = Transaction().cursor ir_model = cls.__table__() cursor.execute(*ir_model.select(ir_model.id, where=ir_model.model == model.__name__)) model_id = None if cursor.rowcount == -1 or cursor.rowcount is None: data = cursor.fetchone() if data: model_id, = data elif cursor.rowcount != 0: model_id, = cursor.fetchone() if not model_id: cursor.execute(*ir_model.insert( [ir_model.model, ir_model.name, ir_model.info, ir_model.module], [[model.__name__, model._get_name(), model.__doc__, module_name]])) Property._models_get_cache.clear() cursor.execute(*ir_model.select(ir_model.id, where=ir_model.model == model.__name__)) (model_id,) = cursor.fetchone() elif model.__doc__: cursor.execute(*ir_model.update( [ir_model.name, ir_model.info], [model._get_name(), model.__doc__], where=ir_model.id == model_id)) return model_id @classmethod def validate(cls, models): super(Model, cls).validate(models) cls.check_module(models) @classmethod def check_module(cls, models): ''' Check module ''' for model in models: if model.module and not IDENTIFIER.match(model.module): cls.raise_user_error('invalid_module', (model.rec_name,)) @classmethod def list_models(cls): 'Return a list of all models names' models = cls.search([], order=[ ('module', 'ASC'), # Optimization assumption ('model', 'ASC'), ('id', 'ASC'), ]) return [m.model for m in models] @classmethod def list_history(cls): 'Return a list of all models with history' return [name for name, model in Pool().iterobject() if getattr(model, '_history', False)] @classmethod def create(cls, vlist): pool = Pool() Property = pool.get('ir.property') res = super(Model, cls).create(vlist) # Restart the cache of models_get Property._models_get_cache.clear() return res 
@classmethod def write(cls, models, values, *args): pool = Pool() Property = pool.get('ir.property') super(Model, cls).write(models, values, *args) # Restart the cache of models_get Property._models_get_cache.clear() @classmethod def delete(cls, models): pool = Pool() Property = pool.get('ir.property') super(Model, cls).delete(models) # Restart the cache of models_get Property._models_get_cache.clear() @classmethod def global_search(cls, text, limit, menu='ir.ui.menu'): """ Search on models for text including menu Returns a list of tuple (ratio, model, model_name, id, name, icon) The size of the list is limited to limit """ pool = Pool() ModelAccess = pool.get('ir.model.access') if not limit > 0: raise ValueError('limit must be > 0: %r' % (limit,)) models = cls.search(['OR', ('global_search_p', '=', True), ('model', '=', menu), ]) access = ModelAccess.get_access([m.model for m in models]) s = StringMatcher() if isinstance(text, str): text = text.decode('utf-8') s.set_seq2(text) def generate(): for model in models: if not access[model.model]['read']: continue Model = pool.get(model.model) if not hasattr(Model, 'search_global'): continue for record, name, icon in Model.search_global(text): if isinstance(name, str): name = name.decode('utf-8') s.set_seq1(name) yield (s.ratio(), model.model, model.rec_name, record.id, name, icon) return heapq.nlargest(int(limit), generate()) class ModelField(ModelSQL, ModelView): "Model field" __name__ = 'ir.model.field' name = fields.Char('Name', required=True, states={ 'readonly': Bool(Eval('module')), }, depends=['module']) relation = fields.Char('Model Relation', states={ 'readonly': Bool(Eval('module')), }, depends=['module']) model = fields.Many2One('ir.model', 'Model', required=True, select=True, ondelete='CASCADE', states={ 'readonly': Bool(Eval('module')), }, depends=['module']) field_description = fields.Char('Field Description', translate=True, loading='lazy', states={ 'readonly': Bool(Eval('module')), }, depends=['module']) 
ttype = fields.Char('Field Type', states={ 'readonly': Bool(Eval('module')), }, depends=['module']) groups = fields.Many2Many('ir.model.field-res.group', 'field', 'group', 'Groups') help = fields.Text('Help', translate=True, loading='lazy', states={ 'readonly': Bool(Eval('module')), }, depends=['module']) module = fields.Char('Module', help="Module in which this field is defined") @classmethod def __setup__(cls): super(ModelField, cls).__setup__() table = cls.__table__() cls._sql_constraints += [ ('name_model_uniq', Unique(table, table.name, table.model), 'The field name in model must be unique!'), ] cls._error_messages.update({ 'invalid_name': ('Model Field name "%s" is not a valid python ' 'identifier.'), }) cls._order.insert(0, ('name', 'ASC')) @classmethod def register(cls, model, module_name, model_id): pool = Pool() Model = pool.get('ir.model') cursor = Transaction().cursor ir_model_field = cls.__table__() ir_model = Model.__table__() cursor.execute(*ir_model_field.join(ir_model, condition=ir_model_field.model == ir_model.id ).select(ir_model_field.id.as_('id'), ir_model_field.name.as_('name'), ir_model_field.field_description.as_('field_description'), ir_model_field.ttype.as_('ttype'), ir_model_field.relation.as_('relation'), ir_model_field.module.as_('module'), ir_model_field.help.as_('help'), where=ir_model.model == model.__name__)) model_fields = dict((f['name'], f) for f in cursor.dictfetchall()) for field_name, field in model._fields.iteritems(): if hasattr(field, 'model_name'): relation = field.model_name elif hasattr(field, 'relation_name'): relation = field.relation_name else: relation = None if field_name not in model_fields: cursor.execute(*ir_model_field.insert( [ir_model_field.model, ir_model_field.name, ir_model_field.field_description, ir_model_field.ttype, ir_model_field.relation, ir_model_field.help, ir_model_field.module], [[model_id, field_name, field.string, field._type, relation, field.help, module_name]])) elif 
(model_fields[field_name]['field_description'] != field.string or model_fields[field_name]['ttype'] != field._type or model_fields[field_name]['relation'] != relation or model_fields[field_name]['help'] != field.help): cursor.execute(*ir_model_field.update( [ir_model_field.field_description, ir_model_field.ttype, ir_model_field.relation, ir_model_field.help], [field.string, field._type, relation, field.help], where=ir_model_field.id == model_fields[field_name]['id'])) # Clean ir_model_field from field that are no more existing. for field_name in model_fields: if model_fields[field_name]['module'] == module_name \ and field_name not in model._fields: # XXX This delete field even when it is defined later # in the module cursor.execute(*ir_model_field.delete( where=ir_model_field.id == model_fields[field_name]['id'])) @staticmethod def default_name(): return 'No Name' @staticmethod def default_field_description(): return 'No description available' @classmethod def validate(cls, fields): super(ModelField, cls).validate(fields) cls.check_name(fields) @classmethod def check_name(cls, fields): ''' Check name ''' for field in fields: if not IDENTIFIER.match(field.name): cls.raise_user_error('invalid_name', (field.name,)) @classmethod def read(cls, ids, fields_names=None): pool = Pool() Translation = pool.get('ir.translation') Model = pool.get('ir.model') to_delete = [] if Transaction().context.get('language'): if fields_names is None: fields_names = cls._fields.keys() if 'field_description' in fields_names \ or 'help' in fields_names: if 'model' not in fields_names: fields_names.append('model') to_delete.append('model') if 'name' not in fields_names: fields_names.append('name') to_delete.append('name') res = super(ModelField, cls).read(ids, fields_names=fields_names) if (Transaction().context.get('language') and ('field_description' in fields_names or 'help' in fields_names)): model_ids = set() for rec in res: if isinstance(rec['model'], (list, tuple)): 
model_ids.add(rec['model'][0]) else: model_ids.add(rec['model']) model_ids = list(model_ids) cursor = Transaction().cursor model = Model.__table__() cursor.execute(*model.select(model.id, model.model, where=model.id.in_(model_ids))) id2model = dict(cursor.fetchall()) trans_args = [] for rec in res: if isinstance(rec['model'], (list, tuple)): model_id = rec['model'][0] else: model_id = rec['model'] if 'field_description' in fields_names: trans_args.append((id2model[model_id] + ',' + rec['name'], 'field', Transaction().language, None)) if 'help' in fields_names: trans_args.append((id2model[model_id] + ',' + rec['name'], 'help', Transaction().language, None)) Translation.get_sources(trans_args) for rec in res: if isinstance(rec['model'], (list, tuple)): model_id = rec['model'][0] else: model_id = rec['model'] if 'field_description' in fields_names: res_trans = Translation.get_source( id2model[model_id] + ',' + rec['name'], 'field', Transaction().language) if res_trans: rec['field_description'] = res_trans if 'help' in fields_names: res_trans = Translation.get_source( id2model[model_id] + ',' + rec['name'], 'help', Transaction().language) if res_trans: rec['help'] = res_trans if to_delete: for rec in res: for field in to_delete: del rec[field] return res class ModelAccess(ModelSQL, ModelView): "Model access" __name__ = 'ir.model.access' _rec_name = 'model' model = fields.Many2One('ir.model', 'Model', required=True, ondelete="CASCADE") group = fields.Many2One('res.group', 'Group', ondelete="CASCADE") perm_read = fields.Boolean('Read Access') perm_write = fields.Boolean('Write Access') perm_create = fields.Boolean('Create Access') perm_delete = fields.Boolean('Delete Access') description = fields.Text('Description') _get_access_cache = Cache('ir_model_access.get_access', context=False) @classmethod def __setup__(cls): super(ModelAccess, cls).__setup__() cls._error_messages.update({ 'read': 'You can not read this document! 
(%s)', 'write': 'You can not write in this document! (%s)', 'create': 'You can not create this kind of document! (%s)', 'delete': 'You can not delete this document! (%s)', }) cls.__rpc__.update({ 'get_access': RPC(), }) @classmethod def __register__(cls, module_name): TableHandler = backend.get('TableHandler') cursor = Transaction().cursor super(ModelAccess, cls).__register__(module_name) table = TableHandler(cursor, cls, module_name) # Migration from 2.6 (model, group) no more unique table.drop_constraint('model_group_uniq') @staticmethod def check_xml_record(accesses, values): return True @staticmethod def default_perm_read(): return False @staticmethod def default_perm_write(): return False @staticmethod def default_perm_create(): return False @staticmethod def default_perm_delete(): return False @classmethod def get_access(cls, models): 'Return access for models' # root user above constraint if Transaction().user == 0: return defaultdict(lambda: defaultdict(lambda: True)) pool = Pool() Model = pool.get('ir.model') UserGroup = pool.get('res.user-res.group') cursor = Transaction().cursor user = Transaction().user model_access = cls.__table__() ir_model = Model.__table__() user_group = UserGroup.__table__() access = {} for model in models: maccess = cls._get_access_cache.get((user, model), default=-1) if maccess == -1: break access[model] = maccess else: return access default = {'read': True, 'write': True, 'create': True, 'delete': True} access = dict((m, default) for m in models) cursor.execute(*model_access.join(ir_model, 'LEFT', condition=model_access.model == ir_model.id ).join(user_group, 'LEFT', condition=user_group.group == model_access.group ).select( ir_model.model, Max(Case((model_access.perm_read == True, 1), else_=0)), Max(Case((model_access.perm_write == True, 1), else_=0)), Max(Case((model_access.perm_create == True, 1), else_=0)), Max(Case((model_access.perm_delete == True, 1), else_=0)), where=ir_model.model.in_(models) & ((user_group.user == 
user) | (model_access.group == Null)), group_by=ir_model.model)) access.update(dict( (m, {'read': r, 'write': w, 'create': c, 'delete': d}) for m, r, w, c, d in cursor.fetchall())) for model, maccess in access.iteritems(): cls._get_access_cache.set((user, model), maccess) return access @classmethod def check(cls, model_name, mode='read', raise_exception=True): 'Check access for model_name and mode' assert mode in ['read', 'write', 'create', 'delete'], \ 'Invalid access mode for security' if ((Transaction().user == 0) or (raise_exception and not Transaction().context.get('_check_access'))): return True access = cls.get_access([model_name])[model_name][mode] if not access and access is not None: if raise_exception: cls.raise_user_error(mode, model_name) else: return False return True @classmethod def check_relation(cls, model_name, field_name, mode='read'): 'Check access to relation field for model_name and mode' pool = Pool() Model = pool.get(model_name) field = getattr(Model, field_name) if field._type in ('one2many', 'many2one'): return cls.check(field.model_name, mode=mode, raise_exception=False) elif field._type in ('many2many', 'one2one'): if (field.target and not cls.check(field.target, mode=mode, raise_exception=False)): return False elif (field.relation_name and not cls.check(field.relation_name, mode=mode, raise_exception=False)): return False else: return True elif field._type == 'reference': selection = field.selection if isinstance(selection, basestring): sel_func = getattr(Model, field.selection) if not is_instance_method(Model, field.selection): selection = sel_func() else: # XXX Can not check access right on instance method selection = [] for model_name, _ in selection: if not cls.check(model_name, mode=mode, raise_exception=False): return False return True else: return True @classmethod def write(cls, accesses, values, *args): super(ModelAccess, cls).write(accesses, values, *args) # Restart the cache cls._get_access_cache.clear() 
ModelView._fields_view_get_cache.clear() @classmethod def create(cls, vlist): res = super(ModelAccess, cls).create(vlist) # Restart the cache cls._get_access_cache.clear() ModelView._fields_view_get_cache.clear() return res @classmethod def delete(cls, accesses): super(ModelAccess, cls).delete(accesses) # Restart the cache cls._get_access_cache.clear() ModelView._fields_view_get_cache.clear() class ModelFieldAccess(ModelSQL, ModelView): "Model Field Access" __name__ = 'ir.model.field.access' _rec_name = 'field' field = fields.Many2One('ir.model.field', 'Field', required=True, ondelete='CASCADE') group = fields.Many2One('res.group', 'Group', ondelete='CASCADE') perm_read = fields.Boolean('Read Access') perm_write = fields.Boolean('Write Access') perm_create = fields.Boolean('Create Access') perm_delete = fields.Boolean('Delete Access') description = fields.Text('Description') _get_access_cache = Cache('ir_model_field_access.check', context=False) @classmethod def __setup__(cls): super(ModelFieldAccess, cls).__setup__() cls._error_messages.update({ 'read': 'You can not read the field! (%s.%s)', 'write': 'You can not write on the field! 
(%s.%s)', }) @classmethod def __register__(cls, module_name): TableHandler = backend.get('TableHandler') cursor = Transaction().cursor super(ModelFieldAccess, cls).__register__(module_name) table = TableHandler(cursor, cls, module_name) # Migration from 2.6 (field, group) no more unique table.drop_constraint('field_group_uniq') @staticmethod def check_xml_record(field_accesses, values): return True @staticmethod def default_perm_read(): return False @staticmethod def default_perm_write(): return False @staticmethod def default_perm_create(): return True @staticmethod def default_perm_delete(): return True @classmethod def get_access(cls, models): 'Return fields access for models' # root user above constraint if Transaction().user == 0: return defaultdict(lambda: defaultdict( lambda: defaultdict(lambda: True))) pool = Pool() Model = pool.get('ir.model') ModelField = pool.get('ir.model.field') UserGroup = pool.get('res.user-res.group') cursor = Transaction().cursor user = Transaction().user field_access = cls.__table__() ir_model = Model.__table__() model_field = ModelField.__table__() user_group = UserGroup.__table__() accesses = {} for model in models: maccesses = cls._get_access_cache.get((user, model)) if maccesses is None: break accesses[model] = maccesses else: return accesses default = {} accesses = dict((m, default) for m in models) cursor.execute(*field_access.join(model_field, condition=field_access.field == model_field.id ).join(ir_model, condition=model_field.model == ir_model.id ).join(user_group, 'LEFT', condition=user_group.group == field_access.group ).select( ir_model.model, model_field.name, Max(Case((field_access.perm_read == True , 1), else_=0)), Max(Case((field_access.perm_write == True, 1), else_=0)), Max(Case((field_access.perm_create == True, 1), else_=0)), Max(Case((field_access.perm_delete == True, 1), else_=0)), where=ir_model.model.in_(models) & ((user_group.user == user) | (field_access.group == Null)), group_by=[ir_model.model, 
model_field.name])) for m, f, r, w, c, d in cursor.fetchall(): accesses[m][f] = {'read': r, 'write': w, 'create': c, 'delete': d} for model, maccesses in accesses.iteritems(): cls._get_access_cache.set((user, model), maccesses) return accesses @classmethod def check(cls, model_name, fields, mode='read', raise_exception=True, access=False): ''' Check access for fields on model_name. ''' assert mode in ('read', 'write', 'create', 'delete'), \ 'Invalid access mode' if ((Transaction().user == 0) or (raise_exception and not Transaction().context.get('_check_access'))): if access: return dict((x, True) for x in fields) return True accesses = dict((f, a[mode]) for f, a in cls.get_access([model_name])[model_name].iteritems()) if access: return accesses for field in fields: if not accesses.get(field, True): if raise_exception: cls.raise_user_error(mode, (model_name, field)) else: return False return True @classmethod def write(cls, field_accesses, values, *args): super(ModelFieldAccess, cls).write(field_accesses, values, *args) # Restart the cache cls._get_access_cache.clear() ModelView._fields_view_get_cache.clear() @classmethod def create(cls, vlist): res = super(ModelFieldAccess, cls).create(vlist) # Restart the cache cls._get_access_cache.clear() ModelView._fields_view_get_cache.clear() return res @classmethod def delete(cls, field_accesses): super(ModelFieldAccess, cls).delete(field_accesses) # Restart the cache cls._get_access_cache.clear() ModelView._fields_view_get_cache.clear() class ModelButton(ModelSQL, ModelView): "Model Button" __name__ = 'ir.model.button' name = fields.Char('Name', required=True, readonly=True) model = fields.Many2One('ir.model', 'Model', required=True, readonly=True, ondelete='CASCADE', select=True) groups = fields.Many2Many('ir.model.button-res.group', 'button', 'group', 'Groups') _groups_cache = Cache('ir.model.button.groups') @classmethod def __setup__(cls): super(ModelButton, cls).__setup__() table = cls.__table__() cls._sql_constraints 
+= [ ('name_model_uniq', Unique(table, table.name, table.model), 'The button name in model must be unique!'), ] cls._order.insert(0, ('model', 'ASC')) @classmethod def create(cls, vlist): result = super(ModelButton, cls).create(vlist) # Restart the cache for get_groups cls._groups_cache.clear() return result @classmethod def write(cls, buttons, values, *args): super(ModelButton, cls).write(buttons, values, *args) # Restart the cache for get_groups cls._groups_cache.clear() @classmethod def delete(cls, buttons): super(ModelButton, cls).delete(buttons) # Restart the cache for get_groups cls._groups_cache.clear() @classmethod def get_groups(cls, model, name): ''' Return a set of group ids for the named button on the model. ''' key = (model, name) groups = cls._groups_cache.get(key) if groups is not None: return groups buttons = cls.search([ ('model.model', '=', model), ('name', '=', name), ]) if not buttons: groups = set() else: button, = buttons groups = set(g.id for g in button.groups) cls._groups_cache.set(key, groups) return groups class ModelData(ModelSQL, ModelView): "Model data" __name__ = 'ir.model.data' fs_id = fields.Char('Identifier on File System', required=True, help="The id of the record as known on the file system.", select=True) model = fields.Char('Model', required=True, select=True) module = fields.Char('Module', required=True, select=True) db_id = fields.Integer('Resource ID', help="The id of the record in the database.", select=True, required=True) values = fields.Text('Values') fs_values = fields.Text('Values on File System') noupdate = fields.Boolean('No Update') out_of_sync = fields.Function(fields.Boolean('Out of Sync'), 'get_out_of_sync', searcher='search_out_of_sync') _get_id_cache = Cache('ir_model_data.get_id', context=False) @classmethod def __setup__(cls): super(ModelData, cls).__setup__() table = cls.__table__() cls._sql_constraints = [ ('fs_id_module_model_uniq', Unique(table, table.fs_id, table.module, table.model), 'The triple (fs_id, 
module, model) must be unique!'), ] cls._buttons.update({ 'sync': { 'invisible': ~Eval('out_of_sync'), }, }) @classmethod def __register__(cls, module_name): TableHandler = backend.get('TableHandler') cursor = Transaction().cursor model_data = cls.__table__() super(ModelData, cls).__register__(module_name) table = TableHandler(cursor, cls, module_name) # Migration from 2.6: remove inherit if table.column_exist('inherit'): cursor.execute(*model_data.delete( where=model_data.inherit == True)) table.drop_column('inherit', True) @staticmethod def default_noupdate(): return False def get_out_of_sync(self, name): return self.values != self.fs_values and self.fs_values is not None @classmethod def search_out_of_sync(cls, name, clause): table = cls.__table__() name, operator, value = clause Operator = fields.SQL_OPERATORS[operator] query = table.select(table.id, where=Operator( (table.fs_values != table.values) & (table.fs_values != Null), value)) return [('id', 'in', query)] @classmethod def write(cls, data, values, *args): super(ModelData, cls).write(data, values, *args) # Restart the cache for get_id cls._get_id_cache.clear() @classmethod def get_id(cls, module, fs_id): """ Return for an fs_id the corresponding db_id. 
""" key = (module, fs_id) id_ = cls._get_id_cache.get(key) if id_ is not None: return id_ data = cls.search([ ('module', '=', module), ('fs_id', '=', fs_id), ], limit=1) if not data: raise Exception("Reference to %s not found" % ".".join([module, fs_id])) id_ = cls.read([d.id for d in data], ['db_id'])[0]['db_id'] cls._get_id_cache.set(key, id_) return id_ @classmethod def dump_values(cls, values): return json.dumps(sorted(values.iteritems()), cls=JSONEncoder) @classmethod def load_values(cls, values): try: return dict(json.loads(values, object_hook=JSONDecoder())) except ValueError: # Migration from 3.2 from decimal import Decimal import datetime return eval(values, { 'Decimal': Decimal, 'datetime': datetime, }) @classmethod @ModelView.button def sync(cls, records): pool = Pool() to_write = [] for data in records: Model = pool.get(data.model) values = cls.load_values(data.values) fs_values = cls.load_values(data.fs_values) # values could be the same once loaded # if they come from version < 3.2 if values != fs_values: record = Model(data.db_id) Model.write([record], fs_values) values = fs_values to_write.extend([[data], { 'values': cls.dump_values(values), }]) if to_write: cls.write(*to_write) class PrintModelGraphStart(ModelView): 'Print Model Graph' __name__ = 'ir.model.print_model_graph.start' level = fields.Integer('Level', required=True) filter = fields.Text('Filter', help="Entering a Python " "Regular Expression will exclude matching models from the graph.") @staticmethod def default_level(): return 1 class PrintModelGraph(Wizard): __name__ = 'ir.model.print_model_graph' start = StateView('ir.model.print_model_graph.start', 'ir.print_model_graph_start_view_form', [ Button('Cancel', 'end', 'tryton-cancel'), Button('Print', 'print_', 'tryton-ok', default=True), ]) print_ = StateAction('ir.report_model_graph') def transition_print_(self): return 'end' def do_print_(self, action): return action, { 'id': Transaction().context.get('active_id'), 'ids': 
Transaction().context.get('active_ids'), 'level': self.start.level, 'filter': self.start.filter, } class ModelGraph(Report): __name__ = 'ir.model.graph' @classmethod def execute(cls, ids, data): import pydot pool = Pool() Model = pool.get('ir.model') ActionReport = pool.get('ir.action.report') if not data['filter']: filter = None else: filter = re.compile(data['filter'], re.VERBOSE) action_report_ids = ActionReport.search([ ('report_name', '=', cls.__name__) ]) if not action_report_ids: raise Exception('Error', 'Report (%s) not find!' % cls.__name__) action_report = ActionReport(action_report_ids[0]) models = Model.browse(ids) graph = pydot.Dot(fontsize="8") graph.set('center', '1') graph.set('ratio', 'auto') cls.fill_graph(models, graph, level=data['level'], filter=filter) data = graph.create(prog='dot', format='png') return ('png', fields.Binary.cast(data), False, action_report.name) @classmethod def fill_graph(cls, models, graph, level=1, filter=None): ''' Fills a pydot graph with a models structure. 
''' import pydot pool = Pool() Model = pool.get('ir.model') sub_models = set() if level > 0: for model in models: for field in model.fields: if field.name in ('create_uid', 'write_uid'): continue if field.relation and not graph.get_node(field.relation): sub_models.add(field.relation) if sub_models: model_ids = Model.search([ ('model', 'in', list(sub_models)), ]) sub_models = Model.browse(model_ids) if set(sub_models) != set(models): cls.fill_graph(sub_models, graph, level=level - 1, filter=filter) for model in models: if filter and re.search(filter, model.model): continue label = '"{' + model.model + '\\n' if model.fields: label += '|' for field in model.fields: if field.name in ('create_uid', 'write_uid', 'create_date', 'write_date', 'id'): continue label += '+ ' + field.name + ': ' + field.ttype if field.relation: label += ' ' + field.relation label += '\l' label += '}"' node_name = '"%s"' % model.model node = pydot.Node(node_name, shape='record', label=label) graph.add_node(node) for field in model.fields: if field.name in ('create_uid', 'write_uid'): continue if field.relation: node_name = '"%s"' % field.relation if not graph.get_node(node_name): continue args = {} tail = model.model head = field.relation edge_model_name = '"%s"' % model.model edge_relation_name = '"%s"' % field.relation if field.ttype == 'many2one': edge = graph.get_edge(edge_model_name, edge_relation_name) if edge: continue args['arrowhead'] = "normal" elif field.ttype == 'one2many': edge = graph.get_edge(edge_relation_name, edge_model_name) if edge: continue args['arrowhead'] = "normal" tail = field.relation head = model.model elif field.ttype == 'many2many': if graph.get_edge(edge_model_name, edge_relation_name): continue if graph.get_edge(edge_relation_name, edge_model_name): continue args['arrowtail'] = "inv" args['arrowhead'] = "inv" edge = pydot.Edge(str(tail), str(head), **args) graph.add_edge(edge)
kret0s/gnuhealth-live
tryton/server/trytond-3.8.3/trytond/ir/model.py
Python
gpl-3.0
39,540
# =======================================================================
#  This file is part of MCLRE.
#
#  MCLRE is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  MCLRE is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with MCLRE.  If not, see <http://www.gnu.org/licenses/>.
#
#  Copyright (C) 2015 Augusto Queiroz de Macedo <augustoqmacedo@gmail.com>
# =======================================================================
"""
Partition Helper Script: MRBPR USER-USER relations
"""

import os
import csv
import random
import argparse
import itertools
from collections import defaultdict


def generate_relation(data_dir, input_filename, output_filename, relation_id, max_members):
    """Materialize a USER-USER relation file from user-group memberships.

    Reads ``<data_dir>/<input_filename>`` (TSV rows: user_id <TAB> group_id),
    groups users by group, and writes one bi-directional weighted edge per
    pair of co-members of each group to ``<data_dir>/<output_filename>``
    (TSV rows: relation_id <TAB> user1 <TAB> user2 <TAB> 1).

    Groups with more than ``max_members`` members are randomly down-sampled
    to ``max_members`` before pair generation, to bound the quadratic blowup.
    If the output file already exists the relation is assumed to be already
    persisted and the function does nothing.

    :param data_dir: directory holding both the input and output files
    :param input_filename: membership TSV file name (user_id, group_id)
    :param output_filename: relation TSV file name to create
    :param relation_id: MRBPR relation identifier written on every row
    :param max_members: maximum group size kept before sampling
    """
    # GROUP id -> set of member USER ids
    group_members = defaultdict(set)

    with open(os.path.join(data_dir, input_filename), "r") as data_file:
        for row in csv.reader(data_file, delimiter="\t"):
            user_id, group_id = int(row[0]), int(row[1])
            group_members[group_id].add(user_id)

    # Separate names for the path and the open handle (the original code
    # shadowed one with the other).
    output_path = os.path.join(data_dir, output_filename)
    if os.path.exists(output_path):
        # Relation already persisted on a previous run.
        return

    with open(output_path, "w") as relation_file:
        relation_tsv = csv.writer(relation_file, delimiter="\t")
        for group_id in sorted(group_members):
            members = group_members[group_id]
            if len(members) > max_members:
                # random.sample() requires a sequence (passing a set is
                # deprecated since Python 3.9 and a TypeError since 3.11);
                # sorting first also makes sampling reproducible under a
                # fixed random seed.
                members = random.sample(sorted(members), max_members)
            for user1, user2 in itertools.combinations(members, 2):
                # Bi-directional edges
                relation_tsv.writerow([relation_id, user1, user2, 1])
                relation_tsv.writerow([relation_id, user2, user1, 1])


if __name__ == "__main__":
    PARSER = argparse.ArgumentParser()
    PARSER.add_argument("-d", "--data_dir", type=str, required=True,
                        help="Data Directory")
    PARSER.add_argument("-i", "--input_filename", type=str, required=True,
                        help="Input Filename (TSV format)")
    PARSER.add_argument("-o", "--output_filename", type=str, required=True,
                        help="Output Filename")
    PARSER.add_argument("-r", "--relation_id", type=int, required=True,
                        help="MRBPR Relation Id")
    PARSER.add_argument("-m", "--max_members", type=int, default=10000000,
                        help="Maximum numbers of members in one network")
    ARGS = PARSER.parse_args()

    generate_relation(ARGS.data_dir, ARGS.input_filename, ARGS.output_filename,
                      ARGS.relation_id, ARGS.max_members)
augustoqm/MCLRE
src/data_partition/partition_helper_mrbpr.py
Python
gpl-3.0
3,228
import logging
import warnings
from collections import namedtuple

logger = logging.getLogger(__name__)

# Declaration record for a single config option.
Field = namedtuple('Field', ('name', 'type_', 'default', 'desc', 'warn'))


class Config:
    """Configuration module.

    Users can set the value of each declared option in their rc file.
    """

    def __init__(self):
        # Bypass our own __setattr__ (which only accepts declared fields)
        # when creating the registry of declared fields.
        object.__setattr__(self, '_fields', {})

    def __getattr__(self, name):
        # NOTE: __getattr__ is only invoked after normal lookup fails; we
        # must not call getattr() here, otherwise it would recurse forever.
        if name == '_fields':
            return object.__getattribute__(self, '_fields')
        if name in self._fields:
            try:
                object.__getattribute__(self, name)
            except AttributeError:
                # Declared but never assigned: fall back to the declared
                # default value.
                return self._fields[name].default
        # Undeclared name: raise the usual AttributeError.
        return object.__getattribute__(self, name)

    def __setattr__(self, name, value):
        if name in self._fields:
            field = self._fields[name]
            if field.warn is not None:
                # Deprecated field: report the warning at the caller's line.
                warnings.warn('Config field({}): {}'.format(name, field.warn),
                              stacklevel=2)
            # TODO: validate the value against field.type_
            object.__setattr__(self, name, value)
        else:
            # Writes to undeclared keys are ignored (logged, not raised).
            logger.warning('Assign to an undeclared config key.')

    def deffield(self, name, type_=None, default=None, desc='', warn=None):
        """Define a configuration field.

        :param str name: the field name. It SHOULD be capitalized except
            when the field refers to a sub-config.
        :param type_: field type.
        :param default: default value for the field.
        :param desc: description for the field.
        :param warn: if field is deprecated, set a warn message.
        :raises ValueError: if the field is already defined.
        """
        if name not in self._fields:
            self._fields[name] = Field(name=name,
                                       type_=type_,
                                       default=default,
                                       desc=desc,
                                       warn=warn)
        else:
            raise ValueError('Field({}) is already defined.'.format(name))
cosven/FeelUOwn
feeluown/config.py
Python
gpl-3.0
2,065
# This script will calculate Shannon entropy from a MSA.
# NOTE(review): this file is Python 2 (print statements, xrange).

# Dependencies:

# Biopython, Matplotlib, Math

"""
Shannon's entropy equation (latex format):

    H=-\sum_{i=1}^{M} P_i\,log_2\,P_i

    Entropy is a measure of the uncertainty of a probability distribution (p1, ..... , pM)
    https://stepic.org/lesson/Scoring-Motifs-157/step/7?course=Bioinformatics-Algorithms&unit=436

    Where, Pi is the fraction of nuleotide bases of nuleotide base type i, and M is the number of nuleotide base types (A, T, G or C)

    H ranges from 0 (only one base/residue in present at that position) to 4.322 (all 20 residues are equally represented in that position).

    Typically, positions with H >2.0 are considerered variable, whereas those with H < 2 are consider conserved. Highly conserved positions are those with H <1.0 (Litwin and Jores, 1992).

    A minimum number of sequences is however required (~100) for H to describe the diversity of a protein family.
"""

import os
import sys
import warnings
import traceback

__author__ = "Joe R. J. Healey"
__version__ = "1.0.0"
__title__ = "ShannonMSA"
__license__ = "GPLv3"
__author_email__ = "J.R.J.Healey@warwick.ac.uk"


def parseArgs():
    """Parse command line arguments"""

    import argparse

    # NOTE(review): the bare `except:` below swallows any construction error,
    # after which `parser` is unbound and the final return raises NameError.
    # Consider letting argparse errors propagate instead.
    try:
        parser = argparse.ArgumentParser(
            description='Compute per base/residue Shannon entropy of a Multiple Sequence Alignment.')
        parser.add_argument('-a',
                            '--alignment',
                            action='store',
                            required=True,
                            help='The multiple sequence alignment (MSA) in any of the formats supported by Biopython\'s AlignIO.')
        parser.add_argument('-f',
                            '--alnformat',
                            action='store',
                            default='fasta',
                            help='Specify the format of the input MSA to be passed in to AlignIO.')
        parser.add_argument('-v',
                            '--verbose',
                            action='count',
                            default=0,
                            help='Verbose behaviour, printing parameters of the script.')
        parser.add_argument('-m',
                            '--runningmean',
                            action='store',
                            type=int,
                            default=0,
                            help='Return the running mean (a.k.a moving average) of the MSAs Shannon Entropy. Makes for slightly smoother plots. Providing the number of points to average over switches this on.')
        parser.add_argument('--makeplot',
                            action='store_true',
                            help='Plot the results via Matplotlib.')
    except:
        print "An exception occurred with argument parsing. Check your provided options."
        traceback.print_exc()

    return parser.parse_args()


def parseMSA(msa, alnformat, verbose):
    """Parse in the MSA file using Biopython's AlignIO"""

    from Bio import AlignIO
    alignment = AlignIO.read(msa, alnformat)

    # Do a little sanity checking:
    # all sequences in the alignment must have the same length.
    seq_lengths_list = []
    for record in alignment:
        seq_lengths_list.append(len(record))

    seq_lengths = set(seq_lengths_list)

    if verbose > 0:
        print("Alignment length is:" + str(list(seq_lengths)))

    if len(seq_lengths) != 1:
        sys.stderr.write("Your alignment lengths aren't equal. Check your alignment file.")
        sys.exit(1)

    # 1-based column index used for reporting/plotting.
    index = range(1, list(seq_lengths)[0]+1)

    return alignment, list(seq_lengths), index

##################################################################
# Function to calcuate the Shannon's entropy per alignment column
# H=-\sum_{i=1}^{M} P_i\,log_2\,P_i (http://imed.med.ucm.es/Tools/svs_help.html)
# Gaps and N's are included in the calculation
##################################################################


def shannon_entropy(list_input):
    """Calculate Shannon's Entropy per column of the alignment (H=-\sum_{i=1}^{M} P_i\,log_2\,P_i)"""
    import math
    unique_base = set(list_input)
    M = len(list_input)
    entropy_list = []
    # Number of residues in column
    for base in unique_base:
        n_i = list_input.count(base)  # Number of residues of type i
        P_i = n_i/float(M)  # n_i(Number of residues of type i) / M(Number of residues in column)
        entropy_i = P_i*(math.log(P_i,2))
        entropy_list.append(entropy_i)

    # Negate: each P_i*log2(P_i) term is <= 0, entropy is >= 0.
    sh_entropy = -(sum(entropy_list))

    return sh_entropy


def shannon_entropy_list_msa(alignment):
    """Calculate Shannon Entropy across the whole MSA"""

    shannon_entropy_list = []
    # Iterate columns; alignment[:, col_no] is Biopython's column slice.
    for col_no in xrange(len(list(alignment[0]))):
        list_input = list(alignment[:, col_no])
        shannon_entropy_list.append(shannon_entropy(list_input))

    return shannon_entropy_list


def plot(index, sel, verbose):
    """Create a quick plot via matplotlib to visualise the extended spectrum"""
    import matplotlib.pyplot as plt

    if verbose > 0:
        print("Plotting data...")

    plt.plot(index, sel)
    plt.xlabel('MSA Position Index', fontsize=16)
    plt.ylabel('Shannon Entropy', fontsize=16)

    plt.show()


def running_mean(l, N):
    # Moving average of `l` over a window of N points.
    # NOTE(review): the local name `sum` shadows the builtin within this
    # function; harmless here but worth renaming.
    sum = 0
    result = list(0 for x in l)

    # Growing window for the first N points...
    for i in range( 0, N ):
        sum = sum + l[i]
        result[i] = sum / (i+1)

    # ...then a sliding window of exactly N points.
    for i in range( N, len(l) ):
        sum = sum - l[i-N] + l[i]
        result[i] = sum / N

    return result


def main():
    """Compute Shannon Entropy from a provided MSA."""

    # Parse arguments
    args = parseArgs()

    # Convert object elements to standard variables for functions
    msa = args.alignment
    alnformat = args.alnformat
    verbose = args.verbose
    makeplot = args.makeplot
    runningmean = args.runningmean

    # Start calling functions to do the heavy lifting
    alignment, seq_lengths, index = parseMSA(msa, alnformat, verbose)
    sel = shannon_entropy_list_msa(alignment)

    if runningmean > 0:
        sel = running_mean(sel, runningmean)

    if makeplot is True:
        plot(index, sel, verbose)

    if verbose > 0:
        print("Index" + '\t' + "Entropy")

    # Emit one "position<TAB>entropy" line per alignment column.
    for c1, c2 in zip(index, sel):
        print(str(c1) + '\t' + str(c2))


if __name__ == '__main__':
    main()
jrjhealey/bioinfo-tools
Shannon.py
Python
gpl-3.0
6,290
# # My wish for Pynamite # from __future__ import with_statement from pynamite import * from pynamite.actor import TextBox def scene1(): # define some actors x = TextBox("Pynamite") y = TextBox("Rocks!!!") # tell the first actor to enter enter(x) # wait for a keypress to continue pause() # fade out one actor while other comes in # # You can use with blocks # with parallel(): # fadeout(1.0,x) # fadein(1.0,y) # Or the functional notation set_var(y, "opacity", 0.0) enter(y) def together(): fadeout(4.0,x) with serial(): linear(y, "opacity", end_val=.5, duration=1.0) linear(y, "opacity", end_val=.0, duration=1.0) linear(y, "opacity", end_val=1.0, duration=2.0) #fadeout(.5,y) #fadein(.5,y) in_parallel(together) # wait for intput pause() # last actor leaves fadeout(1.0,y) pause() # add that scene to the play add_scene(scene1) def scene2(): # define the actor x = TextBox("Yes, it Rocks!!!") # set its opacity to 0.0 set_var(x, "opacity", 0.0) # have it enter (but remember it's still not visible) enter(x) # have it become visible, but in a fancy way smooth(x, "opacity", end_val=.5,duration=.5) smooth(x, "opacity", end_val=.25,duration=.25) smooth(x, "opacity", end_val=.75,duration=.5) smooth(x, "opacity", end_val=.5,duration=.25) smooth(x, "opacity", end_val=1.0,duration=.5) # wait for input pause() # have the actor leave leave() # add this scene add_scene(scene2) # run it run()
psederberg/pynamite
docs/examples/wish.py
Python
gpl-3.0
1,653
from test_support import *

# Run GNATprove over the whole test project; fail if any check is left
# unproved (no_fail=True), with a prover effort limit of 400 steps.
prove_all(no_fail=True, steps = 400)
ptroja/spark2014
testsuite/gnatprove/tests/intro/test.py
Python
gpl-3.0
65
"""
Main controller.
"""

import json

from Server.Importer import ImportFromModule


class ControllerMain(ImportFromModule("Server.ControllerBase", "ControllerBase")):
    """Controller serving the application's main pages."""

    def ShowPage(self, uriParameters, postedParameters):
        """Render the home page view and hand its HTML to the response."""
        page_view_class = ImportFromModule("WebApplication.Views.PageView", "PageView")
        web_page = page_view_class("Main")
        self.SetOutput(web_page.GetContent())

    def EchoText(self, uriParameters, postedParameters):
        """Echo the incoming URI parameters back as pretty-printed JSON."""
        serialized = json.dumps(uriParameters, indent=4)
        self.SetOutput(serialized)
allembedded/python_web_framework
WebApplication/Controllers/ControllerMain.py
Python
gpl-3.0
630
from platform import system import os from os.path import sep, expanduser, join as join_path from collections import defaultdict import pygame as pg import settings from util import ir, dd, split class Conf (object): IDENT = 'wvoas' USE_SAVEDATA = True USE_FONTS = False # save data SAVE = ('CURRENT_LEVEL', 'COMPLETED_LEVELS', 'COMPLETED', 'STARS', 'VOL_MUL') # need to take care to get unicode path if system() == 'Windows': try: import ctypes n = ctypes.windll.kernel32.GetEnvironmentVariableW(u'APPDATA', None, 0) if n == 0: raise ValueError() except Exception: # fallback (doesn't get unicode string) CONF_DIR = os.environ[u'APPDATA'] else: buf = ctypes.create_unicode_buffer(u'\0' * n) ctypes.windll.kernel32.GetEnvironmentVariableW(u'APPDATA', buf, n) CONF_DIR = buf.value CONF_DIR = join_path(CONF_DIR, IDENT) else: CONF_DIR = join_path(os.path.expanduser(u'~'), '.config', IDENT) CONF = join_path(CONF_DIR, 'conf') # paths DATA_DIR = '' IMG_DIR = DATA_DIR + 'img' + sep SOUND_DIR = DATA_DIR + 'sound' + sep MUSIC_DIR = DATA_DIR + 'music' + sep # display WINDOW_ICON = IMG_DIR + 'icon.png' WINDOW_TITLE = 'World View of a Slime' MOUSE_VISIBLE = dd(True, {'level': False}) FLAGS = 0 FULLSCREEN = False RESIZABLE = False # also determines whether fullscreen togglable RES_W = (960, 540) RES_F = pg.display.list_modes()[0] RES = RES_W MIN_RES_W = (320, 180) ASPECT_RATIO = None # timing FPS = dd(60) # keys are backend IDs # debug DEBUG = False PROFILE_TIME = 5 PROFILE_STATS_FILE = '.profile_stats' PROFILE_NUM_STATS = 20 PROFILE_STATS_SORT = 'cumulative' # input KEYS_NEXT = (pg.K_RETURN, pg.K_SPACE, pg.K_KP_ENTER) KEYS_BACK = (pg.K_ESCAPE, pg.K_BACKSPACE) KEYS_MINIMISE = (pg.K_F10,) KEYS_FULLSCREEN = (pg.K_F11, (pg.K_RETURN, pg.KMOD_ALT, True), (pg.K_KP_ENTER, pg.KMOD_ALT, True)) KEYS_LEFT = (pg.K_LEFT, pg.K_a, pg.K_q) KEYS_RIGHT = (pg.K_RIGHT, pg.K_d, pg.K_e) KEYS_UP = (pg.K_UP, pg.K_w, pg.K_z, pg.K_COMMA) KEYS_DOWN = (pg.K_DOWN, pg.K_s, pg.K_o) KEYS_MOVE = (KEYS_LEFT, KEYS_UP, 
KEYS_RIGHT, KEYS_DOWN) KEYS_JUMP = (pg.K_SPACE,) + KEYS_UP KEYS_RESET = (pg.K_r, pg.K_p) KEYS_VOL_UP = ((pg.K_PLUS, 0, True), (pg.K_KP_PLUS, 0, True), (pg.K_EQUALS, 0, True)) KEYS_VOL_DOWN = ((pg.K_MINUS, 0, True), (pg.K_KP_MINUS, 0, True)) # audio MUSIC_VOLUME = dd(1, paused = .3) SOUND_VOLUME = 1 EVENT_ENDMUSIC = pg.USEREVENT SOUNDS = {'hit': 10, 'die': 4, 'collectstar': 1} SOUND_VOLUMES = {'hit': .04, 'die': 1, 'move': .4, 'star': 1500} HIT_VOL_THRESHOLD = 2 # before scaling VOL_MUL = .6 VOL_CHANGE_AMOUNT = .1 VOL_REPEAT_DELAY = ir(FPS[None] * .5) VOL_REPEAT_RATE = ir(FPS[None] * .2) STAR_SND_CUTOFF = 1000 # gameplay (sizes must be ints) PLAYER_SIZE = (15, 30) PLAYER_SPEED = 1 PLAYER_AIR_SPEED = .2 LAUNCH_SPEED = .6 INITIAL_JUMP = 5 CONTINUE_JUMP = .7 FAIL_JUMP = 2 JUMP_TIME = 10 ON_GROUND_TIME = 2 DIE_TIME = 120 # can skip when counted down this low from DIE_TIME DIE_SKIP_THRESHOLD = 100 WIN_SKIP_THRESHOLD = 100 GRAV = .5 FRICT = .15 AIR_RES = .0025 GOAL_SIZE = (5, 60) CHECKPOINT_SIZE = (10, 10) STAR_SIZE = (20, 20) HALF_WINDOW_SIZE = (125, 75) WINDOW_SIZE = [x * 2 for x in HALF_WINDOW_SIZE] ERR = 10 ** -10 WINDOW_MOVE_AMOUNT = 3 # levels (all positions must be ints) LEVELS = [{ 'bgs': ('bg', ('bg0', (154, 75))), 'player_pos': (100, 25), 'goal': (100, 440), 'stars': [(720, 340)], 'rects': [(0, 300, 700, 100), (760, 300, 200, 100), (700, 300, 60, 20), (700, 380, 60, 20), (0, 500, 960, 40)], 'arects': [(0, 55, 850, 5), (110, 185, 960, 5)] }, { 'player_pos': (100, 420), 'goal': (900, 390), 'stars': [(215, 15)], 'rects': [(0, 460, 325, 80), (325, 450, 635, 90), (200, 50, 50, 400), (400, 0, 300, 450)], 'arects': [(0, 450, 325, 10)] }, { 'player_pos': (100, 420), 'goal': (900, 390), 'stars': [(215, 190)], 'rects': [(0, 450, 960, 90)], 'arects': [(200, 0, 50, 150), (200, 250, 50, 200), (400, 400, 50, 140), (600, 0, 50, 450)] }, { 'player_pos': (200, 120), 'goal': (850, 440), 'stars': [(40, 440)], 'rects': [(0, 150, 400, 10), (560, 500, 400, 10)] }, { 'player_pos': 
(200, 120), 'goal': (850, 440), 'rects': [(0, 150, 400, 10), (560, 500, 400, 10)], 'arects': [(400, 0, 10, 160)] }, { 'player_pos': (472, 70), 'goal': (478, 440), 'stars': [(470, -15)], 'rects': [(40, 60, 20, 10), (30, 100, 40, 10), (20, 140, 60, 10), (220, 100, 100, 10), (640, 100, 100, 10), (200, 140, 140, 10), (470, 200, 20, 10)], 'arects': [(0, 60, 40, 10), (60, 60, 900, 10), (0, 100, 30, 10), (70, 100, 150, 10), (320, 100, 320, 10), (740, 100, 220, 10), (0, 140, 20, 10), (80, 140, 120, 10), (760, 140, 200, 10), (340, 140, 10, 60), (610, 140, 10, 60), (340, 200, 130, 10), (490, 200, 130, 10)] }, { 'player_pos': (100, 320), 'goal': (860, 290), 'stars': [(470, 150)], 'rects': [(0, 350, 960, 45)], 'arects': [(250, 250, 460, 100)] }, { 'player_pos': (100, 240), 'goal': (50, 50), 'checkpoints': [(785, 210)], 'rects': [(100, 0, 570, 150), (0, 270, 250, 10), (330, 520, 150, 20), (570, 150, 100, 130), (570, 400, 100, 140), (740, 230, 100, 220)] }, { 'player_pos': (150, 420), 'goal': (900, 120), 'checkpoints': [(520, 330), (270, 160)], 'stars': [(5, 150)], 'rects': [(0, 450, 960, 10), (0, 400, 960, 10), (500, 350, 460, 10), (300, 290, 200, 10), (150, 210, 150, 40), (0, 215, 150, 35), (150, 180, 100, 40), (550, 180, 960, 10), (500, 0, 10, 140)], 'arects': [(500, 290, 460, 10), (250, 180, 50, 10), (30, -300, 120, 515)] }, { 'player_pos': (50, 240), 'goal': (850, 240), 'rects': [(10, 270, 140, 50), (310, 270, 140, 50), (750, 70, 30, 400)], 'vrects': [(160, 270, 140, 50), (460, 70, 490, 400)] }, { 'player_pos': (230, 180), 'goal': (720, 95), 'checkpoints': [(475, 420)], 'stars': [(470, -15)], 'rects': [(0, 210, 475, 50), (0, 320, 475, 50), (485, 265, 475, 50)], 'vrects': [(0, 265, 475, 50), (0, 375, 960, 130), (485, 155, 475, 50)], 'arects': [(475, 5, 10, 365)] }, 'disable jump', { 'player_pos': (100, 310), 'goal': (860, 280), 'stars': [(660, 450)], 'rects': [(0, 340, 200, 60), (760, 340, 200, 60)], 'vrects': [(380, 340, 200, 60)], 'arects': [(200, 240, 180, 160), (580, 
240, 180, 160)] }, { 'player_pos': (200, 340), 'goal': (760, 310), 'rects': [(0, 370, 455, 170), (505, 370, 455, 170)], 'arects': [(455, 0, 50, 540)] }, { 'player_pos': (100, 120), 'goal': (860, 390), 'rects': [(0, 150, 300, 10), (660, 450, 300, 10)], 'arects': [(0, 110, 300, 10)] }, { 'player_pos': (180, 120), 'goal': (750, 450), 'checkpoints': [(595, 320)], 'stars': [(590, 505)], 'rects': [(0, 150, 200, 10), (0, 400, 200, 10)], 'vrects': [(550, 0, 100, 340)], 'arects': [(550, 340, 100, 150)] }, { 'player_pos': (100, 220), 'goal': (50, 80), 'checkpoints': [(615, 80)], 'rects': [(0, 250, 250, 10), (570, 100, 100, 180), (570, 400, 100, 100)], 'vrects': [(100, 100, 200, 50)], 'arects': [(570, 500, 300, 40)] }, { 'player_pos': (322, 265), 'goal': (428, 135), 'stars': [(400, 400)], 'rects': [(380, 195, 95, 75), (475, 195, 105, 55), (583, 245, 97, 55), (480, 295, 100, 50)], 'vrects': [(480, 245, 97, 50), (580, 295, 100, 100)], 'arects': [(280, 295, 200, 50)] }, { 'player_pos': (53, 480), 'goal': (898, 450), 'checkpoints': [(470, 490)], 'stars': [(465, -15)], 'rects': [(0, 510, 960, 30), (185, 50, 15, 260), (685, 50, 15, 160)], 'vrects': [(185, 310, 15, 200), (0, 110, 15, 200), (685, 210, 15, 300)], 'arects': [(0, -90, 450, 100), (200, 50, 50, 460), (450, 5, 50, 455), (700, 50, 50, 460)] }, 'disable move', { 'player_pos': (370, 170), 'goal': (580, 390), 'rects': [(0, 200, 960, 150), (0, 450, 960, 60)], 'arects': [(0, 510, 960, 30)] }, { 'player_pos': (370, 170), 'goal': (580, 390), 'stars': [(260, 150)], 'rects': [(0, 200, 960, 150), (0, 450, 960, 90)], 'arects': [(220, 190, 100, 10)] }, { 'player_pos': (60, 420), 'goal': (900, 390), 'checkpoints': [(300, 430)], 'stars': [(195, 410)], 'rects': [(230, 450, 730, 60)], 'vrects': [(0, 450, 180, 60)], 'arects': [(0, 510, 960, 30), (180, 400, 15, 110), (215, 400, 15, 110), (380, 0, 50, 450), (580, 400, 50, 110), (780, 0, 50, 450)] }, { 'player_pos': (50, 310), 'goal': (860, 280), 'checkpoints': [(600, 220)], 'stars': [(650, 
420)], 'rects': [(0, 340, 300, 60), (300, 240, 460, 10), (760, 340, 200, 60)], 'arects': [(300, 250, 460, 150)] }, 'enable jump', { 'player_pos': (60, 470), 'goal': (900, 10), 'checkpoints': [(690, 380), (130, 130)], 'stars': [(30, -15)], 'rects': [(650, 350, 10, 50), (100, 150, 700, 10)], 'vrects': [(0, 500, 300, 10), (300, 450, 350, 10), (650, 400, 150, 10), (0, 200, 800, 100), (0, 150, 100, 50), (800, 70, 160, 10)], 'arects': [(0, 510, 960, 140), (300, 460, 660, 50), (650, 410, 310, 50), (800, 80, 160, 330), (100, 160, 700, 40), (0, 10, 100, 30)] }, 'disable jump', 'disable exists', { 'player_pos': (473, 255), 'goal': (500, -100), 'vrects': [(0, 0, 960, 540)] }, 'enable jump', 'enable move', { 'player_pos': (473, 480), 'goal': (478, 100), 'checkpoints': [(930, 290), (20, 40)], 'stars': [(930, -15)], 'rects': [(50, 10, 110, 50), (0, 110, 180, 50), (0, 210, 200, 50), (0, 310, 220, 50), (0, 410, 240, 50), (780, 60, 180, 50), (760, 160, 200, 50), (740, 260, 170, 50), (720, 360, 240, 50)], 'arects': [(0, 510, 960, 30), (0, 60, 180, 50), (0, 160, 200, 50), (0, 260, 220, 50), (0, 360, 240, 50), (0, 460, 260, 50), (780, 10, 180, 50), (760, 110, 200, 50), (740, 210, 220, 50), (720, 310, 240, 50), (700, 410, 260, 100)] }, { 'player_pos': (480, 470), 'goal': (520, 365), 'rects': [(0, 0, 960, 15), (0, 15, 60, 90), (140, 15, 150, 35), (805, 15, 155, 35), (95, 50, 65, 55), (490, 50, 280, 15), (325, 50, 130, 35), (195, 85, 260, 20), (805, 50, 40, 60), (0, 105, 15, 110), (50, 140, 185, 40), (270, 140, 185, 20), (270, 160, 100, 40), (490, 65, 25, 130), (570, 100, 60, 15), (615, 115, 15, 60), (665, 100, 60, 35), (715, 135, 10, 55), (725, 180, 35, 10), (760, 65, 10, 95), (770, 145, 110, 15), (760, 160, 70, 30), (815, 190, 15, 65), (880, 85, 45, 75), (915, 160, 10, 35), (0, 215, 80, 35), (40, 250, 40, 80), (115, 215, 65, 40), (160, 255, 20, 35), (215, 180, 20, 110), (270, 235, 100, 50), (405, 195, 110, 20), (405, 215, 15, 70), (455, 250, 125, 35), (550, 150, 30, 100), (615, 175, 
65, 50), (615, 225, 165, 15), (615, 275, 5, 10), (620, 275, 110, 15), (765, 240, 15, 60), (815, 255, 70, 10), (875, 255, 10, 85), (865, 195, 60, 25), (920, 255, 45, 120), (0, 250, 5, 115), (0, 365, 45, 65), (0, 430, 90, 35), (80, 290, 45, 105), (125, 365, 50, 30), (160, 290, 75, 40), (125, 395, 50, 15), (175, 390, 195, 20), (125, 445, 245, 20), (330, 410, 40, 35), (210, 330, 25, 25), (270, 285, 15, 35), (270, 320, 230, 35), (405, 355, 35, 35), (475, 355, 25, 75), (405, 425, 180, 40), (535, 320, 50, 105), (620, 290, 25, 140), (620, 430, 155, 35), (680, 325, 35, 70), (715, 325, 50, 10), (765, 300, 75, 35), (810, 335, 30, 40), (810, 375, 150, 90), # rects only (515, 15, 35, 35), (160, 85, 35, 20), (805, 110, 40, 35), (455, 285, 25, 35), (125, 330, 50, 35), (175, 330, 35, 25), (500, 320, 35, 40)], 'vrects': [(0, 0, 960, 15), (0, 15, 60, 90), (140, 15, 150, 35), (805, 15, 155, 35), (95, 50, 65, 55), (490, 50, 280, 15), (325, 50, 130, 35), (195, 85, 260, 20), (805, 50, 40, 60), (0, 105, 15, 110), (50, 140, 185, 40), (270, 140, 185, 20), (270, 160, 100, 40), (490, 65, 25, 130), (550, 100, 80, 15), (615, 115, 15, 60), (665, 100, 60, 35), (715, 135, 10, 55), (725, 180, 35, 10), (760, 65, 10, 95), (770, 145, 110, 15), (760, 160, 70, 30), (815, 190, 15, 65), (880, 85, 45, 75), (915, 160, 10, 35), (0, 215, 80, 35), (40, 250, 40, 80), (115, 215, 65, 40), (160, 255, 20, 35), (215, 180, 20, 110), (270, 235, 100, 50), (405, 195, 110, 20), (405, 215, 15, 70), (455, 250, 125, 35), (550, 150, 30, 100), (615, 175, 65, 50), (615, 225, 165, 15), (615, 275, 5, 10), (620, 275, 110, 15), (765, 240, 15, 60), (815, 255, 70, 10), (875, 255, 10, 85), (865, 195, 60, 25), (920, 255, 45, 120), (0, 250, 5, 115), (0, 365, 45, 65), (0, 430, 90, 35), (80, 290, 45, 105), (125, 365, 50, 30), (160, 290, 75, 40), (125, 395, 50, 15), (175, 390, 195, 20), (125, 445, 245, 20), (330, 410, 40, 35), (210, 330, 25, 25), (270, 285, 15, 35), (270, 320, 230, 35), (405, 355, 35, 35), (475, 355, 25, 75), (405, 425, 
180, 40), (535, 320, 50, 105), (620, 290, 25, 140), (620, 430, 155, 35), (680, 325, 35, 70), (715, 325, 50, 10), (765, 300, 75, 35), (810, 335, 30, 40), (810, 375, 150, 90), # vrects only (555, 15, 35, 35), (340, 105, 25, 35), (270, 200, 30, 35), (420, 250, 35, 35), (475, 285, 25, 35), (690, 290, 30, 35), (750, 370, 25, 60), (885, 255, 35, 85), (440, 355, 35, 70)], 'arects': [(0, 500, 960, 40)] }, { 'player_pos': (100, 470), 'goal': (860, 440), 'vrects': [(0, 500, 960, 10)], 'arects': [(0, 510, 960, 30), (455, 200, 50, 310)] }] # compile some properties CAN_JUMP = [True] CAN_MOVE = [True] EXISTS = [True] _properties = {'jump': CAN_JUMP, 'move': CAN_MOVE, 'exists': EXISTS} i = 0 while i < len(LEVELS): s = LEVELS[i] if isinstance(s, basestring): # property modifier: alter property list's first item if s.startswith('disable'): s = s[8:] v = False else: # enable s = s[7:] v = True for p, l in _properties.iteritems(): if s == p: l[0] = v # remove from LEVELS LEVELS.pop(i) else: # level for p, l in _properties.iteritems(): if l[0]: l.append(i) i += 1 for l in _properties.itervalues(): l.pop(0) del _properties CURRENT_LEVEL = 0 COMPLETED_LEVELS = [] COMPLETED = False STARS = [] # graphics # level select LS_BG_COLOUR = (120, 120, 120) LS_HL_COLOUR = (150, 150, 0) LS_HL_WIDTH = 2 LS_FADE_IN = ((0, 0, 0), (False, 1.5)) LS_FADE_OUT = (False, ((0, 0, 0), 1.5)) LS_LEVEL_START_TIME = 2 LS_WON_OVERLAY = (0, 0, 0, 150) # images DEFAULT_BGS = ('bg',) BGS = DEFAULT_BGS + sum((l.get('bgs', ()) for l in LEVELS), ()) BGS = tuple(set([bg if isinstance(bg, str) else bg[0] for bg in BGS])) NUM_CLOUDS = 4 CLOUDS = tuple('cloud{0}'.format(i) for i in xrange(NUM_CLOUDS)) CLOUD_SPEED = .5 CLOUD_VERT_SPEED_RATIO = .1 CLOUD_MOD_SPEED_RATIO = .2 CLOUD_JITTER = .01 PLAYER_OFFSET = (-7, -2) PLAYER_SQUASH_ELAST = .7 PLAYER_SQUASH_STIFFNESS = .2 PLAYER_MIN_SQUASH = .4 PLAYER_MAX_SQUASH = 1.5 PLAYER_SKEW_ELAST = .85 PLAYER_SKEW_STIFFNESS = .1 PLAYER_MAX_SKEW = 4 GOAL_OFFSET = (-17, -2) 
STAR_PULSE_SPEED = .005 VOID_JITTER_X = 10 VOID_JITTER_Y = 10 VOID_JITTER_T = 5 # fades FADE_TIME = 120 FADE_RATE = 300 # rate * time_ratio = 255 * alpha PAUSE_FADE_TIME = 60 PAUSE_FADE_RATE = 200 # rate * time_ratio = 255 * alpha # particles PARTICLES = { 'die': { 'colours': (((36, 130, 36), 1500), ((25, 91, 25), 1000), ((47, 169, 47), 500)), 'speed': 10, 'life': 180, 'size': 5, 'damping': .98, 'jitter': .035 }, 'move': { 'colours': (((10, 10, 10), .2), ((30, 30, 30), .1), ((36, 130, 36), .05), ((25, 91, 25), .02), ((47, 169, 47), .02)), 'speed': 2, 'life': 60, 'size': 4, 'damping': 1, 'jitter': 0 }, 'jump': { 'colours': (((10, 10, 10), 20), ((30, 30, 30), 10), ((36, 130, 36), 5), ((25, 91, 25), 2), ((47, 169, 47), 2)), 'speed': 5, 'life': 90, 'size': 4, 'damping': .98, 'jitter': 0 } } def translate_dd (d): if isinstance(d, defaultdict): return defaultdict(d.default_factory, d) else: # should be (default, dict) return dd(*d) conf = dict((k, v) for k, v in Conf.__dict__.iteritems() if k.isupper() and not k.startswith('__')) types = { defaultdict: translate_dd } if Conf.USE_SAVEDATA: conf = settings.SettingsManager(conf, Conf.CONF, Conf.SAVE, types) else: conf = settings.DummySettingsManager(conf, types)
ikn/wvoas
game/conf.py
Python
gpl-3.0
19,522
import htmlPy
import socket
import json
import os

# Connect to the local cloudpie server before the GUI starts.
sock = socket.socket()
sock.connect(('localhost', 5002))
sock.send(b'')
# NOTE(review): three fixed-size reads presumably drain a multi-part
# greeting from the server -- confirm against the server protocol.
sock.recv(1024)
sock.recv(1024)
sock.recv(1024)

# Single-window htmlPy GUI backed by the local ./html templates and assets.
app = htmlPy.AppGUI(title=u"Python Best Ever", maximized=True)
app.template_path = os.path.abspath("./html")
app.static_path = os.path.abspath("./html")

template_name = 'index.html'
# Context dict passed to the template on every render.
app_data = {
    'val': '0'
}


def processor(response):
    """Apply a JSON server reply of the form 'cmd[@sub]#data' to app_data."""
    response = str(response)
    response = json.loads(response)['message']
    print(response)
    command, data = response.split('#')
    if '@' in command:
        command, subcommand = command.split('@')
    # NOTE(review): if command == 'put' but no '@' was present, `subcommand`
    # is unbound here and this raises NameError -- verify the server always
    # sends 'put@<key>#<value>'.
    if command == 'put':
        app_data[subcommand] = data


class App(htmlPy.Object):
    """Bridge object exposing slots callable from the HTML front-end."""

    def __init__(self):
        super(App, self).__init__()

    @htmlPy.Slot(str)
    def link(self, url):
        # Navigate to another template.
        # NOTE(review): this binds a *local* template_name; the module-level
        # template_name used by command() below is never updated -- likely
        # a missing `global template_name`.
        template_name = str(url)
        app.template = (template_name, app_data)

    @htmlPy.Slot(str)
    def command(self, cmd):
        # Send a raw command to the server and re-render with its reply.
        cmd = bytes(cmd)
        sock.send(cmd)
        response = sock.recv(1024)
        processor(response)
        app.template = (template_name, app_data)

# Initial render, bind the bridge object, and enter the GUI main loop.
app.template = (template_name, app_data)
app.bind(App())
app.start()
JPro173/cloudpie
client/main.py
Python
gpl-3.0
1,186
# -*- coding: utf-8 -*- import getopt import sys class SlopeOrientation: def __init__(self, dem, out_layer): print "Entering SlopeOrientation" #ALGORITHM: Aspect # INPUT <ParameterRaster> # BAND <ParameterNumber> # COMPUTE_EDGES <ParameterBoolean> # ZEVENBERGEN <ParameterBoolean> # TRIG_ANGLE <ParameterBoolean> # ZERO_FLAT <ParameterBoolean> # OUTPUT <OutputRaster> processing.runalg("gdalogr:aspect", dem, 1, False, False, False, False, out_layer) def main(): try: opts, args = getopt.getopt(sys.argv[1:], '') except: pass n = SlopeOrientation(args[0]) if __name__ == '__main__': main()
Minganie/qgis-plugin-peregrine-falcon
slope_orientation.py
Python
gpl-3.0
742
from django.utils.translation import ugettext_lazy as _

from crystal_dashboard.dashboards.crystal import dashboard

import horizon


class Controllers(horizon.Panel):
    """Horizon panel listing the Crystal SDS controllers."""
    # Translatable label shown in the dashboard navigation.
    name = _("Controllers")
    # URL slug for this panel under the Crystal dashboard.
    slug = 'controllers'


# Attach the panel to the Crystal controller dashboard so Horizon discovers it.
dashboard.CrystalController.register(Controllers)
Crystal-SDS/dashboard
crystal_dashboard/dashboards/crystal/controllers/panel.py
Python
gpl-3.0
271
# Copyright (C) 2012 Vivek Haldar
#
# Take in a dict containing fetched RSS data, and output to printable files in
# the current directory.
#
# Dict looks like:
# feed_title -> [list of articles]
# each article has (title, body).
#
# Author: Vivek Haldar <vh@vivekhaldar.com>

import codecs

import escpos

from datetime import datetime

import textwrap

import output


class OutputPrn(output.Output):
    """Write fetched RSS articles as ESC/POS print files (one .prn per feed)."""

    def output(self):
        """Render every feed in self._articles to '<feed_title>.prn' in the CWD."""
        articles = self._articles
        for f in articles:
            # Feed titles may contain '/', which is invalid in a filename.
            prn = escpos.Escpos('%s.prn' % f.replace('/', '_'))
            for a in articles[f]:
                title, body = a
                # Cut body down to 100 words.
                short_body = ' '.join(body.split()[:100])
                prn.bigtext(f + '\n')
                # Wrap to the printer's 32-character line width.
                prn.bigtext(textwrap.fill(title, 32) + '\n')
                # Bug fix: the truncated short_body was computed but the full
                # body was printed; print the 100-word version as intended.
                prn.text(textwrap.fill(short_body, 32))
                prn.text('\n\n\n')
            prn.flush()
vivekhaldar/fetch_rss
output_prn.py
Python
gpl-3.0
928
__version__='0.5.1'
havardgulldahl/jottalib
src/jottalib/__init__.py
Python
gpl-3.0
20
#!/usr/bin/env python # -*- coding: utf-8 -*- DEBUG = True observer = None ser_port = None s = 0 ser = None #-------------------------------------------------------------------- import signal import sys import os def signal_handler(signal, frame): global s, ser print '\nYou pressed Ctrl+C!' if s > 18: print "MTK_Finalize" serialPost(ser, "B7".decode("hex")) time.sleep(0.1) if ser.isOpen(): ser.close() #sys.exit(0) os._exit(0) signal.signal(signal.SIGINT, signal_handler) #-------------------------------------------------------------------- import os import serial from serial.tools import list_ports def serial_ports(): """ Returns a generator for all available serial ports """ if os.name == 'nt': # windows for i in range(256): try: s = serial.Serial(i) s.close() yield 'COM' + str(i + 1) except serial.SerialException: pass else: # unix for port in list_ports.comports(): yield port[0] #if __name__ == '__main__': # print(list(serial_ports())) #exit() #-------------------------------------------------------------------- import serial, time, binascii def serialPost(ser, data): #time.sleep(0.5) #data = chr(0x44) print " -> " + binascii.b2a_hex(data) ser.write(data) #ser.flush() def serialPostL(ser, data, slen, scnt): sys.stdout.write("\r" + str(scnt) + " of " + str(slen) + " <- " + binascii.b2a_hex(data)) if slen == scnt: sys.stdout.write("\n") #sys.stdout.flush() ser.write(data) def summ(block, length): res = 0 for i in range(length): res = res + ord(block[i]) #print str(res) return chr(res & int(0xFF)) def swapSerialData(data): l = len(data) #if l > 16: # print "-> " + str(l) + " bytes" #else: # print "-> " + binascii.b2a_hex(data) if len(data) > 0: ser.write(data) n = 0 while n < 1: n = ser.inWaiting() #time.sleep(1) data = ser.read(n) l = len(data) #print "RX is L: " + str(l) + " -> " + binascii.b2a_hex(data) return data #----- CONNECT TO PORT---------- def conn_port (ser_port): print ser_port print "module PySerial version: " + serial.VERSION # if: error open serial 
port: (22, 'Invalid argument') # http://superuser.com/questions/572034/how-to-restart-ttyusb # cat /proc/tty/drivers # lsmod | grep usbserial # sudo modprobe -r pl2303 qcaux # sudo modprobe -r usbserial #import subprocess #subprocess.call(['statserial', ser_port]) #subprocess.call(['setserial', '-G', ser_port]) # http://www.roman10.net/serial-port-communication-in-python/ # initialization and open the port # possible timeout values: # 1. None: wait forever, block call # 2. 0: non-blocking mode, return immediately # 3. x, x is bigger than 0, float allowed, timeout block call global ser ser = serial.Serial() #ser.port = "COM29" ser.port = ser_port ser.baudrate = 115200 ser.bytesize = serial.EIGHTBITS # number of bits per bytes ser.parity = serial.PARITY_EVEN ser.stopbits = serial.STOPBITS_ONE # number of stop bits ser.timeout = None # block read ser.rtscts = True # enable hardware (RTS/CTS) flow control (Hardware handshaking) #ser.port = "/dev/ttyS0" #ser.port = "/dev/ttyUSB0" #ser.port = "2" # COM3 #ser.baudrate = 9600 #ser.parity = serial.PARITY_NONE # set parity check: no parity #ser.timeout = 0 # non-block read #ser.xonxoff = False # disable software flow control #ser.rtscts = False # disable hardware (RTS/CTS) flow control #ser.dsrdtr = False # disable hardware (DSR/DTR) flow control #ser.writeTimeout = 2 # timeout for write #data = chr(0x44) + chr(0x59) #print "-> " + binascii.b2a_hex(data) #exit() try: ser.open() except Exception, e: print "error open serial port: " + str(e) print "for full reset serial device you must reload drivers:" print " " print " cat /proc/tty/drivers " print " lsmod | grep usbserial " print " sudo modprobe -r pl2303 qcaux " print " sudo modprobe -r usbserial " print " " exit() from hktool.bootload.samsung import sgh_e730 #loader1 = open("loader1.bin", "rb").read() loader1 = sgh_e730.load_bootcode_first() print "loader1.bin data size is: " + str(len(loader1)) ldr1_i = 0 ldr1_l = len(loader1) ldr1_c = "4c00".decode("hex") #loader2 = 
open("loader2.bin", "rb").read() loader2 = sgh_e730.load_bootcode_second() print "loader2.bin data size is: " + str(len(loader2)) ldr2_i = 0 ldr2_l = len(loader2) #f = open("loader1.bin", "rb") #try: # byte = f.read(1) # while byte != "": # # Do stuff with byte. # byte = f.read(1) #except Exception, e1: # print "error: " + str(e1) # ser.close() # import traceback # traceback.print_exc() #finally: # f.close() global s if ser.isOpen(): try: print 'Work with Samsung SGH-E730:' print '- wait for SWIFT power on...' ser.flushInput() # flush input buffer, discarding all its contents ser.flushOutput() # flush output buffer, aborting current output # and discard all that is in buffer # write data #ser.write("AT+CSQ=?\x0D") #print("write data: AT+CSQ=?\x0D") # steps s = 0 serialPost(ser, "A0".decode("hex")) while True: n = 0 s += 1 while n < 1: n = ser.inWaiting() #time.sleep(1) data = ser.read(n) l = len(data) #if s != 6 or ldr1_i == 0: print "RX is L: " + str(l) + " <- " + binascii.b2a_hex(data) if s == 1: if data[l-1] == chr(0x5F): serialPost(ser, chr(0x0A)) elif s == 2: if data[l-1] == chr(0xF5): serialPost(ser, chr(0x50)) elif s == 3: #if l == 16: # serialPost(ser, "4412345678".decode("hex") + data) # -> AF serialPost(ser, "05".decode("hex")) elif s == 4: #if data[l-1] == chr(0x4f): # # set timeout to 1600 ms (10h) # serialPost(ser, chr(0x54) + chr(0x10)) # # set timeout to 1600 ms (20h) # #serialPost(ser, chr(0x54) + chr(0x20)) # -> FA # A2 - read from memory serialPost(ser, "A2".decode("hex")) elif s == 5: #if data[l-1] == chr(0x4f): # serialPost(ser, "530000000c".decode("hex")) # -> A2 - read command ACK # 80 01 00 00 - Configuration Register: Hardware Version Register serialPost(ser, "80010000".decode("hex")) elif s == 6: # -> 80 01 00 00 # 00 00 00 01 - read one byte serialPost(ser, "00000001".decode("hex")) #ldr1_i4 = 4*ldr1_i #ldr1_i8 = 4*ldr1_i + 4 #if ldr1_i8 < ldr1_l: # serialPostL(ser, ldr1_c + loader1[ldr1_i4:ldr1_i8], ldr1_l, ldr1_i8) # s -= 1 #else: # 
serialPostL(ser, ldr1_c + loader1[ldr1_i4:ldr1_l ], ldr1_l, ldr1_l ) #ldr1_i += 1 elif s == 7: if l == 6: s += 1 elif s == 8: # -> 00 00 00 01 - byte is read # -> XX XX - byte: serialPost(ser, "A2".decode("hex")) #if data[l-1] == chr(0x4f): # serialPost(ser, "530000000c".decode("hex")) elif s == 9: # -> A2 # 80 01 00 08 - Hardware Code Register serialPost(ser, "80010008".decode("hex")) #if data[l-1] == chr(0x4f): # serialPost(ser, "4a".decode("hex")) elif s == 10: # -> 80 01 00 08 serialPost(ser, "00000001".decode("hex")) #s = 20; #if data[l-1] == chr(0xAB): # # 0x00 -> Speed = 115200 # # 0x01 -> Speed = 230400 # # 0x02 -> Speed = 460800 # # 0x03 -> Speed = 921600 # serialPost(ser, "00".decode("hex")) # # close comms, bootup completed # ser.flushInput() # flush input buffer, discarding all its contents # ser.flushOutput() # flush output buffer, aborting current output # ser.close() # # reopen comms at the new speed # time.sleep(0.1) # ser.port = "COM3" # ser.baudrate = 115200 # ser.parity = serial.PARITY_NONE # set parity check: no parity # ser.open() # ser.flushInput() # flush input buffer, discarding all its contents # ser.flushOutput() # flush output buffer, aborting current output # serialPost(ser, "d9".decode("hex")) elif s == 11: if l == 6: s += 1 elif s == 12: # -> 00 00 00 01 # -> XX XX - we hawe a MediaTek MT6253 serialPost(ser, "A2".decode("hex")) elif s == 13: # -> A2 # 80 01 00 04 - Software Version Register serialPost(ser, "80010004".decode("hex")) elif s == 14: # -> 80 01 00 04 serialPost(ser, "00000001".decode("hex")) elif s == 15: if l == 6: s += 1 elif s == 16: # -> 00 00 00 01 # -> XX XX - # A1 - write to register serialPost(ser, "A1".decode("hex")) elif s == 17: # -> A1 - write command ack # 80 03 00 00 - Reset Generation Unit (RGU): Watchdog Timer Control Register serialPost(ser, "80030000".decode("hex")) elif s == 18: # -> 80 03 00 00 serialPost(ser, "00000001".decode("hex")) elif s == 19: # -> 00 00 00 01 # 22 00 - set serialPost(ser, 
"2200".decode("hex")) elif s == 20: s -= 1 elif s == 111: data = "d4".decode("hex") data0 = chr((ldr2_l >> 24) & int(0xFF)) data0 += chr((ldr2_l >> 16) & int(0xFF)) data0 += chr((ldr2_l >> 8) & int(0xFF)) data0 += chr((ldr2_l ) & int(0xFF)) data += data0 serialPost(ser, data) elif s == 112: # zapominaem CRC crc = data my_crc = summ(data0, 4) print "crc is: " + binascii.b2a_hex(crc) print "my_crc is: " + binascii.b2a_hex(my_crc) if crc == my_crc: send_len = 0 for i in range((ldr2_l - 1) >> 11): send_len = ldr2_l - (i << 11) if send_len > 2048: send_len = 2048 # calculate sum ss = i << 11 su = summ(loader2[ss:ss+send_len], send_len) # send command data = swapSerialData("f7".decode("hex")) data = swapSerialData(loader2[ss:ss+send_len]) #print "2 crc is: " + binascii.b2a_hex(data) #print "2 my_crc is: " + binascii.b2a_hex(su) #print "i: " + str(i) sys.stdout.write("\ri: " + str(i)) sys.stdout.write("\n") serialPost(ser, "FF".decode("hex")) elif s == 113: serialPost(ser, "D010000000".decode("hex")) elif s == 114: serialPost(ser, "D1".decode("hex")) elif s == 115: nand_id = (ord(data[8])<<8) + ord(data[9]) # nado proverit, chto 2,3,4 baity ravny sootvetstvenno 0xEC 0x22 0xFC # # additionally identify NAND for Swift print "Flash... " if nand_id == int(0x04): print " 16MB (128Mbit) NAND" elif nand_id == int(0x14): print " 32MB (256Mbit) NAND" elif nand_id == int(0x24): print " 64MB (512Mbit) NAND" elif nand_id == int(0x34): print "128MB ( 1Gbit) NAND" elif nand_id == int(0x0C): print " 16MB (128Mbit) NAND" elif nand_id == int(0x1C): print " 32MB (256Mbit) NAND" elif nand_id == int(0x2C): print " 64MB (512Mbit) NAND" elif nand_id == int(0x3C): print "128MB ( 1Gbit) NAND" else: print "Unknown NAND: " + str("%02x" % nand_id) # here, the bootup is completed # delay slightly (required!) 
time.sleep(0.25) else: #data = chr(0x44) data = chr(0x00) print "-> " + binascii.b2a_hex(data) #ser.write(data) data = ser.read() print "serial RX: " + binascii.b2a_hex(data) data = chr(0x44) print "-> " + binascii.b2a_hex(data) ser.write(data) #ser.flush() data = ser.read() print "serial RX: " + binascii.b2a_hex(data) data = chr(0x51) print "-> " + binascii.b2a_hex(data) ser.write(data) data = ser.read() print "serial RX: " + binascii.b2a_hex(data) #print ser.portstr time.sleep(0.5) # give the serial port sometime to receive the data numOfLines = 0 while True: response = ser.readline() print("read data: " + response) numOfLines = numOfLines + 1 if (numOfLines >= 5): break ser.close() except Exception, e1: print "error communicating...: " + str(e1) ser.close() import traceback traceback.print_exc() except KeyboardInterrupt: print "\nmanual interrupted!" ser.close() else: print "cannot open serial port " exit() #=========================================================== #from hktool.bootload import mediatek from hktool.bootload.mediatek import MTKBootload from threading import Thread from time import sleep as Sleep def logical_xor(str1, str2): return bool(str1) ^ bool(str2) #----- MAIN CODE ------------------------------------------- if __name__=='__main__': from sys import platform as _platform import os if _platform == "linux" or _platform == "linux2": # linux print "it is linux?" from hktool.hotplug import linux_udev as port_notify elif _platform == "darwin": # OS X print "it is osx?" print "WARNING: port_notify is not realised !!!" elif _platform == "win32": # Windows... print "it is windows?" from hktool.hotplug import windevnotif as port_notify print "sys.platform: " + _platform + ", os.name: " + os.name print "" print "Select: xml, boot, sgh, crc, usb, exit, quit, q" print "" tsk = str(raw_input("enter command > ")) if tsk.lower() in ['exit', 'quit', 'q']: os._exit(0) if tsk.lower() in ['boot']: print "Working with device communication..." 
print "" Thread(target = port_notify.run_notify).start() Sleep(1) port = port_notify.get_notify() print "port_name is: " + port #conn_port(port) #mediatek.init(port) m = MTKBootload(port) if 'sgh' in tsk.lower(): tsks = tsk.split() print "" print "Working with device communication..." print "" Sleep(1) port = tsks[1] print "port_name is: " + port #m = SGHBootload(port) if tsk.lower() in ['xml', 'lxml']: print "Working with lxml..." print "" from lxml import etree tree = etree.parse('../../mtk-tests/Projects/_lg-a290/data/UTLog_DownloadAgent_FlashTool.xml') root = tree.getroot() print root #entries = tree.xpath("//atom:category[@term='accessibility']/..", namespaces=NSMAP) entries = tree.xpath("//UTLOG/Request[@Dir='[OUT]']/Data") #print entries old_text = None dmp_text = False cnt_text = 0 bin_file = None for xent in entries: new_text = xent.text if new_text == old_text: continue old_text = new_text #print "-> " + new_text bin_text = new_text.replace(" ", "") bin_text = bin_text.decode("hex") bin_len = len(bin_text) print str(bin_len) + " -> " + new_text if dmp_text is False and bin_len == 1024: dmp_text = True prt = xent.getparent() atr = prt.attrib num = atr["Number"] nam = "big_" + num + ".bin" bin_file = open(nam, 'wb') print "" print "start dump big data to: " + nam if dmp_text is True: #--- import array a = array.array('H', bin_text) # array.array('H', bin_text) a.byteswap() bin_text = a.tostring() #--- bin_file.write(bin_text) if bin_len == 1024: cnt_text += 1 else: cnt_text = cnt_text * 1024 + bin_len dmp_text = False bin_file.close() print "big data length is: " + str(cnt_text) print "" cnt_text = 0 pass if tsk.lower() in ['crc']: str1 = raw_input("Enter string one:") str2 = raw_input("Enter string two:") if logical_xor(str1, str2): print "ok" else: print "bad" pass print hex(0x12ef ^ 0xabcd) print hex(int("12ef", 16) ^ int("abcd", 16)) str1 = raw_input("Enter string one: ") str2 = raw_input("Enter string two: ") print hex(int(str1, 16) ^ int(str2, 16)) 
pass if tsk.lower() in ['usb']: import usb.core #import usb.backend.libusb1 import usb.backend.libusb0 import logging #PYUSB_DEBUG_LEVEL = "debug" #PYUSB_LOG_FILENAME = "C:\dump" __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) __backend__ = os.path.join(__location__, "libusb0.dll") #PYUSB_LOG_FILENAME = __location__ #backend = usb.backend.libusb1.get_backend(find_library=lambda x: "/usr/lib/libusb-1.0.so") #backend = usb.backend.libusb1.get_backend(find_library=lambda x: __backend__) backend = usb.backend.libusb0.get_backend(find_library=lambda x: __backend__) dev = usb.core.find(find_all=True, backend=backend) #dev = usb.core.find(find_all=True) busses = usb.busses() print busses if dev is None: raise ValueError('Our device is not connected') for bus in busses: devices = bus.devices for dev in devices: try: _name = usb.util.get_string(dev.dev, 19, 1) except: continue dev.set_configuration() cfg = dev.get_active_configuration() interface_number = cfg[(0,0)].bInterfaceNumber alternate_settting = usb.control.get_interface(interface_number) print "Device name:",_name print "Device:", dev.filename print " idVendor:",hex(dev.idVendor) print " idProduct:",hex(dev.idProduct) for config in dev.configurations: print " Configuration:", config.value print " Total length:", config.totalLength print " selfPowered:", config.selfPowered print " remoteWakeup:", config.remoteWakeup print " maxPower:", config.maxPower print
Ma3X/boot-talker
codes/python/talk.py
Python
gpl-3.0
19,742
import numpy as np
from assorted.GraphInput import GraphInput
from model.component.component_model import ComponentModel


class CsvInput(ComponentModel):
    """Graph component that exposes CSV-style integer data as a graph input."""

    name = "CsvInput"
    default_out_sockets = [{'position': [0, -20], 'name': 'Output'}]
    default_attributes = {'path': '<argument>', 'n_columns': '3', 'separator': ','}
    # Class-level placeholder; the real GraphInput is created per instance.
    graph_input = None

    def __init__(self, manifest=None, identifier=None):
        ComponentModel.__init__(self, manifest=manifest, identifier=identifier)
        input_name = self.get_name() + "_input"
        self.graph_input = GraphInput(input_name, [3])

    def parse_column_string(self, string):
        """Parse a whitespace-separated string of integers into an int32 array."""
        return np.fromstring(string, dtype=np.int32, sep=' ')

    def theano_inputs(self):
        """Return the single graph input owned by this component."""
        return [self.graph_input]

    def compile_theano(self):
        """Compile the underlying input and publish its variable on socket 0."""
        graph_input = self.graph_input
        graph_input.compile_theano()
        self.push_by_index(0, graph_input.variable)
MichSchli/Mindblocks
packages/experiment_io/csv_input.py
Python
gpl-3.0
917
from rest_framework import serializers

from .models import Community, Objective, SocialNetwork, Sector


class ObjectiveSerializer(serializers.ModelSerializer):
    """Serializes Objective model instances (all model fields)."""

    class Meta:
        model = Objective
        # DRF >= 3.3 requires an explicit field selection on ModelSerializer;
        # '__all__' preserves the legacy "every model field" behaviour.
        fields = '__all__'


class CommunitySerializer(serializers.ModelSerializer):
    """Serializes Community instances with their nested objectives."""

    # Nested representation of the community's related objectives.
    objectives = ObjectiveSerializer(many=True)

    class Meta:
        model = Community
        depth = 1
        fields = '__all__'


class SectorSerializer(serializers.ModelSerializer):
    """Serializes Sector model instances (all model fields)."""

    class Meta:
        model = Sector
        fields = '__all__'
david2307/backend_159
communities/serializers.py
Python
gpl-3.0
460
from .operation import Operation
from ..operands import Register, Operand2, Opcodes, Zero


class LedOperation(Operation):
    # Instruction with mnemonic "but" (opcode 1111100): the opcode field,
    # 21 zero padding bits, then a register operand.
    # NOTE(review): the class name says "Led" but the mnemonic is "but"
    # (and the converse below) — the names look swapped. Confirm against
    # the assembler's instruction table before renaming; callers import
    # these classes by name, so a rename changes the public interface.
    opcodes = {"but": "1111100"}
    structure = [Opcodes(opcodes), Zero(21), Register]


class ButOperation(Operation):
    # Instruction with mnemonic "led" (opcode 1111101): the opcode field
    # followed by a 25-bit Operand2.
    # NOTE(review): see the swapped-name note on LedOperation above.
    opcodes = {"led": "1111101"}
    structure = [Opcodes(opcodes), Operand2(25)]
svenstaro/uni-projekt
assembler/operations/hwiOperation.py
Python
gpl-3.0
329
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Example: Mie extinction spectrum and size distribution of gold spheres."""
from mstm_studio.contributions import MieLognormSpheres
from mstm_studio.alloy_AuAg import AlloyAuAg
import numpy as np

# Lognormal ensemble of Mie spheres evaluated on a 300-800 nm wavelength grid.
spheres = MieLognormSpheres(name='mie', wavelengths=np.linspace(300, 800, 51))
spheres.set_material(AlloyAuAg(x_Au=1), 1.5)  # pure-gold sphere in glass (n = 1.5)

params = [1, 1.5, 0.5]  # scale, mu, sigma of the lognormal size distribution

fig, _axes = spheres.plot(params)
fig.savefig('mie_contrib.png', bbox_inches='tight')

spheres.MAX_DIAMETER_TO_PLOT = 20  # library default is 100 nm
fig, _axes = spheres.plot_distrib(params)
fig.savefig('mie_distrib.png', bbox_inches='tight')
lavakyan/mstm-spectrum
doc/source/scripting/mie_contrib.py
Python
gpl-3.0
567
#
# Copyright (C) 2003-2022 Sébastien Helleu <flashcode@flashtux.org>
#
# This file is part of WeeChat.org.
#
# WeeChat.org is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# WeeChat.org is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WeeChat.org.  If not, see <https://www.gnu.org/licenses/>.
#

"""Some useful path functions."""

import os

from django.conf import settings


def __path_join(base, *args):
    """
    Join multiple paths after 'base' and ensure the result is still
    under 'base'.

    Returns the joined, normalized path, or an empty string when the
    result would escape 'base' (e.g. via '..' components).
    """
    base = os.path.normpath(base)
    directory = os.path.normpath(os.path.join(base, *args))
    # A bare startswith(base) also matched *sibling* paths sharing the
    # textual prefix (e.g. '/srv/files2' escaped base '/srv/files');
    # require either an exact match or a path separator right after base.
    if directory == base or directory.startswith(base + os.sep):
        return directory
    return ''


def project_path_join(*args):
    """Join multiple paths after settings.BASE_DIR."""
    return __path_join(settings.BASE_DIR, *args)


def files_path_join(*args):
    """Join multiple paths after settings.FILES_ROOT."""
    return __path_join(settings.FILES_ROOT, *args)


def media_path_join(*args):
    """Join multiple paths after settings.MEDIA_ROOT."""
    return __path_join(settings.MEDIA_ROOT, *args)


def repo_path_join(*args):
    """Join multiple paths after settings.REPO_DIR."""
    return __path_join(settings.REPO_DIR, *args)
weechat/weechat.org
weechat/common/path.py
Python
gpl-3.0
1,672