Column            Type      Values
repo_name         string    lengths 5 to 92
path              string    lengths 4 to 221
copies            string    19 values
size              string    lengths 4 to 6
content           string    lengths 766 to 896k
license           string    15 values
hash              int64     -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean         float64   6.51 to 99.9
line_max          int64     32 to 997
alpha_frac        float64   0.25 to 0.96
autogenerated     bool      1 class
ratio             float64   1.5 to 13.6
config_test       bool      2 classes
has_no_keywords   bool      2 classes
few_assignments   bool      1 class
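The columns above pair each source file with per-file statistics and quality flags. As a minimal sketch of how rows with this schema might be consumed (assuming the table is published as a Hugging Face dataset under the hypothetical name "user/python-code-corpus", which is a placeholder and not a real identifier), the boolean flags can be used to filter out autogenerated or keyword-free files:

# Minimal sketch; "user/python-code-corpus" is a hypothetical dataset name.
from datasets import load_dataset

ds = load_dataset("user/python-code-corpus", split="train")

# Keep rows whose quality flags are all False, i.e. drop autogenerated files,
# files without Python keywords, and test/config files.
kept = ds.filter(
    lambda row: not row["autogenerated"]
    and not row["has_no_keywords"]
    and not row["config_test"]
)

print("kept %d of %d files" % (len(kept), len(ds)))
print(kept[0]["repo_name"], kept[0]["path"], kept[0]["license"])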
CylonicRaider/Instant
script/colorlogs.py
1
5716
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Perform syntax highlighting on Scribe logs. """ import sys, os, re import time import errno import instabot # Hard-coded ANSI escape sequences for coloring. COLORS = {None: '\033[0m', 'bold': '\033[1m', 'black': '\033[30m', 'red': '\033[31m', 'green': '\033[32m', 'orange': '\033[33m', 'blue': '\033[34m', 'magenta': '\033[35m', 'cyan': '\033[36m', 'gray': '\033[37m'} def highlight(line, filt=None): def highlight_scalar(val): if val in instabot.LOG_CONSTANTS: return (COLORS['magenta'], val) elif instabot.INTEGER_RE.match(val) or instabot.FLOAT_RE.match(val): return (COLORS['cyan'], val) else: return (COLORS[None], val) def highlight_tuple(val): if val[:1] != '(': return (COLORS['red'], val) idx, ret = 1, [COLORS['orange'], '('] m = instabot.WHITESPACE_RE.match(val, idx) if m: ret.append(m.group()) idx = m.end() while idx < len(val): m = instabot.SCALAR_RE.match(val, idx) if not m: break ret.extend(highlight_scalar(m.group())) idx = m.end() m = instabot.COMMA_RE.match(val, idx) if not m: break ret.extend((COLORS['orange'], m.group())) idx = m.end() m = instabot.WHITESPACE_RE.match(val, idx) if m: ret.extend((COLORS['orange'], m.group())) idx = m.end() if val[idx:] == ')': ret.extend((COLORS['orange'], ')')) else: # Should not happen... ret.extend((COLORS['red'], val[idx:])) return ret def highlight_scalar_or_tuple(val): if val.startswith('('): return highlight_tuple(val) else: return highlight_scalar(val) def highlight_dict(val): if val[:1] != '{': return (COLORS['red'], val) idx, ret = 1, [COLORS['orange'], '{'] m = instabot.WHITESPACE_RE.match(val, idx) if m: ret.append(m.group()) idx = m.end() while idx < len(val): m = instabot.DICT_ENTRY_RE.match(val, idx) if not m: break ret.extend(highlight_scalar_or_tuple(m.group(1))) ret.extend((COLORS['orange'], val[m.end(1):m.start(2)])) ret.extend(highlight_scalar_or_tuple(m.group(2))) idx = m.end() m = instabot.COMMA_RE.match(val, idx) if not m: break ret.extend((COLORS['orange'], m.group())) idx = m.end() m = instabot.WHITESPACE_RE.match(val, idx) if m: ret.extend((COLORS['orange'], m.group())) idx = m.end() if val[idx:] == '}': ret.extend((COLORS['orange'], '}')) else: # Should not happen... 
ret.extend((COLORS['red'], val[idx:])) return ret def highlight_any(val): if val.startswith('{'): return highlight_dict(val) elif val.startswith('('): return highlight_tuple(val) else: return highlight_scalar(val) m = instabot.LOGLINE_RE.match(line) if not m: return line if filt and not filt(m.group(2)): return None ret = [line[:m.start(2)], COLORS['bold'], m.group(2), COLORS[None], line[m.end(2):m.start(3)]] idx = m.start(3) if idx != -1: while idx < len(line): # Skip whitespace m = instabot.WHITESPACE_RE.match(line, idx) if m: ret.extend((COLORS[None], m.group())) idx = m.end() if idx == len(line): break # Match the next parameter; output name m = instabot.PARAM_RE.match(line, idx) if not m: break name, val = m.group(1, 2) ret.extend((COLORS['green'], name, '=')) # Output value ret.extend(highlight_any(val)) idx = m.end() ret.extend((COLORS[None], line[idx:])) return ''.join(ret) def highlight_stream(it, newlines=False, filt=None): if not newlines: for line in it: hl = highlight(line, filt) if hl is not None: yield hl else: for line in it: hl = highlight(line.rstrip('\n'), filt) if hl is not None: yield hl + '\n' def main(): p = instabot.OptionParser(sys.argv[0]) p.help_action(desc='A syntax highlighter for Scribe logs.') p.option('exclude', short='x', default=[], accum=True, help='Filter out lines of this type (may be repeated)') p.option('out', short='o', help='File to write output to (- is standard output and ' 'the default)') p.flag_ex('append', short='a', varname='outmode', value='a', default='w', help='Append to output file instead of overwriting it') p.flag('line-buffered', short='u', help='Flush output after each input line') p.argument('in', default='-', help='File to read from (- is standard input and ' 'the default)') p.parse(sys.argv[1:]) ignore, inpath, outpath = p.get('exclude', 'in', 'out') outmode, linebuf = p.get('outmode', 'line-buffered') try: filt = (lambda t: t not in ignore) if ignore else None of = instabot.open_file with of(inpath, 'r') as fi, of(outpath, outmode) as fo: for l in highlight_stream(fi, True, filt): fo.write(l) if linebuf: fo.flush() except KeyboardInterrupt: # Suppress noisy stack traces. pass if __name__ == '__main__': main()
mit
5,965,448,285,206,380,000
35.177215
77
0.520469
false
3.574734
false
false
false
openstack/senlin
contrib/kubernetes/kube/master.py
1
10788
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import jinja2 from oslo_log import log as logging from oslo_utils import encodeutils from kube import base from senlin.common import consts from senlin.common import exception as exc from senlin.common.i18n import _ from senlin.common import schema LOG = logging.getLogger(__name__) class ServerProfile(base.KubeBaseProfile): """Profile for an kubernetes master server.""" VERSIONS = { '1.0': [ {'status': consts.EXPERIMENTAL, 'since': '2017.10'} ] } KEYS = ( CONTEXT, FLAVOR, IMAGE, KEY_NAME, PUBLIC_NETWORK, BLOCK_DEVICE_MAPPING_V2, ) = ( 'context', 'flavor', 'image', 'key_name', 'public_network', 'block_device_mapping_v2', ) INTERNAL_KEYS = ( KUBEADM_TOKEN, KUBE_MASTER_IP, SECURITY_GROUP, PRIVATE_NETWORK, PRIVATE_SUBNET, PRIVATE_ROUTER, KUBE_MASTER_FLOATINGIP, KUBE_MASTER_FLOATINGIP_ID, SCALE_OUT_RECV_ID, SCALE_OUT_URL, ) = ( 'kubeadm_token', 'kube_master_ip', 'security_group', 'private_network', 'private_subnet', 'private_router', 'kube_master_floatingip', 'kube_master_floatingip_id', 'scale_out_recv_id', 'scale_out_url', ) NETWORK_KEYS = ( PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS, FLOATING_NETWORK, FLOATING_IP, ) = ( 'port', 'fixed_ip', 'network', 'security_groups', 'floating_network', 'floating_ip', ) BDM2_KEYS = ( BDM2_UUID, BDM2_SOURCE_TYPE, BDM2_DESTINATION_TYPE, BDM2_DISK_BUS, BDM2_DEVICE_NAME, BDM2_VOLUME_SIZE, BDM2_GUEST_FORMAT, BDM2_BOOT_INDEX, BDM2_DEVICE_TYPE, BDM2_DELETE_ON_TERMINATION, ) = ( 'uuid', 'source_type', 'destination_type', 'disk_bus', 'device_name', 'volume_size', 'guest_format', 'boot_index', 'device_type', 'delete_on_termination', ) properties_schema = { CONTEXT: schema.Map( _('Customized security context for operating servers.'), ), FLAVOR: schema.String( _('ID of flavor used for the server.'), required=True, updatable=True, ), IMAGE: schema.String( # IMAGE is not required, because there could be BDM or BDMv2 # support and the corresponding settings effective _('ID of image to be used for the new server.'), updatable=True, ), KEY_NAME: schema.String( _('Name of Nova keypair to be injected to server.'), ), PUBLIC_NETWORK: schema.String( _('Public network for kubernetes.'), required=True, ), BLOCK_DEVICE_MAPPING_V2: schema.List( _('A list specifying the properties of block devices to be used ' 'for this server.'), schema=schema.Map( _('A map specifying the properties of a block device to be ' 'used by the server.'), schema={ BDM2_UUID: schema.String( _('ID of the source image, snapshot or volume'), ), BDM2_SOURCE_TYPE: schema.String( _("Volume source type, must be one of 'image', " "'snapshot', 'volume' or 'blank'"), required=True, ), BDM2_DESTINATION_TYPE: schema.String( _("Volume destination type, must be 'volume' or " "'local'"), required=True, ), BDM2_DISK_BUS: schema.String( _('Bus of the device.'), ), BDM2_DEVICE_NAME: schema.String( _('Name of the device(e.g. 
vda, xda, ....).'), ), BDM2_VOLUME_SIZE: schema.Integer( _('Size of the block device in MB(for swap) and ' 'in GB(for other formats)'), required=True, ), BDM2_GUEST_FORMAT: schema.String( _('Specifies the disk file system format(e.g. swap, ' 'ephemeral, ...).'), ), BDM2_BOOT_INDEX: schema.Integer( _('Define the boot order of the device'), ), BDM2_DEVICE_TYPE: schema.String( _('Type of the device(e.g. disk, cdrom, ...).'), ), BDM2_DELETE_ON_TERMINATION: schema.Boolean( _('Whether to delete the volume when the server ' 'stops.'), ), } ), ), } def __init__(self, type_name, name, **kwargs): super(ServerProfile, self).__init__(type_name, name, **kwargs) self.server_id = None def do_cluster_create(self, obj): self._generate_kubeadm_token(obj) self._create_security_group(obj) self._create_network(obj) def do_cluster_delete(self, obj): if obj.dependents and 'kube-node' in obj.dependents: msg = ("Cluster %s delete failed, " "Node clusters %s must be deleted first." % (obj.id, obj.dependents['kube-node'])) raise exc.EResourceDeletion(type='kubernetes.master', id=obj.id, message=msg) self._delete_network(obj) self._delete_security_group(obj) def do_create(self, obj): """Create a server for the node object. :param obj: The node object for which a server will be created. """ kwargs = {} for key in self.KEYS: if self.properties[key] is not None: kwargs[key] = self.properties[key] image_ident = self.properties[self.IMAGE] if image_ident is not None: image = self._validate_image(obj, image_ident, 'create') kwargs.pop(self.IMAGE) kwargs['imageRef'] = image.id flavor_ident = self.properties[self.FLAVOR] flavor = self._validate_flavor(obj, flavor_ident, 'create') kwargs.pop(self.FLAVOR) kwargs['flavorRef'] = flavor.id keypair_name = self.properties[self.KEY_NAME] if keypair_name: keypair = self._validate_keypair(obj, keypair_name, 'create') kwargs['key_name'] = keypair.name kwargs['name'] = obj.name metadata = self._build_metadata(obj, {}) kwargs['metadata'] = metadata jj_vars = {} cluster_data = self._get_cluster_data(obj) kwargs['networks'] = [{'uuid': cluster_data[self.PRIVATE_NETWORK]}] # Get user_data parameters from metadata jj_vars['KUBETOKEN'] = cluster_data[self.KUBEADM_TOKEN] jj_vars['MASTER_FLOATINGIP'] = cluster_data[ self.KUBE_MASTER_FLOATINGIP] block_device_mapping_v2 = self.properties[self.BLOCK_DEVICE_MAPPING_V2] if block_device_mapping_v2 is not None: kwargs['block_device_mapping_v2'] = self._resolve_bdm( obj, block_device_mapping_v2, 'create') # user_data = self.properties[self.USER_DATA] user_data = base.loadScript('./scripts/master.sh') if user_data is not None: # Use jinja2 to replace variables defined in user_data try: jj_t = jinja2.Template(user_data) user_data = jj_t.render(**jj_vars) except (jinja2.exceptions.UndefinedError, ValueError) as ex: # TODO(anyone) Handle jinja2 error pass ud = encodeutils.safe_encode(user_data) kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud)) sgid = self._get_security_group(obj) kwargs['security_groups'] = [{'name': sgid}] server = None resource_id = None try: server = self.compute(obj).server_create(**kwargs) self.compute(obj).wait_for_server(server.id) server = self.compute(obj).server_get(server.id) self._update_master_ip(obj, server.addresses[''][0]['addr']) self._associate_floatingip(obj, server) LOG.info("Created master node: %s" % server.id) return server.id except exc.InternalError as ex: if server and server.id: resource_id = server.id raise exc.EResourceCreation(type='server', message=str(ex), resource_id=resource_id) def 
do_delete(self, obj, **params): """Delete the physical resource associated with the specified node. :param obj: The node object to operate on. :param kwargs params: Optional keyword arguments for the delete operation. :returns: This operation always return True unless exception is caught. :raises: `EResourceDeletion` if interaction with compute service fails. """ if not obj.physical_id: return True server_id = obj.physical_id ignore_missing = params.get('ignore_missing', True) internal_ports = obj.data.get('internal_ports', []) force = params.get('force', False) try: self._disassociate_floatingip(obj, server_id) driver = self.compute(obj) if force: driver.server_force_delete(server_id, ignore_missing) else: driver.server_delete(server_id, ignore_missing) driver.wait_for_server_delete(server_id) if internal_ports: ex = self._delete_ports(obj, internal_ports) if ex: raise ex return True except exc.InternalError as ex: raise exc.EResourceDeletion(type='server', id=server_id, message=str(ex))
apache-2.0
4,121,959,655,715,227,600
37.666667
79
0.540786
false
4.157225
false
false
false
richardkiss/pycoin
pycoin/key/electrum.py
1
3426
import hashlib from .subpaths import subpaths_for_path_range from pycoin.encoding.bytes32 import from_bytes_32, to_bytes_32 from pycoin.encoding.hash import double_sha256 from pycoin.encoding.hexbytes import b2h from pycoin.key.Key import Key def initial_key_to_master_key(initial_key): """ initial_key: a hex string of length 32 """ b = initial_key.encode("utf8") orig_input = b for i in range(100000): b = hashlib.sha256(b + orig_input).digest() return from_bytes_32(b) class ElectrumWallet(Key): def __init__(self, initial_key=None, master_private_key=None, public_pair=None, master_public_key=None): if [initial_key, public_pair, master_private_key, master_public_key].count(None) != 3: raise ValueError( "exactly one of initial_key, master_private_key, master_public_key must be non-None") self._initial_key = initial_key if initial_key is not None: master_private_key = initial_key_to_master_key(initial_key) if master_public_key: public_pair = tuple(from_bytes_32(master_public_key[idx:idx+32]) for idx in (0, 32)) super(ElectrumWallet, self).__init__( secret_exponent=master_private_key, public_pair=public_pair, is_compressed=False) @classmethod def deserialize(class_, blob): if len(blob) == 32: return class_(master_private_key=from_bytes_32(blob)) if len(blob) == 64: return class_(master_public_key=blob) def serialize(self): if self._secret_exponent: return to_bytes_32(self._secret_exponent) return self.master_public_key() def secret_exponent(self): if self._secret_exponent is None and self._initial_key: self._secret_exponent = initial_key_to_master_key(b2h(self._initial_key)) return self._secret_exponent def master_private_key(self): return self.secret_exponent() def master_public_key(self): return self.sec()[1:] def public_copy(self): if self.secret_exponent() is None: return self return self.__class__(public_pair=self.public_pair()) def subkey_for_path(self, path): return self.subkey(path) def subkey(self, path): """ path: of the form "K" where K is an integer index, or "K/N" where N is usually a 0 (deposit address) or 1 (change address) """ t = path.split("/") if len(t) == 2: n, for_change = t else: n, = t for_change = 0 b = (str(n) + ':' + str(for_change) + ':').encode("utf8") + self.master_public_key() offset = from_bytes_32(double_sha256(b)) if self.secret_exponent(): return self.__class__( master_private_key=((self.master_private_key() + offset) % self._generator.order()) ) p1 = offset * self._generator x, y = self.public_pair() p2 = self._generator.Point(x, y) p = p1 + p2 return self.__class__(public_pair=p) def subkeys(self, path): """ A generalized form that can return multiple subkeys. """ for _ in subpaths_for_path_range(path, hardening_chars="'pH"): yield self.subkey(_) def __repr__(self): return "Electrum<E:%s>" % b2h(self.master_public_key())
mit
-3,823,016,807,712,825,300
33.26
108
0.593695
false
3.557632
false
false
false
Xilef11/runesofwizardry-classics
createrune.py
1
1958
import sys

rune_Path = "./src/main/java/xilef11/mc/runesofwizardry_classics/runes/Rune"
lang_file = "src/main/resources/assets/runesofwizardry_classics/lang/en_US.lang"
runes_file = "src/main/java/xilef11/mc/runesofwizardry_classics/ModRunes.java"

shortName = sys.argv[1]
locName = sys.argv[2]

clas = open(rune_Path+shortName+".java","w")
clas.write('''
package xilef11.mc.runesofwizardry_classics.runes;

import java.io.IOException;
import java.util.Set;

import xilef11.mc.runesofwizardry_classics.Refs;
import xilef11.mc.runesofwizardry_classics.runes.entity.RuneEntityUnimplemented;
import net.minecraft.item.ItemStack;
import net.minecraft.util.BlockPos;
import net.minecraft.util.EnumFacing;
import net.minecraft.util.Vec3i;

import com.zpig333.runesofwizardry.api.RuneEntity;
import com.zpig333.runesofwizardry.core.rune.PatternUtils;
import com.zpig333.runesofwizardry.tileentity.TileEntityDustActive;

public class Rune'''+shortName+''' extends ClassicRune {

    @Override
    protected ItemStack[][] setupPattern() throws IOException {
        return PatternUtils.importFromJson(Refs.PATTERN_PATH+"rune'''+shortName+'''.json");
    }

    @Override
    protected Vec3i setupEntityPos() {
        return new Vec3i(0,0,0);
    }

    @Override
    protected ItemStack[][] setupSacrifice() {
        return new ItemStack[][]{ {} };
    }

    @Override
    public String getName() {
        return Refs.Lang.RUNE+".'''+shortName.lower()+'''";
    }

    @Override
    public RuneEntity createRune(ItemStack[][] actualPattern, EnumFacing front,
            Set<BlockPos> dusts, TileEntityDustActive entity) {
        return new RuneEntityUnimplemented(actualPattern, front, dusts, entity, this);
    }

}
''')
clas.close()

lang = open(lang_file,"a")
lang.write('runesofwizardry_classics.rune.'+shortName.lower()+'='+locName+'\n')
lang.close()

#Note: This will always append to the complete end of the file
runes = open(runes_file,"a")
runes.write(' DustRegistry.registerRune(new Rune'+shortName+'());\n')
runes.close()
gpl-3.0
5,626,017,188,564,619,000
28.223881
85
0.752809
false
2.727019
false
false
false
eudaq/eudaq-configuration
jtag_generation/others/plume/okf7/chip3/jtag_generator.py
1
4579
# JTAG files generator # calculates DAC values for different S/N cuts (3 to 12) and generates JTAG (txt) files, update creation date # by Jan Dreyling-Eschweiler, telescope-coor@desy.de # First version: 4. September 2015 # ----------------------- # modules import re import math import numpy as np import time ################################################## # hard code data input_file = "../default_jtag.txt" sensor_name = "3" # Middlepoints in DAC IVDREF2 = 98 IVDREF1A = 191 IVDREF1B = 145 IVDREF1C = 95 IVDREF1D = 73 # Thermal noise: TN THN_matA = 0.9869 THN_matB = 0.9571 THN_matC = 1.044 THN_matD = 1.065 # Fixed pattern noise: FPN FPN_matA = 0.45 FPN_matB = 0.2892 FPN_matC = 0.5206 FPN_matD = 0.4351 # Offset OFF_matA = -0.0354 OFF_matB = 0.1257 OFF_matC = 0.2244 OFF_matD = -0.005987 # slope stays constant DAC_slope = 0.25 ################################################## # Calculations # Offset in DAC units IVDREF1A_offset = -(IVDREF1A * DAC_slope) IVDREF1B_offset = -(IVDREF1B * DAC_slope) IVDREF1C_offset = -(IVDREF1C * DAC_slope) IVDREF1D_offset = -(IVDREF1D * DAC_slope) # total noise TON_matA = math.sqrt(THN_matA**2 + FPN_matA**2) TON_matB = math.sqrt(THN_matB**2 + FPN_matB**2) TON_matC = math.sqrt(THN_matC**2 + FPN_matC**2) TON_matD = math.sqrt(THN_matD**2 + FPN_matD**2) TON_avg = (TON_matA + TON_matB + TON_matC + TON_matD) / 4 #print TON_matA, TON_matB, TON_matC, TON_matD # Sigma to noise cut SN = np.array([3, 4, 5, 6, 7, 8, 9, 10, 11, 12]) # mV value VmV_matA = (TON_matA * SN) + OFF_matA VmV_matB = (TON_matB * SN) + OFF_matB VmV_matC = (TON_matC * SN) + OFF_matC VmV_matD = (TON_matD * SN) + OFF_matD #print VmV_matA, VmV_matB, VmV_matC, VmV_matD # DAC value DAC_matA = (VmV_matA - IVDREF1A_offset) / DAC_slope # np.rint DAC_matB = (VmV_matB - IVDREF1B_offset) / DAC_slope DAC_matC = (VmV_matC - IVDREF1C_offset) / DAC_slope DAC_matD = (VmV_matD - IVDREF1D_offset) / DAC_slope #print DAC_matA, DAC_matB, DAC_matC, DAC_matD # set 255 as highest value # print np.where(DAC_matD > 255) DAC_matA[np.where(DAC_matA > 255)] = 255. DAC_matB[np.where(DAC_matB > 255)] = 255. DAC_matC[np.where(DAC_matC > 255)] = 255. DAC_matD[np.where(DAC_matD > 255)] = 255. #print DAC_matA, DAC_matB, DAC_matC, DAC_matD #print str(int(round(DAC_matA[i]))), str(int(round(DAC_matB[i]))), str(int(round(DAC_matC[i]))), str(int(round(DAC_matD[i]))) # Adjust DAC values # ----------------- # e.g. 
DAC-vlaues (XXX) of plane 0 # line 26: XXX ; :BIAS_DAC[0][10] --> IVDREF1D # line 27: XXX ; :BIAS_DAC[0][11] --> IVDREF1C # line 28: XXX ; :BIAS_DAC[0][12] --> IVDREF1B # line 29: XXX ; :BIAS_DAC[0][13] --> IVDREF1A # line 30: XXX ; :BIAS_DAC[0][14] --> IVDREF2 for i, n in enumerate(SN): #print i, n output_file = "chip" + str(sensor_name) + "_thresh" + str(SN[i]) + ".txt" print "Write file:", output_file # IVDREF2 with open(input_file, "r") as sources: lines = sources.readlines() with open(output_file, "w") as sources: for line in lines: sources.write(re.sub(r'^(.*?)BIAS_DAC\[.\]\[14\]', str(IVDREF2) + ' ; :BIAS_DAC[0][14]', line)) # IVDREF1A with open(output_file, "r") as sources: lines = sources.readlines() with open(output_file, "w") as sources: for line in lines: sources.write(re.sub(r'^(.*?)BIAS_DAC\[.\]\[13\]', str(int(round(DAC_matA[i]))) + ' ; :BIAS_DAC[0][13]', line)) # IVDREF1B with open(output_file, "r") as sources: lines = sources.readlines() with open(output_file, "w") as sources: for line in lines: sources.write(re.sub(r'^(.*?)BIAS_DAC\[.\]\[12\]', str(int(round(DAC_matB[i]))) + ' ; :BIAS_DAC[0][12]', line)) # IVDREF1C with open(output_file, "r") as sources: lines = sources.readlines() with open(output_file, "w") as sources: for line in lines: sources.write(re.sub(r'^(.*?)BIAS_DAC\[.\]\[11\]', str(int(round(DAC_matC[i]))) + ' ; :BIAS_DAC[0][11]', line)) # IVDREF1D with open(output_file, "r") as sources: lines = sources.readlines() with open(output_file, "w") as sources: for line in lines: sources.write(re.sub(r'^(.*?)BIAS_DAC\[.\]\[10\]', str(int(round(DAC_matD[i]))) + ' ; :BIAS_DAC[0][10]', line)) # date and time with open(output_file, "r") as sources: lines = sources.readlines() with open(output_file, "w") as sources: for line in lines: sources.write(re.sub(r'^\#JTAG\_MS(.*?)$', '#JTAG_MS ' + time.strftime("%c"), line)) # summary print "Total noise average of sensor", str(sensor_name), "-->", TON_avg exit()
lgpl-3.0
-1,253,853,782,808,532,000
28.928105
125
0.605809
false
2.367632
false
false
false
ericacheong/p4_conference_central
models.py
1
4915
#!/usr/bin/env python """models.py Udacity conference server-side Python App Engine data & ProtoRPC models $Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $ created/forked from conferences.py by wesc on 2014 may 24 """ __author__ = 'wesc+api@google.com (Wesley Chun)' import httplib import endpoints from protorpc import messages from google.appengine.ext import ndb class ConflictException(endpoints.ServiceException): """ConflictException -- exception mapped to HTTP 409 response""" http_status = httplib.CONFLICT class Profile(ndb.Model): """Profile -- User profile object""" displayName = ndb.StringProperty() mainEmail = ndb.StringProperty() teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED') conferenceKeysToAttend = ndb.StringProperty(repeated=True) sessionKeysWishlist = ndb.StringProperty(repeated=True) class ProfileMiniForm(messages.Message): """ProfileMiniForm -- update Profile form message""" displayName = messages.StringField(1) teeShirtSize = messages.EnumField('TeeShirtSize', 2) class ProfileForm(messages.Message): """ProfileForm -- Profile outbound form message""" displayName = messages.StringField(1) mainEmail = messages.StringField(2) teeShirtSize = messages.EnumField('TeeShirtSize', 3) conferenceKeysToAttend = messages.StringField(4, repeated=True) class StringMessage(messages.Message): """StringMessage-- outbound (single) string message""" data = messages.StringField(1, required=True) class BooleanMessage(messages.Message): """BooleanMessage-- outbound Boolean value message""" data = messages.BooleanField(1) class Conference(ndb.Model): """Conference -- Conference object""" name = ndb.StringProperty(required=True) description = ndb.StringProperty() organizerUserId = ndb.StringProperty() topics = ndb.StringProperty(repeated=True) city = ndb.StringProperty() startDate = ndb.DateProperty() month = ndb.IntegerProperty() # TODO: do we need for indexing like Java? 
endDate = ndb.DateProperty() maxAttendees = ndb.IntegerProperty() seatsAvailable = ndb.IntegerProperty() class ConferenceForm(messages.Message): """ConferenceForm -- Conference outbound form message""" name = messages.StringField(1) description = messages.StringField(2) organizerUserId = messages.StringField(3) topics = messages.StringField(4, repeated=True) city = messages.StringField(5) startDate = messages.StringField(6) #DateTimeField() month = messages.IntegerField(7) maxAttendees = messages.IntegerField(8) seatsAvailable = messages.IntegerField(9) endDate = messages.StringField(10) #DateTimeField() websafeKey = messages.StringField(11) organizerDisplayName = messages.StringField(12) class ConferenceForms(messages.Message): """ConferenceForms -- multiple Conference outbound form message""" items = messages.MessageField(ConferenceForm, 1, repeated=True) class TeeShirtSize(messages.Enum): """TeeShirtSize -- t-shirt size enumeration value""" NOT_SPECIFIED = 1 XS_M = 2 XS_W = 3 S_M = 4 S_W = 5 M_M = 6 M_W = 7 L_M = 8 L_W = 9 XL_M = 10 XL_W = 11 XXL_M = 12 XXL_W = 13 XXXL_M = 14 XXXL_W = 15 class ConferenceQueryForm(messages.Message): """ConferenceQueryForm -- Conference query inbound form message""" field = messages.StringField(1) operator = messages.StringField(2) value = messages.StringField(3) class ConferenceQueryForms(messages.Message): """ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message""" filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True) class Session(ndb.Model): """Session -- Session object""" name = ndb.StringProperty(required=True) highlights = ndb.StringProperty() speakers = ndb.StringProperty(repeated=True) duration = ndb.IntegerProperty() # Duration in minutes typeOfSession = ndb.StringProperty() date = ndb.DateProperty() startTime = ndb.TimeProperty() class SessionForm(messages.Message): """SessionForm -- Session outbound form message""" name = messages.StringField(1) highlights = messages.StringField(2) speakers = messages.StringField(3, repeated=True) duration = messages.IntegerField(4) typeOfSession = messages.StringField(5) date = messages.StringField(6) startTime = messages.StringField(7) websafeKey = messages.StringField(8) class SessionForms(messages.Message): """SessionForms -- multiple Session outbound form message""" items = messages.MessageField(SessionForm, 1, repeated=True)
apache-2.0
8,344,834,078,810,159,000
34.359712
86
0.684842
false
3.913217
false
false
false
mastizada/kuma
kuma/search/models.py
1
15211
# -*- coding: utf-8 -*- import operator from django.conf import settings from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes import generic from django.db import models from django.db.models.signals import post_delete from django.utils.html import strip_tags from django.utils import timezone from django.utils.functional import cached_property from django.template.defaultfilters import slugify from elasticutils.contrib.django import MappingType, Indexable from elasticutils.contrib.django.tasks import index_objects from kuma.core.managers import PrefetchTaggableManager from kuma.core.urlresolvers import reverse from kuma.wiki.models import Document from .decorators import register_mapping_type from .queries import DocumentS from .signals import delete_index class IndexManager(models.Manager): """ The model manager to implement a couple of useful methods for handling search indexes. """ def get_current(self): try: return (self.filter(promoted=True, populated=True) .order_by('-created_at'))[0] except (self.model.DoesNotExist, IndexError, AttributeError): fallback_name = settings.ES_INDEXES['default'] return Index(name=fallback_name, populated=True, promoted=True) class Index(models.Model): """ Model to store a bunch of metadata about search indexes including a way to promote it to be picked up as the "current" one. """ created_at = models.DateTimeField(default=timezone.now) name = models.CharField(max_length=30, blank=True, null=True, help_text='The search index name, set to ' 'the created date when left empty') promoted = models.BooleanField(default=False) populated = models.BooleanField(default=False) objects = IndexManager() class Meta: verbose_name = 'Index' verbose_name_plural = 'Indexes' ordering = ['-created_at'] def save(self, *args, **kwargs): if not self.name: self.name = self.created_at.strftime('%Y-%m-%d-%H-%M-%S') super(Index, self).save(*args, **kwargs) def __unicode__(self): return self.name @cached_property def successor(self): try: return self.get_next_by_created_at() except (Index.DoesNotExist, ValueError): return None @cached_property def prefixed_name(self): "The name to use for the search index in ES" return '%s-%s' % (settings.ES_INDEX_PREFIX, self.name) def populate(self): from .tasks import populate_index populate_index.delay(self.pk) def record_outdated(self, instance): if self.successor: return OutdatedObject.objects.create(index=self.successor, content_object=instance) def promote(self): rescheduled = [] for outdated_object in self.outdated_objects.all(): instance = outdated_object.content_object label = ('%s.%s.%s' % (outdated_object.content_type.natural_key() + (instance.id,))) # gives us 'wiki.document.12345' if label in rescheduled: continue mappping_type = instance.get_mapping_type() index_objects.delay(mappping_type, [instance.id]) rescheduled.append(label) self.outdated_objects.all().delete() self.promoted = True self.save() def demote(self): self.promoted = False self.save() post_delete.connect(delete_index, sender=Index, dispatch_uid='search.index.delete') class OutdatedObject(models.Model): index = models.ForeignKey(Index, related_name='outdated_objects') created_at = models.DateTimeField(default=timezone.now) content_type = models.ForeignKey(ContentType) object_id = models.PositiveIntegerField() content_object = generic.GenericForeignKey('content_type', 'object_id') class FilterGroup(models.Model): """ A way to group different kinds of filters from each other. 
""" name = models.CharField(max_length=255) slug = models.CharField(max_length=255, blank=True, null=True, help_text='the slug to be used as the name of the ' 'query parameter in the search URL') order = models.IntegerField(default=1, help_text='An integer defining which order ' 'the filter group should show up ' 'in the sidebar') class Meta: ordering = ('-order', 'name') unique_together = ( ('name', 'slug'), ) def save(self, *args, **kwargs): if not self.slug: self.slug = slugify(self.name) super(FilterGroup, self).save(*args, **kwargs) def __unicode__(self): return self.name class FilterManager(models.Manager): use_for_related_fields = True def visible_only(self): return self.filter(visible=True) class Filter(models.Model): """ The model to store custom search filters in the database. This is used to dynamically tweak the search filters available to users. """ OPERATOR_AND = 'AND' OPERATOR_OR = 'OR' OPERATOR_CHOICES = ( (OPERATOR_OR, OPERATOR_OR), (OPERATOR_AND, OPERATOR_AND), ) OPERATORS = { OPERATOR_OR: operator.or_, OPERATOR_AND: operator.and_, } name = models.CharField(max_length=255, db_index=True, help_text='the English name of the filter ' 'to be shown in the frontend UI') slug = models.CharField(max_length=255, db_index=True, help_text='the slug to be used as a query ' 'parameter in the search URL') shortcut = models.CharField(max_length=255, db_index=True, null=True, blank=True, help_text='the name of the shortcut to ' 'show in the command and query UI. ' 'e.g. fxos') group = models.ForeignKey(FilterGroup, related_name='filters', help_text='E.g. "Topic", "Skill level" etc') tags = PrefetchTaggableManager(help_text='A comma-separated list of tags. ' 'If more than one tag given a OR ' 'query is executed') operator = models.CharField(max_length=3, choices=OPERATOR_CHOICES, default=OPERATOR_OR, help_text='The logical operator to use ' 'if more than one tag is given') enabled = models.BooleanField(default=True, help_text='Whether this filter is shown ' 'to users or not.') visible = models.BooleanField(default=True, help_text='Whether this filter is shown ' 'at public places, e.g. the ' 'command and query UI') objects = FilterManager() class Meta(object): unique_together = ( ('name', 'slug'), ) def __unicode__(self): return self.name def get_absolute_url(self): path = reverse('search', locale=settings.LANGUAGE_CODE) return '%s%s?%s=%s' % (settings.SITE_URL, path, self.group.slug, self.slug) @register_mapping_type class DocumentType(MappingType, Indexable): excerpt_fields = ['summary', 'content'] exclude_slugs = ['Talk:', 'User:', 'User_talk:', 'Template_talk:', 'Project_talk:'] @classmethod def get_model(cls): return Document @classmethod def get_index(cls): return Index.objects.get_current().prefixed_name @classmethod def search(cls): """Returns a typed S for this class. :returns: an `S` for this DjangoMappingType """ return DocumentS(cls) @classmethod def get_analysis(cls): return { 'filter': { 'kuma_word_delimiter': { 'type': 'word_delimiter', 'preserve_original': True, # hi-fi -> hifi, hi-fi 'catenate_words': True, # hi-fi -> hifi 'catenate_numbers': True, # 90-210 -> 90210 } }, 'analyzer': { 'default': { 'tokenizer': 'standard', 'filter': ['standard', 'elision'] }, # a custom analyzer that strips html and uses our own # word delimiter filter and the elision filter# # (e.g. L'attribut -> attribut). 
The rest is the same as # the snowball analyzer 'kuma_content': { 'type': 'custom', 'tokenizer': 'standard', 'char_filter': ['html_strip'], 'filter': [ 'elision', 'kuma_word_delimiter', 'lowercase', 'standard', 'stop', 'snowball', ], }, 'kuma_title': { 'type': 'custom', 'tokenizer': 'standard', 'filter': [ 'elision', 'kuma_word_delimiter', 'lowercase', 'standard', 'snowball', ], }, 'case_sensitive': { 'type': 'custom', 'tokenizer': 'keyword' }, 'caseInsensitiveKeyword': { 'type': 'custom', 'tokenizer': 'keyword', 'filter': 'lowercase' } }, } @classmethod def get_mapping(cls): return { # try to not waste so much space '_all': {'enabled': False}, '_boost': {'name': '_boost', 'null_value': 1.0, 'type': 'float'}, 'content': { 'type': 'string', 'analyzer': 'kuma_content', # faster highlighting 'term_vector': 'with_positions_offsets', }, 'id': {'type': 'long', 'index': 'not_analyzed'}, 'locale': {'type': 'string', 'index': 'not_analyzed'}, 'modified': {'type': 'date'}, 'slug': {'type': 'string', 'index': 'not_analyzed'}, 'parent': { 'type': 'nested', 'properties': { 'id': {'type': 'long', 'index': 'not_analyzed'}, 'title': {'type': 'string', 'analyzer': 'kuma_title'}, 'slug': {'type': 'string', 'index': 'not_analyzed'}, 'locale': {'type': 'string', 'index': 'not_analyzed'}, } }, 'summary': { 'type': 'string', 'analyzer': 'kuma_content', # faster highlighting 'term_vector': 'with_positions_offsets', }, 'tags': {'type': 'string', 'analyzer': 'case_sensitive'}, 'title': { 'type': 'string', 'analyzer': 'kuma_title', 'boost': 1.2, # the title usually has the best description }, 'kumascript_macros': { 'type': 'string', 'analyzer': 'caseInsensitiveKeyword' }, 'css_classnames': { 'type': 'string', 'analyzer': 'caseInsensitiveKeyword' }, 'html_attributes': { 'type': 'string', 'analyzer': 'caseInsensitiveKeyword' }, } @classmethod def extract_document(cls, obj_id, obj=None): if obj is None: obj = cls.get_model().objects.get(pk=obj_id) doc = { 'id': obj.id, 'title': obj.title, 'slug': obj.slug, 'summary': obj.get_summary(strip_markup=True), 'locale': obj.locale, 'modified': obj.modified, 'content': strip_tags(obj.rendered_html), 'tags': list(obj.tags.values_list('name', flat=True)), 'kumascript_macros': obj.extract_kumascript_macro_names(), 'css_classnames': obj.extract_css_classnames(), 'html_attributes': obj.extract_html_attributes(), } if obj.zones.exists(): # boost all documents that are a zone doc['_boost'] = 8.0 elif obj.slug.split('/') == 1: # a little boost if no zone but still first level doc['_boost'] = 4.0 else: doc['_boost'] = 1.0 if obj.parent: doc['parent'] = { 'id': obj.parent.id, 'title': obj.parent.title, 'locale': obj.parent.locale, 'slug': obj.parent.slug, } else: doc['parent'] = {} return doc @classmethod def get_indexable(cls): """ For this mapping type return a list of model IDs that should be indexed with the management command, in a full reindex. WARNING: When changing this code make sure to update the ``should_update`` method below, too! """ model = cls.get_model() excludes = [] for exclude in cls.exclude_slugs: excludes.append(models.Q(slug__icontains=exclude)) return (model.objects .filter(is_template=False, is_redirect=False, deleted=False) .exclude(reduce(operator.or_, excludes)) .values_list('id', flat=True)) @classmethod def should_update(cls, obj): """ Given a Document instance should return boolean value whether the instance should be indexed or not. WARNING: This *must* mirror the logic of the ``get_indexable`` method above! 
""" return (not obj.is_template and not obj.is_redirect and not obj.deleted and not any([exclude in obj.slug for exclude in cls.exclude_slugs])) def get_excerpt(self): for field in self.excerpt_fields: if field in self.es_meta.highlight: return u'…'.join(self.es_meta.highlight[field]) return self.summary
mpl-2.0
168,948,031,515,714,620
35.125891
79
0.509369
false
4.515736
false
false
false
deerwalk/voltdb
lib/python/voltcli/voltadmin.d/stop.py
1
3134
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.

# Stop a node. Written to easily support multiple, but configured for
# a single host for now.

from voltcli.hostinfo import Host
from voltcli.hostinfo import Hosts
from voltcli import utility
import sys

@VOLT.Command(
    bundles = VOLT.AdminBundle(),
    description = 'Stop one host of a running VoltDB cluster.',
    arguments = (
        VOLT.StringArgument('target_host', 'the target hostname[:port] or address[:port]. (default port=3021)'),
    ),
)
def stop(runner):
    # Exec @SystemInformation to find out about the cluster.
    response = runner.call_proc('@SystemInformation',
                                [VOLT.FastSerializer.VOLTTYPE_STRING],
                                ['OVERVIEW'])

    # Convert @SystemInformation results to objects.
    hosts = Hosts(runner.abort)
    for tuple in response.table(0).tuples():
        hosts.update(tuple[0], tuple[1], tuple[2])

    # Connect to an arbitrary host that isn't being stopped.
    defaultport = 3021
    min_hosts = 1
    max_hosts = 1
    target_host = utility.parse_hosts(runner.opts.target_host, min_hosts, max_hosts, defaultport)[0]
    (thost, chost) = hosts.get_target_and_connection_host(target_host.host, target_host.port)
    if thost is None:
        runner.abort('Host not found in cluster: %s:%d' % (target_host.host, target_host.port))
    if chost is None:
        runner.abort('The entire cluster is being stopped, use "shutdown" instead.')

    if runner.opts.username:
        user_info = ', user: %s' % runner.opts.username
    else:
        user_info = ''
    runner.info('Connecting to %s:%d%s (%s) to issue "stop" command' %
                (chost.get_admininterface(), chost.adminport, user_info, chost.hostname))
    runner.voltdb_connect(chost.get_admininterface(),
                          chost.adminport,
                          runner.opts.username,
                          runner.opts.password,
                          runner.opts.ssl_config)

    # Stop the requested host using exec @StopNode HOST_ID
    runner.info('Stopping host %d: %s:%s' % (thost.id, thost.hostname, thost.internalport))
    if not runner.opts.dryrun:
        response = runner.call_proc('@StopNode',
                                    [VOLT.FastSerializer.VOLTTYPE_INTEGER],
                                    [thost.id],
                                    check_status=False)
        print response
        if response.status() != 1:  # not SUCCESS
            sys.exit(1)
agpl-3.0
2,137,626,913,957,797,000
40.786667
112
0.64933
false
3.859606
false
false
false
13steinj/praw
praw/objector.py
1
6362
"""Provides the Objector class.""" import re from .exceptions import APIException class Objector(object): """The objector builds :class:`.RedditBase` objects.""" @staticmethod def _camel_to_snake(name): """Return `name` converted from camelCase to snake_case. Code from http://stackoverflow.com/a/1176023/. """ first_break_replaced = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) return re.sub( '([a-z0-9])([A-Z])', r'\1_\2', first_break_replaced).lower() @classmethod def _snake_case_keys(cls, dictionary): """Return a copy of dictionary with keys converted to snake_case. :param dictionary: The dict to be corrected. """ return {cls._camel_to_snake(k): v for k, v in dictionary.items()} def __init__(self, reddit): """Initialize an Objector instance. :param reddit: An instance of :class:`~.Reddit`. """ self.parsers = {} self._reddit = reddit def kind(self, instance): """Return the kind from the instance class. :param instance: An instance of a subclass of RedditBase. """ retval = None for key in self.parsers: if isinstance(instance, self.parsers[key]): retval = key break return retval def _objectify_dict(self, data): """Create RedditBase objects from dicts. :param data: The structured data, assumed to be a dict. :returns: An instance of :class:`~.RedditBase`. """ if {'conversation', 'messages', 'modActions'}.issubset(data): parser = self.parsers['ModmailConversation'] elif {'actionTypeId', 'author', 'date'}.issubset(data): # Modmail mod action data = self._snake_case_keys(data) parser = self.parsers['ModmailAction'] elif {'bodyMarkdown', 'isInternal'}.issubset(data): # Modmail message data = self._snake_case_keys(data) parser = self.parsers['ModmailMessage'] elif {'isAdmin', 'isDeleted'}.issubset(data): # Modmail author data = self._snake_case_keys(data) # Prevent clobbering base-36 id del data['id'] data['is_subreddit_mod'] = data.pop('is_mod') parser = self.parsers[self._reddit.config.kinds['redditor']] elif {'banStatus', 'muteStatus', 'recentComments'}.issubset(data): # Modmail user data = self._snake_case_keys(data) data['created_string'] = data.pop('created') parser = self.parsers[self._reddit.config.kinds['redditor']] elif {'displayName', 'id', 'type'}.issubset(data): # Modmail subreddit data = self._snake_case_keys(data) parser = self.parsers[self._reddit.config.kinds[data['type']]] elif ({'date', 'id', 'name'}.issubset(data) or {'id', 'name', 'permissions'}.issubset(data)): parser = self.parsers[self._reddit.config.kinds['redditor']] elif {'text', 'url'}.issubset(data): if 'color' in data or 'linkUrl' in data: parser = self.parsers['Button'] else: parser = self.parsers['MenuLink'] elif {'children', 'text'}.issubset(data): parser = self.parsers['Submenu'] elif {'height', 'url', 'width'}.issubset(data): parser = self.parsers['Image'] elif {'isSubscribed', 'name', 'subscribers'}.issubset(data): # discards icon and subscribed information return self._reddit.subreddit(data['name']) elif {'authorFlairType', 'name'}.issubset(data): # discards flair information return self._reddit.redditor(data['name']) elif {'parent_id'}.issubset(data): parser = self.parsers[self._reddit.config.kinds['comment']] else: if 'user' in data: parser = self.parsers[self._reddit.config.kinds['redditor']] data['user'] = parser.parse({'name': data['user']}, self._reddit) return data return parser.parse(data, self._reddit) def objectify(self, data): """Create RedditBase objects from data. :param data: The structured data. :returns: An instance of :class:`~.RedditBase`, or ``None`` if given ``data`` is ``None``. 
""" # pylint: disable=too-many-return-statements if data is None: # 204 no content return None if isinstance(data, list): return [self.objectify(item) for item in data] if 'kind' in data and ('shortName' in data or data['kind'] in ('menu', 'moderators')): # This is a widget parser = self.parsers.get(data['kind'], self.parsers['widget']) return parser.parse(data, self._reddit) elif {'kind', 'data'}.issubset(data) and data['kind'] in self.parsers: parser = self.parsers[data['kind']] return parser.parse(data['data'], self._reddit) elif 'json' in data and 'data' in data['json']: if 'things' in data['json']['data']: # Submission.reply return self.objectify(data['json']['data']['things']) if 'url' in data['json']['data']: # Subreddit.submit # The URL is the URL to the submission, so it's removed. del data['json']['data']['url'] parser = self.parsers[self._reddit.config.kinds['submission']] else: parser = self.parsers['LiveUpdateEvent'] return parser.parse(data['json']['data'], self._reddit) elif 'json' in data and 'errors' in data['json']: errors = data['json']['errors'] if len(errors) == 1: raise APIException(*errors[0]) assert not errors elif isinstance(data, dict): return self._objectify_dict(data) return data def register(self, kind, cls): """Register a class for a given kind. :param kind: The kind in the parsed data to map to ``cls``. :param cls: A RedditBase class. """ self.parsers[kind] = cls
bsd-2-clause
7,777,390,700,536,816,000
38.271605
78
0.553128
false
4.088689
true
false
false
vroomfondle/podi
app/controllers/play_controller.py
1
6940
""" Podi, a command-line interface for Kodi. Copyright (C) 2015 Peter Frost <slimeypete@gmail.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from cement.core import controller from lib.podi.rpc.library.movies import list_movies from lib.podi.rpc.player import play_movie, play_episode,\ enable_subtitles, select_subtitle, list_active_players, select_audio, pause_unpause_player from app.errors import JSONResponseError, NoMediaError, MissingArgumentError import argparse class PlayController(controller.CementBaseController): """ Sends RPC calls to Kodi to request playback of media items. """ class Meta: """ Defines metadata for use by the Cement framework. """ label = 'play' description = "Trigger playback of a given media item "\ "(if no item is specified, any currently-playing media items will be paused or unpaused)." stacked_on = 'base' stacked_type = 'nested' arguments = [(['positional_arguments'], dict( action='store', nargs='*', help=argparse.SUPPRESS),), ] @controller.expose(hide=True) def default(self): """ Called when the user uses the 'play' command without arguments. Requests resumption of playback of the current media item. """ players = self.app.send_rpc_request(list_active_players()) for player in players: self.app.log.info("Pausing/unpausing {0}".format(player['type'])) self.app.send_rpc_request(pause_unpause_player(player['playerid'])) if len(players) == 0: raise MissingArgumentError( "No media item was specified for playback, and there are no currently-playing media items to pause/unpause.") @controller.expose(aliases=['movies', 'film', 'films'], help='Play a movie. You must provide a movie id number, e.g.: play movie 127') def movie(self): """ Instructs Kodi to play the movie specified by the user. """ try: movie_id = self.app.pargs.positional_arguments[0] except IndexError: raise MissingArgumentError( 'You must provide a movie id number, e.g.: play movie 127') self.app.log.info("Playing movie {0}".format( movie_id)) try: self.app.send_rpc_request(play_movie(movie_id)) except JSONResponseError as err: if err.error_code == -32602: raise NoMediaError( "Kodi returned an 'invalid parameters' error; this movie may not exist? " "Use 'list episodes' to see available episodes.") else: raise err @controller.expose(aliases=['tvepisode', 'tv_episode'], help='Play a TV episode. You must provide an episode id number, e.g.: play episode 1340') def episode(self): """ Instructs Kodi to play the TV episode specified by the user. """ try: tv_episode_id = self.app.pargs.positional_arguments[0] except IndexError as err: raise MissingArgumentError( 'You must provide an episode id number, e.g.: play movie 127') self.app.log.info("Playing episode {0}".format(tv_episode_id)) try: self.app.send_rpc_request(play_episode(tv_episode_id)) except JSONResponseError as err: if err.error_code == -32602: raise NoMediaError( "Kodi returned an 'invalid parameters' error; this episode may not exist? 
" "Use 'list episodes' to see available episodes.") else: raise err @controller.expose( aliases=['subtitles'], help="Show subtitles. You must provide a subtitle stream id (e.g. play subtitles 2). Use "\ "\"inspect player\" to see a list of available streams.") def subtitle(self): """ Instructs Kodi to display the subtitle track specified by the user. """ try: subtitle_id = self.app.pargs.positional_arguments[0] except IndexError as err: raise MissingArgumentError( "You must provide a subtitle id number, e.g.: play subtitle 2. Use \"inspect player\"" " to see a list of available subtitle streams.") for player in self.app.send_rpc_request(list_active_players()): try: self.app.send_rpc_request(enable_subtitles(player['playerid'])) self.app.send_rpc_request( select_subtitle(subtitle_id, player['playerid'])) except JSONResponseError as err: if err.error_code == -32602: raise NoMediaError( "Kodi returned an 'invalid parameters' error; this stream may not exist? Use " "\"inspect player\" to see a list of available streams.") else: raise err @controller.expose( aliases=['audio_stream'], help="Select an audio stream for the currently-playing video. You must provide a audio stream "\ "id (e.g. play audio 2). Use \"inspect player\" to see a list of available audio streams.") def audio(self): """ Instructs Kodi to play the audio track specified by the user. """ try: audio_id = self.app.pargs.positional_arguments[0] except IndexError as err: raise MissingArgumentError( "You must provide a audio id number, e.g.: play audio 2. Use \"inspect player\" to see " "a list of available audio streams.") for player in self.app.send_rpc_request(list_active_players()): try: self.app.send_rpc_request( select_audio(audio_id, player['playerid'])) except JSONResponseError as err: if err.error_code == -32602: raise NoMediaError( "Kodi returned an 'invalid parameters' error; this stream may not exist? Use " "\"inspect player\" to see a list of available streams.") else: raise err
gpl-3.0
7,164,990,353,190,180,000
42.375
125
0.598847
false
4.497732
false
false
false
shailcoolboy/Warp-Trinity
ResearchApps/Measurement/examples/TxPower_vs_BER/TxPower_vs_BER.py
2
3070
from warpnet_client import *
from warpnet_common_params import *
from warpnet_experiment_structs import *
from twisted.internet import reactor
from datetime import *
import time

minTime = 10
pktLen = 1412
pktPeriod = 2000
mod_hdr = 2
mod_payload = 2
txGains = [30, 45, 60];

class ScriptMaster:
    def startup(self):
        er_log = DataLogger('twoNode_PER_Test_v0.m', flushTime=0)
        er_log.log("%%WARPnet PER Test Example - %s\r\n" % datetime.now())

        registerWithServer()
        nodes = dict()

        #WARP Nodes
        createNode(nodes, Node(0, NODE_PCAP))
        createNode(nodes, Node(1, NODE_PCAP))

        #Node entry for the BER processor app
        createNode(nodes, Node(99, NODE_PCAP))

        connectToServer(nodes)

        controlStruct = ControlStruct()
        nodes[0].addStruct('controlStruct', controlStruct)
        nodes[1].addStruct('controlStruct', controlStruct)

        cmdStructBERen = CommandStruct(COMMANDID_ENABLE_BER_TESTING, 0)
        nodes[0].addStruct('cmdStructBERen', cmdStructBERen)
        nodes[1].addStruct('cmdStructBERen', cmdStructBERen)

        cmdStructStart = CommandStruct(COMMANDID_STARTTRIAL, 0)
        nodes[0].addStruct('cmdStructStart', cmdStructStart)

        cmdStructStop = CommandStruct(COMMANDID_STOPTRIAL, 0)
        nodes[0].addStruct('cmdStructStop', cmdStructStop)

        berStruct = ObserveBERStruct()
        nodes[99].addStruct('berStruct', berStruct, handleUnrequested=True)

        sendRegistrations(nodes)

        controlStruct.packetGeneratorPeriod = pktPeriod
        controlStruct.packetGeneratorLength = pktLen
        controlStruct.channel = 9
        controlStruct.txPower = 63
        controlStruct.modOrderHeader = mod_hdr
        controlStruct.modOrderPayload = mod_payload

        nodes[0].sendToNode('controlStruct')
        nodes[1].sendToNode('controlStruct')

        nodes[0].sendToNode('cmdStructBERen')
        nodes[1].sendToNode('cmdStructBERen')

        #Experiment loop
        for ii, txGain in enumerate(txGains):
            print("Starting trial %d with TxGain %d at %s" % (ii, txGain, datetime.now()))

            #Stop any traffic that might be running
            nodes[0].sendToNode('cmdStructStop')

            #Update the Tx gain at the Tx node
            controlStruct.txPower = txGain
            nodes[0].sendToNode('controlStruct')

            #Clear the internal BER counters
            berStruct.clearBitCounts()

            #Let things settle
            time.sleep(0.25)

            #Start the trial
            nodes[0].sendToNode('cmdStructStart')

            #Run until minTime elapses
            time.sleep(minTime)

            nodes[0].sendToNode('cmdStructStop')

            #Give the nodes and server time to process any final structs
            time.sleep(1)

            #Record the results
            er_log.log("n0_txGain(%d) = %d;\t" % (ii+1, txGain))
            er_log.log("n1_bitsRx(%d) = %d;\t" % (ii+1, berStruct.totalBitsReceived))
            er_log.log("n1_bitErrs(%d) = %d;\r\n" % (ii+1, berStruct.totalBitErrors))

        print("############################################")
        print("############# Experiment Done! #############")
        print("############################################")

        reactor.callFromThread(reactor.stop)

sm = ScriptMaster()

stdio.StandardIO(CmdReader())
factory = WARPnetClient(sm.startup);
reactor.connectTCP('localhost', 10101, factory)
reactor.run()
bsd-2-clause
5,196,952,186,976,665,000
27.425926
81
0.695114
false
3.10101
false
false
false
yangjincai/Xq2EFT
test_eft_calculator.py
1
3471
#!/usr/bin/env python2 import numpy as np from time import time import heapq from matplotlib import pyplot as plt from eft_calculator import EFT_calculator, Water import tools def load_coordinates(name): lines = open('test.dat/random/'+name).readlines()[-7:-1] coors = [[float(item) for item in line.split()[2:5]] for line in lines] return np.array(coors) class Classical_calculator: def __init__(self): self.eps = [0.12, 0.046, 0.046] self.sigma = [1.7, 0.2245, 0.2245] self.charge = [-0.834, 0.417, 0.417] def eval(self, coors): mol = Water() coor0 = coors[:3] coor1 = coors[3:] e = 0. f = np.zeros(3) t = np.zeros(3) com1 = mol.getCOM(coor1) eps, sigma, charge = self.eps, self.sigma, self.charge for i in range(3): for j in range(3): ener, force = self.atomicEF(coor0[i], eps[i], sigma[i], charge[i], coor1[j], eps[j], sigma[j], charge[j]) e += ener f += force t += np.cross(coor1[j]-com1, force) return np.array([e, f[0], f[1], f[2], t[0], t[1], t[2]]) def atomicEF(self, x0, e0, s0, q0, x1, e1, s1, q1): k = 138.935456 e = np.sqrt(e0 * e1) s = s0 + s1 r = np.linalg.norm(x0 - x1) sor6 = (s/r) ** 6 evdw = e * (sor6**2 - 2 * sor6) fvdw = e / r**2 * sor6 * (sor6 - 1) * (x1 - x0) eelec = k * q0 * q1 / r felec = k * q0 * q1 / r**3 * (x1 - x0) ener = evdw + eelec force = fvdw + felec return ener, force def test_random_set(): e0 = [] e1 = [] fce0 = [] fce1 = [] trq0 = [] trq1 = [] all = [] t1 = time() for i in range(2, 2000): # load atomic coor name = 'test%04d.inp' % i coors = load_coordinates(name) # evaluate with analytical function eft = cc.eval(coors) e0.append(eft[0]) fce0 += list(eft[1:4]) trq0 += list(eft[4:7]) # convert atomic coor to r, phi, theta... X0, q0 = calculator.mol.atomic2Xq(coors[:3]) X1, q1 = calculator.mol.atomic2Xq(coors[3:]) # evaluate with calculator eft = calculator.eval(X0, q0, X1, q1) e1.append(eft[0]) fce1 += list(eft[1:4]) trq1 += list(eft[4:7]) #all.append((-np.abs(e0[-1]-e1[-1]), name)) all.append((-np.linalg.norm(np.array(fce0) - np.array(fce1)), name)) t2 = time() print 'took %.1f s to evaluate the random set' % (t2 - t1) heapq.heapify(all) #for i in range(3): # de, name = heapq.heappop(all) # print -de, name # make a plot _, axarr = plt.subplots(1, 3) p = np.corrcoef(e0, e1)[0, 1] print "Energy: p =", p axarr[0].scatter(e0, e1) axarr[0].text(0, 0, 'p=%.4f'%p) p = np.corrcoef(fce0, fce1)[0, 1] print "Force: p =", p axarr[1].scatter(fce0, fce1) axarr[1].text(0, 0, 'p=%.4f'%p) p = np.corrcoef(trq0, trq1)[0, 1] print "Torque: p =", p axarr[2].scatter(trq0, trq1) axarr[2].text(0, 0, 'p=%.4f'%p) plt.savefig('corr.png') if __name__ == '__main__': order = 3 calculator = EFT_calculator(order) t0 = time() cc = Classical_calculator() #calculator.setup('grid_data.txt') calculator.setup() calculator.fill_grid(cc) t1 = time() print 'took %.1f s to fill the grid' % (t1 - t0) test_random_set()
apache-2.0
7,625,391,754,920,505,000
28.666667
121
0.518294
false
2.6557
false
false
false
mariocesar/django-startup
startup/accounts/models.py
1
2514
# coding=utf-8
from django.core.mail import send_mail
from django.db import models
from django.core import validators
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, UserManager, PermissionsMixin


class User(AbstractBaseUser, PermissionsMixin):
    """
    Users within the Django authentication system are represented by this model.

    Username, password and email are required. Other fields are optional.
    """
    username = models.CharField(
        _('username'), max_length=30, unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.'),
        validators=[validators.RegexValidator(r'^[\w.@+-]+$', _('Enter a valid username.'), 'invalid')]
    )
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('email address'), blank=True)
    photo = models.ImageField(upload_to='users', blank=True, null=True)

    is_staff = models.BooleanField(
        _('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    is_active = models.BooleanField(
        _('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)

    objects = UserManager()

    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    @property
    def full_name(self):
        return self.get_full_name()

    def get_absolute_url(self):
        return settings.LOGIN_REDIRECT_URL

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email], **kwargs)
mit
7,783,932,481,574,590,000
31.649351
103
0.639618
false
4.114566
false
false
false
elentarion/RatticWeb
cred/migrations/0035_auto__add_field_cred_expire_time.py
1
7806
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Cred.expire_time' db.add_column(u'cred_cred', 'expire_time', self.gf('django.db.models.fields.PositiveIntegerField')(default=None, null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'Cred.expire_time' db.delete_column(u'cred_cred', 'expire_time') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'cred.cred': { 'Meta': {'object_name': 'Cred'}, 'attachment': ('cred.fields.SizedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'attachment_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 
'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'descriptionmarkdown': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'expire_time': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'child_creds'", 'default': 'None', 'to': u"orm['auth.Group']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}), 'iconname': ('django.db.models.fields.CharField', [], {'default': "'Key.png'", 'max_length': '64'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'latest': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'history'", 'null': 'True', 'to': u"orm['cred.Cred']"}), 'modified': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'ssh_key': ('cred.fields.SizedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'ssh_key_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'child_creds'", 'default': 'None', 'to': u"orm['cred.Tag']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '250', 'null': 'True', 'blank': 'True'}) }, u'cred.credaudit': { 'Meta': {'ordering': "('-time',)", 'object_name': 'CredAudit'}, 'audittype': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'cred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['cred.Cred']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credlogs'", 'to': u"orm['auth.User']"}) }, u'cred.credchangeq': { 'Meta': {'object_name': 'CredChangeQ'}, 'cred': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cred.Cred']", 'unique': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}) }, u'cred.tag': { 'Meta': {'object_name': 'Tag'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}) } } complete_apps = ['cred']
gpl-2.0
8,432,225,457,294,191,000
74.796117
217
0.549321
false
3.557885
false
false
false
birkin/rapid_exports
rapid_app/models.py
1
6176
# -*- coding: utf-8 -*- from __future__ import unicode_literals import codecs, csv, datetime, ftplib, itertools, json, logging, operator, os, pprint, shutil, time, zipfile import MySQLdb # really pymysql; see config/__init__.py import requests from django.conf import settings as project_settings from django.core.urlresolvers import reverse from django.db import models from django.http import HttpResponse, HttpResponseRedirect from django.shortcuts import render from django.utils.encoding import smart_unicode from django.utils.text import slugify from rapid_app import settings_app from sqlalchemy import create_engine as alchemy_create_engine log = logging.getLogger(__name__) ###################### ## django db models ## ###################### class PrintTitleDev( models.Model ): """ Shows the dev db as it _will_ be populated. Db will is populated by another admin task. """ key = models.CharField( max_length=20, primary_key=True ) issn = models.CharField( max_length=15 ) start = models.IntegerField() end = models.IntegerField( blank=True, null=True ) location = models.CharField( 'other--location', max_length=25, blank=True, null=True ) building = models.CharField( max_length=25, blank=True, null=True ) call_number = models.CharField( max_length=50, blank=True, null=True ) date_updated = models.DateTimeField( 'other--date-updated', auto_now=True ) title = models.TextField( 'ss--title', blank=True, null=True ) url = models.TextField( 'ss--url', blank=True, null=True ) def __unicode__(self): return '%s__%s_to_%s' % ( self.issn, self.start, self.end ) # end class PrintTitleDev class ProcessorTracker( models.Model ): """ Tracks current-status and recent-processing. """ current_status = models.CharField( max_length=50, blank=True, null=True ) processing_started = models.DateTimeField( blank=True, null=True ) processing_ended = models.DateTimeField( blank=True, null=True ) recent_processing = models.TextField( blank=True, null=True ) def __unicode__(self): return '{status}__{started}'.format( status=self.current_status, started=self.processing_started ) class Meta: verbose_name_plural = "Processor Tracker" # end class PrintTitleDev ##################### ## regular classes ## ##################### class RapidFileGrabber( object ): """ Transfers Rapid's prepared file from remote to local location. Not-django class. """ def __init__( self, remote_server_name, remote_server_port, remote_server_username, remote_server_password, remote_filepath, local_destination_filepath, local_destination_extract_directory ): self.remote_server_name = remote_server_name self.remote_server_port = remote_server_port self.remote_server_username = remote_server_username self.remote_server_password = remote_server_password self.remote_filepath = remote_filepath self.local_destination_filepath = local_destination_filepath self.local_destination_extract_directory = local_destination_extract_directory def grab_file( self ): """ Grabs file. Called by ProcessFileFromRapidHelper.initiate_work(). 
""" log.debug( 'grab_file() remote_server_name, `%s`; remote_filepath, `%s`; local_destination_filepath, `%s`' % (self.remote_server_name, self.remote_filepath, self.local_destination_filepath) ) client = ftplib.FTP_TLS( timeout=10 ) client.connect( self.remote_server_name, self.remote_server_port ) client.auth() client.prot_p() client.login( self.remote_server_username, self.remote_server_password ) f = open( self.local_destination_filepath, 'wb' ) client.retrbinary( "RETR " + self.remote_filepath, f.write ) f.close() client.quit() return # def grab_file( self ): # """ Grabs file. # Called by ProcessFileFromRapidHelper.initiate_work(). """ # log.debug( 'grab_file() remote_server_name, `%s`; remote_filepath, `%s`; local_destination_filepath, `%s`' % (self.remote_server_name, self.remote_filepath, self.local_destination_filepath) ) # ( sftp, transport ) = ( None, None ) # try: # transport = paramiko.Transport( (self.remote_server_name, 22) ) # transport.connect( username=self.remote_server_username, password=self.remote_server_password ) # sftp = paramiko.SFTPClient.from_transport( transport ) # sftp.get( self.remote_filepath, self.local_destination_filepath ) # except Exception as e: # log.error( 'exception, `%s`' % unicode(repr(e)) ); raise Exception( unicode(repr(e)) ) # return def unzip_file( self ): """ Unzips file. Called by ProcessFileFromRapidHelper.initiate_work(). """ log.debug( 'unzip_file() zipped-filepath, `%s`; unzipped-directory, `%s`' % (self.local_destination_filepath, self.local_destination_extract_directory) ) try: zip_ref = zipfile.ZipFile( self.local_destination_filepath ) except Exception as e: log.error( 'exception, `%s`' % unicode(repr(e)) ); raise Exception( unicode(repr(e)) ) zip_ref.extractall( self.local_destination_extract_directory ) return # end class RapidFileGrabber class ManualDbHandler( object ): """ Backs-up and writes to non-rapid-manager print-titles table. Non-django class. """ def run_sql( self, sql, connection_url ): """ Executes sql. Called by UpdateTitlesHelper._make_backup_table() """ time.sleep( .25 ) log.debug( 'sql, ```%s```' % sql ) engine = alchemy_create_engine( connection_url ) try: return_val = None result = engine.execute( sql ) if 'fetchall' in dir( result.cursor ): return_val = result.cursor.fetchall() result.close() return return_val except Exception as e: log.error( 'exception executing sql, ```{}```'.format(unicode(repr(e))) ) # end class ManualDbHandler
mit
-5,328,917,401,375,825,000
41.888889
201
0.650259
false
3.833644
false
false
false
boudinfl/pke
pke/unsupervised/graph_based/textrank.py
1
6828
# -*- coding: utf-8 -*- # Authors: Ygor Gallina, Florian Boudin # Date: 10-18-2018 """TextRank keyphrase extraction model. Implementation of the TextRank model for keyword extraction described in: * Rada Mihalcea and Paul Tarau. TextRank: Bringing Order into Texts *In Proceedings of EMNLP*, 2004. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import math import logging import networkx as nx from pke.base import LoadFile class TextRank(LoadFile): """TextRank for keyword extraction. This model builds a graph that represents the text. A graph based ranking algorithm is then applied to extract the lexical units (here the words) that are most important in the text. In this implementation, nodes are words of certain part-of-speech (nouns and adjectives) and edges represent co-occurrence relation, controlled by the distance between word occurrences (here a window of 2 words). Nodes are ranked by the TextRank graph-based ranking algorithm in its unweighted variant. Parameterized example:: import pke # define the set of valid Part-of-Speeches pos = {'NOUN', 'PROPN', 'ADJ'} # 1. create a TextRank extractor. extractor = pke.unsupervised.TextRank() # 2. load the content of the document. extractor.load_document(input='path/to/input', language='en', normalization=None) # 3. build the graph representation of the document and rank the words. # Keyphrase candidates are composed from the 33-percent # highest-ranked words. extractor.candidate_weighting(window=2, pos=pos, top_percent=0.33) # 4. get the 10-highest scored candidates as keyphrases keyphrases = extractor.get_n_best(n=10) """ def __init__(self): """Redefining initializer for TextRank.""" super(TextRank, self).__init__() self.graph = nx.Graph() """The word graph.""" def candidate_selection(self, pos=None): """Candidate selection using longest sequences of PoS. Args: pos (set): set of valid POS tags, defaults to ('NOUN', 'PROPN', 'ADJ'). """ if pos is None: pos = {'NOUN', 'PROPN', 'ADJ'} # select sequence of adjectives and nouns self.longest_pos_sequence_selection(valid_pos=pos) def build_word_graph(self, window=2, pos=None): """Build a graph representation of the document in which nodes/vertices are words and edges represent co-occurrence relation. Syntactic filters can be applied to select only words of certain Part-of-Speech. Co-occurrence relations can be controlled using the distance between word occurrences in the document. As the original paper does not give precise details on how the word graph is constructed, we make the following assumptions from the example given in Figure 2: 1) sentence boundaries **are not** taken into account and, 2) stopwords and punctuation marks **are** considered as words when computing the window. Args: window (int): the window for connecting two words in the graph, defaults to 2. pos (set): the set of valid pos for words to be considered as nodes in the graph, defaults to ('NOUN', 'PROPN', 'ADJ'). 
""" if pos is None: pos = {'NOUN', 'PROPN', 'ADJ'} # flatten document as a sequence of (word, pass_syntactic_filter) tuples text = [(word, sentence.pos[i] in pos) for sentence in self.sentences for i, word in enumerate(sentence.stems)] # add nodes to the graph self.graph.add_nodes_from([word for word, valid in text if valid]) # add edges to the graph for i, (node1, is_in_graph1) in enumerate(text): # speed up things if not is_in_graph1: continue for j in range(i + 1, min(i + window, len(text))): node2, is_in_graph2 = text[j] if is_in_graph2 and node1 != node2: self.graph.add_edge(node1, node2) def candidate_weighting(self, window=2, pos=None, top_percent=None, normalized=False): """Tailored candidate ranking method for TextRank. Keyphrase candidates are either composed from the T-percent highest-ranked words as in the original paper or extracted using the `candidate_selection()` method. Candidates are ranked using the sum of their (normalized?) words. Args: window (int): the window for connecting two words in the graph, defaults to 2. pos (set): the set of valid pos for words to be considered as nodes in the graph, defaults to ('NOUN', 'PROPN', 'ADJ'). top_percent (float): percentage of top vertices to keep for phrase generation. normalized (False): normalize keyphrase score by their length, defaults to False. """ if pos is None: pos = {'NOUN', 'PROPN', 'ADJ'} # build the word graph self.build_word_graph(window=window, pos=pos) # compute the word scores using the unweighted PageRank formulae w = nx.pagerank_scipy(self.graph, alpha=0.85, tol=0.0001, weight=None) # generate the phrases from the T-percent top ranked words if top_percent is not None: # warn user as this is not the pke way of doing it logging.warning("Candidates are generated using {}-top".format( top_percent)) # computing the number of top keywords nb_nodes = self.graph.number_of_nodes() to_keep = min(math.floor(nb_nodes * top_percent), nb_nodes) # sorting the nodes by decreasing scores top_words = sorted(w, key=w.get, reverse=True) # creating keyphrases from the T-top words self.longest_keyword_sequence_selection(top_words[:int(to_keep)]) # weight candidates using the sum of their word scores for k in self.candidates.keys(): tokens = self.candidates[k].lexical_form self.weights[k] = sum([w[t] for t in tokens]) if normalized: self.weights[k] /= len(tokens) # use position to break ties self.weights[k] += (self.candidates[k].offsets[0]*1e-8)
gpl-3.0
159,721,785,282,820,700
35.908108
80
0.60208
false
4.278195
false
false
false
diofant/diofant
diofant/domains/realfield.py
1
3327
"""Implementation of :class:`RealField` class.""" from __future__ import annotations import mpmath from ..core import Float from ..polys.polyerrors import CoercionFailed from .characteristiczero import CharacteristicZero from .field import Field from .mpelements import MPContext from .simpledomain import SimpleDomain class RealField(CharacteristicZero, SimpleDomain, Field): """Real numbers up to the given precision.""" rep = 'RR' is_RealField = True is_Exact = False is_Numerical = True _default_precision = 53 @property def has_default_precision(self): return self.precision == self._default_precision @property def precision(self): return self._context.prec @property def dps(self): return self._context.dps @property def tolerance(self): return self._context.tolerance def __new__(cls, prec=_default_precision, dps=None, tol=None): context = MPContext(prec, dps, tol) obj = super().__new__(cls) try: obj.dtype = _reals_cache[(context.prec, context.tolerance)] except KeyError: _reals_cache[(context.prec, context.tolerance)] = obj.dtype = context.mpf context._parent = obj obj._context = context obj._hash = hash((cls.__name__, obj.dtype, context.prec, context.tolerance)) obj.zero = obj.dtype(0) obj.one = obj.dtype(1) return obj def __getnewargs_ex__(self): return (), {'prec': self.precision, 'tol': mpmath.mpf(self.tolerance._mpf_)} def __eq__(self, other): return (isinstance(other, RealField) and self.precision == other.precision and self.tolerance == other.tolerance) def __hash__(self): return self._hash def to_expr(self, element): return Float(element, self.dps) def from_expr(self, expr): number = expr.evalf(self.dps) if number.is_Number: return self.dtype(number) else: raise CoercionFailed(f'expected real number, got {expr}') def _from_PythonIntegerRing(self, element, base): return self.dtype(element) _from_GMPYIntegerRing = _from_PythonIntegerRing def _from_PythonRationalField(self, element, base): return self.dtype(element.numerator) / element.denominator _from_GMPYRationalField = _from_PythonRationalField def _from_AlgebraicField(self, element, base): return self.from_expr(base.to_expr(element)) def _from_RealField(self, element, base): if self == base: return element else: return self.dtype(element) def _from_ComplexField(self, element, base): if not element.imag: return self.dtype(element.real) def to_rational(self, element, limit=True): """Convert a real number to rational number.""" return self._context.to_rational(element, limit) def get_exact(self): from . import QQ return QQ def gcd(self, a, b): return self.one def almosteq(self, a, b, tolerance=None): """Check if ``a`` and ``b`` are almost equal.""" return self._context.almosteq(a, b, tolerance) _reals_cache: dict[tuple, RealField] = {} RR = RealField()
bsd-3-clause
-6,420,955,137,763,290,000
25.616
85
0.621882
false
3.914118
false
false
false
102/rsa
cli.py
1
1981
import argparse

import rsa

"""
python3 cli.py -f key generate -l 8
python3 cli.py -f message encode -k key_public -d encoded
python3 cli.py -f encoded decode -k key_private -d decoded
"""


def generate(args):
    public, private = rsa.get_key_pair(int(args.length))
    with open(args.file + '_public', 'w+') as f:
        f.write(str(public))
    with open(args.file + '_private', 'w+') as f:
        f.write(str(private))


def encode(args):
    with open(args.public_key, 'r') as f:
        public = rsa.PublicKey.fromstring(f.readline().replace('\n', ''))
    with open(args.file, 'rb') as f:
        message = bytearray(f.read())
    with open(args.destination_file, 'wb') as f:
        result = public.encrypt(message)
        f.write(result)


def decode(args):
    with open(args.private_key, 'r') as f:
        private = rsa.PrivateKey.fromstring(f.readline().replace('\n', ''))
    with open(args.file, 'rb') as f:
        message = bytearray(f.read())
    with open(args.destination_file, 'wb') as f:
        result = private.decrypt(message)
        f.write(result)


parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', default='key')
subparsers = parser.add_subparsers()

generate_keys = subparsers.add_parser('generate')
generate_keys.add_argument('-l', '--length', required=True, type=int)
generate_keys.set_defaults(func=generate)

encode_parser = subparsers.add_parser('encode')
encode_parser.add_argument('-k', '--public-key', help='File with public key', required=True)
encode_parser.add_argument('-d', '--destination-file', help='Destination file', required=True)
encode_parser.set_defaults(func=encode)

decode_parser = subparsers.add_parser('decode')
decode_parser.add_argument('-k', '--private-key', help='File with private key', required=True)
decode_parser.add_argument('-d', '--destination-file', help='Destination file', required=True)
decode_parser.set_defaults(func=decode)

args = parser.parse_args()
args.func(args)
unlicense
-665,462,845,845,840,400
32.576271
94
0.669864
false
3.351946
false
false
false
tysonholub/twilio-python
twilio/rest/notify/v1/service/binding.py
1
18370
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import deserialize from twilio.base import serialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page class BindingList(ListResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, service_sid): """ Initialize the BindingList :param Version version: Version that contains the resource :param service_sid: The SID of the Service that the resource is associated with :returns: twilio.rest.notify.v1.service.binding.BindingList :rtype: twilio.rest.notify.v1.service.binding.BindingList """ super(BindingList, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, } self._uri = '/Services/{service_sid}/Bindings'.format(**self._solution) def create(self, identity, binding_type, address, tag=values.unset, notification_protocol_version=values.unset, credential_sid=values.unset, endpoint=values.unset): """ Create a new BindingInstance :param unicode identity: The `identity` value that identifies the new resource's User :param BindingInstance.BindingType binding_type: The type of the Binding :param unicode address: The channel-specific address :param unicode tag: A tag that can be used to select the Bindings to notify :param unicode notification_protocol_version: The protocol version to use to send the notification :param unicode credential_sid: The SID of the Credential resource to be used to send notifications to this Binding :param unicode endpoint: Deprecated :returns: Newly created BindingInstance :rtype: twilio.rest.notify.v1.service.binding.BindingInstance """ data = values.of({ 'Identity': identity, 'BindingType': binding_type, 'Address': address, 'Tag': serialize.map(tag, lambda e: e), 'NotificationProtocolVersion': notification_protocol_version, 'CredentialSid': credential_sid, 'Endpoint': endpoint, }) payload = self._version.create( 'POST', self._uri, data=data, ) return BindingInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def stream(self, start_date=values.unset, end_date=values.unset, identity=values.unset, tag=values.unset, limit=None, page_size=None): """ Streams BindingInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param date start_date: Only include usage that has occurred on or after this date :param date end_date: Only include usage that occurred on or before this date :param unicode identity: The `identity` value of the resources to read :param unicode tag: Only list Bindings that have all of the specified Tags :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. 
min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.notify.v1.service.binding.BindingInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page( start_date=start_date, end_date=end_date, identity=identity, tag=tag, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_limit']) def list(self, start_date=values.unset, end_date=values.unset, identity=values.unset, tag=values.unset, limit=None, page_size=None): """ Lists BindingInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param date start_date: Only include usage that has occurred on or after this date :param date end_date: Only include usage that occurred on or before this date :param unicode identity: The `identity` value of the resources to read :param unicode tag: Only list Bindings that have all of the specified Tags :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.notify.v1.service.binding.BindingInstance] """ return list(self.stream( start_date=start_date, end_date=end_date, identity=identity, tag=tag, limit=limit, page_size=page_size, )) def page(self, start_date=values.unset, end_date=values.unset, identity=values.unset, tag=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of BindingInstance records from the API. Request is executed immediately :param date start_date: Only include usage that has occurred on or after this date :param date end_date: Only include usage that occurred on or before this date :param unicode identity: The `identity` value of the resources to read :param unicode tag: Only list Bindings that have all of the specified Tags :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of BindingInstance :rtype: twilio.rest.notify.v1.service.binding.BindingPage """ params = values.of({ 'StartDate': serialize.iso8601_date(start_date), 'EndDate': serialize.iso8601_date(end_date), 'Identity': serialize.map(identity, lambda e: e), 'Tag': serialize.map(tag, lambda e: e), 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return BindingPage(self._version, response, self._solution) def get_page(self, target_url): """ Retrieve a specific page of BindingInstance records from the API. 
Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of BindingInstance :rtype: twilio.rest.notify.v1.service.binding.BindingPage """ response = self._version.domain.twilio.request( 'GET', target_url, ) return BindingPage(self._version, response, self._solution) def get(self, sid): """ Constructs a BindingContext :param sid: The unique string that identifies the resource :returns: twilio.rest.notify.v1.service.binding.BindingContext :rtype: twilio.rest.notify.v1.service.binding.BindingContext """ return BindingContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __call__(self, sid): """ Constructs a BindingContext :param sid: The unique string that identifies the resource :returns: twilio.rest.notify.v1.service.binding.BindingContext :rtype: twilio.rest.notify.v1.service.binding.BindingContext """ return BindingContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Notify.V1.BindingList>' class BindingPage(Page): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, response, solution): """ Initialize the BindingPage :param Version version: Version that contains the resource :param Response response: Response from the API :param service_sid: The SID of the Service that the resource is associated with :returns: twilio.rest.notify.v1.service.binding.BindingPage :rtype: twilio.rest.notify.v1.service.binding.BindingPage """ super(BindingPage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of BindingInstance :param dict payload: Payload response from the API :returns: twilio.rest.notify.v1.service.binding.BindingInstance :rtype: twilio.rest.notify.v1.service.binding.BindingInstance """ return BindingInstance(self._version, payload, service_sid=self._solution['service_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Notify.V1.BindingPage>' class BindingContext(InstanceContext): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. 
""" def __init__(self, version, service_sid, sid): """ Initialize the BindingContext :param Version version: Version that contains the resource :param service_sid: The SID of the Service to fetch the resource from :param sid: The unique string that identifies the resource :returns: twilio.rest.notify.v1.service.binding.BindingContext :rtype: twilio.rest.notify.v1.service.binding.BindingContext """ super(BindingContext, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, 'sid': sid, } self._uri = '/Services/{service_sid}/Bindings/{sid}'.format(**self._solution) def fetch(self): """ Fetch a BindingInstance :returns: Fetched BindingInstance :rtype: twilio.rest.notify.v1.service.binding.BindingInstance """ params = values.of({}) payload = self._version.fetch( 'GET', self._uri, params=params, ) return BindingInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) def delete(self): """ Deletes the BindingInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._version.delete('delete', self._uri) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Notify.V1.BindingContext {}>'.format(context) class BindingInstance(InstanceResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ class BindingType(object): APN = "apn" GCM = "gcm" SMS = "sms" FCM = "fcm" FACEBOOK_MESSENGER = "facebook-messenger" ALEXA = "alexa" def __init__(self, version, payload, service_sid, sid=None): """ Initialize the BindingInstance :returns: twilio.rest.notify.v1.service.binding.BindingInstance :rtype: twilio.rest.notify.v1.service.binding.BindingInstance """ super(BindingInstance, self).__init__(version) # Marshaled Properties self._properties = { 'sid': payload.get('sid'), 'account_sid': payload.get('account_sid'), 'service_sid': payload.get('service_sid'), 'credential_sid': payload.get('credential_sid'), 'date_created': deserialize.iso8601_datetime(payload.get('date_created')), 'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')), 'notification_protocol_version': payload.get('notification_protocol_version'), 'endpoint': payload.get('endpoint'), 'identity': payload.get('identity'), 'binding_type': payload.get('binding_type'), 'address': payload.get('address'), 'tags': payload.get('tags'), 'url': payload.get('url'), 'links': payload.get('links'), } # Context self._context = None self._solution = {'service_sid': service_sid, 'sid': sid or self._properties['sid'], } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. 
All instance actions are proxied to the context :returns: BindingContext for this BindingInstance :rtype: twilio.rest.notify.v1.service.binding.BindingContext """ if self._context is None: self._context = BindingContext( self._version, service_sid=self._solution['service_sid'], sid=self._solution['sid'], ) return self._context @property def sid(self): """ :returns: The unique string that identifies the resource :rtype: unicode """ return self._properties['sid'] @property def account_sid(self): """ :returns: The SID of the Account that created the resource :rtype: unicode """ return self._properties['account_sid'] @property def service_sid(self): """ :returns: The SID of the Service that the resource is associated with :rtype: unicode """ return self._properties['service_sid'] @property def credential_sid(self): """ :returns: The SID of the Credential resource to be used to send notifications to this Binding :rtype: unicode """ return self._properties['credential_sid'] @property def date_created(self): """ :returns: The RFC 2822 date and time in GMT when the resource was created :rtype: datetime """ return self._properties['date_created'] @property def date_updated(self): """ :returns: The RFC 2822 date and time in GMT when the resource was last updated :rtype: datetime """ return self._properties['date_updated'] @property def notification_protocol_version(self): """ :returns: The protocol version to use to send the notification :rtype: unicode """ return self._properties['notification_protocol_version'] @property def endpoint(self): """ :returns: Deprecated :rtype: unicode """ return self._properties['endpoint'] @property def identity(self): """ :returns: The `identity` value that identifies the new resource's User :rtype: unicode """ return self._properties['identity'] @property def binding_type(self): """ :returns: The type of the Binding :rtype: unicode """ return self._properties['binding_type'] @property def address(self): """ :returns: The channel-specific address :rtype: unicode """ return self._properties['address'] @property def tags(self): """ :returns: The list of tags associated with this Binding :rtype: unicode """ return self._properties['tags'] @property def url(self): """ :returns: The absolute URL of the Binding resource :rtype: unicode """ return self._properties['url'] @property def links(self): """ :returns: The URLs of related resources :rtype: unicode """ return self._properties['links'] def fetch(self): """ Fetch a BindingInstance :returns: Fetched BindingInstance :rtype: twilio.rest.notify.v1.service.binding.BindingInstance """ return self._proxy.fetch() def delete(self): """ Deletes the BindingInstance :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._proxy.delete() def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Notify.V1.BindingInstance {}>'.format(context)
mit
4,937,572,604,946,714,000
34.057252
122
0.609581
false
4.451175
false
false
false
rogerthat-platform/rogerthat-backend
src/rogerthat/pages/payment.py
1
5119
# -*- coding: utf-8 -*- # Copyright 2017 GIG Technology NV # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # @@license_version:1.3@@ import base64 import json import logging import urllib import webapp2 from mcfw.rpc import serialize_complex_value from rogerthat.bizz.payment import get_payment_provider_for_user, get_api_module from rogerthat.bizz.payment.state import finish_login_state, create_login_state, get_login_state from rogerthat.dal.payment import get_payment_provider from rogerthat.rpc import users from rogerthat.settings import get_server_settings from rogerthat.to.payment import AppPaymentProviderTO from rogerthat.utils.app import create_app_user_by_email class PaymentCallbackHandler(webapp2.RequestHandler): def get(self, provider_id, path): params = dict(self.request.GET) logging.debug("PaymentCallbackHandler.GET '%s', at path '%s' with params %s", provider_id, path, params) get_api_module(provider_id).web_callback(self, path, params) def post(self, provider_id, path): params = dict(self.request.POST) logging.debug("PaymentCallbackHandler.POST '%s', at path '%s' with params %s", provider_id, path, params) get_api_module(provider_id).web_callback(self, path, params) class PaymentLoginRedirectHandler(webapp2.RequestHandler): def get(self, provider_id): pp = get_payment_provider(provider_id) if not pp: logging.debug('PaymentLoginRedirectHandler: payment provider not found') self.abort(400) return email = self.request.get("email", None) app_id = self.request.get("app_id", None) app_user = create_app_user_by_email(email, app_id) state = create_login_state(app_user, provider_id) args = { 'state': state, 'response_type': 'code', 'client_id': pp.oauth_settings.client_id, 'scope': pp.oauth_settings.scope, 'redirect_uri': pp.redirect_url(get_server_settings().baseUrl) } url = '%s?%s' % (pp.oauth_settings.authorize_url, urllib.urlencode(args)) logging.debug('Redirecting to %s', url) self.redirect(url.encode('utf-8')) class PaymentLoginAppHandler(webapp2.RequestHandler): def post(self): params = dict(self.request.POST) logging.debug("PaymentLoginAppHandler with params %s", params) user = self.request.headers.get("X-MCTracker-User", None) password = self.request.headers.get("X-MCTracker-Pass", None) if not (user and password): logging.debug("user not provided") self.response.set_status(500) return if not users.set_json_rpc_user(base64.decodestring(user), base64.decodestring(password)): logging.debug("user not set") self.response.set_status(500) return app_user = users.get_current_user() state = params["state"] login_state = get_login_state(state) if app_user != login_state.app_user: self.response.set_status(500) logging.error("%s tried to finish anothers user login %s", app_user, state) return token = get_api_module(login_state.provider_id).handle_code(login_state) logging.debug('Received token: %s', token) if not finish_login_state(state, token): logging.debug("user already finished this login") self.response.set_status(500) return args = {"result": "success", "payment_provider": 
serialize_complex_value( get_payment_provider_for_user(app_user, login_state.provider_id), AppPaymentProviderTO, False)} r = json.dumps(args) self.response.out.write(r) class PaymentTransactionHandler(webapp2.RequestHandler): def get(self, provider_id, transaction_id): logging.debug("PaymentTransactionHandler '%s' for transaction '%s'", provider_id, transaction_id) pp = get_payment_provider(provider_id) if not pp: logging.debug('PaymentTransactionHandler: payment provider not found') self.abort(400) return trans_details = get_api_module(provider_id).get_public_transaction(transaction_id) if not trans_details: logging.debug('PaymentTransactionHandler: transaction not found') self.abort(404) return logging.info('Returning result: %s', trans_details) self.response.headers['Content-Type'] = "application/json" self.response.out.write(json.dumps(trans_details))
apache-2.0
2,898,154,106,186,170,400
39.307087
115
0.664388
false
3.843093
false
false
false
dmisem/dsmblog
pelicanconf.py
1
1851
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals

import os
import sys

SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
sys.path.append(SITE_ROOT)
import local_settings as ls

AUTHOR = ls.AUTHOR
SITENAME = ls.SITENAME
SITEURL = ls.SITEURL
PATH = ls.PATH
TIMEZONE = ls.TIMEZONE
LOCALE = ls.LOCALE
DEFAULT_LANG = ls.DEFAULT_LANG

ARTICLE_URL = 'articles/{lang}/{slug}.html'
ARTICLE_SAVE_AS = ARTICLE_URL
ARTICLE_LANG_URL = ARTICLE_URL
ARTICLE_LANG_SAVE_AS = ARTICLE_URL

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None

# Blogroll
LINKS = (
    ('Pelican', 'http://getpelican.com/'),
    ('Python.org', 'http://python.org/'),
    ('Jinja2', 'http://jinja.pocoo.org/'),
    ('ReStructuredText', 'http://docutils.sourceforge.net/rst.html'),
)

# Social widget
SOCIAL = (
    ('linkedin', 'http://ua.linkedin.com/pub/dmitry-semenov/5/994/a6a', ''),
    ('github', 'https://github.com/dmisem', ''),
    ('bitbucket', 'https://bitbucket.org/dmisem', ''),
    ('e-mail', 'mailto:dmitry.5674@gmail.com', 'envelope'),
)

STATIC_PATHS = ['images', 'img']

DEFAULT_PAGINATION = 10

# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True

THEME = "themes/pelican-bootstrap3"
PYGMENTS_STYLE = "default"
FAVICON = 'img/favicon.ico'
SITELOGO = 'img/dsm.png'
HIDE_SITENAME = True
DISPLAY_TAGS_ON_SIDEBAR = True
DISPLAY_TAGS_INLINE = False
TAG_LEVELS_COUNT = 3

# My settings
TAGS_URL = 'tags.html'
DISPLAY_CATEGORIES_ON_SIDEBAR = False
DISPLAY_RECENT_POSTS_ON_SIDEBAR = False
# PLUGIN_PATHS = [SITE_ROOT + '/plugins']
PLUGIN_PATHS = ['plugins']
PLUGINS = ['tag_cloud']
USE_FOLDER_AS_CATEGORY = True

if __name__ == "__main__":
    d = globals()
    for k in dir():
        print('{0} => {1}'.format(k, d[k]))
mit
-77,352,275,435,833,500
23.355263
77
0.675851
false
2.81307
false
false
false
tum-ens/urbs
urbs/saveload.py
1
2106
import pandas as pd

from .pyomoio import get_entity, list_entities


def create_result_cache(prob):
    entity_types = ['set', 'par', 'var', 'exp']
    if hasattr(prob, 'dual'):
        entity_types.append('con')
    entities = []
    for entity_type in entity_types:
        entities.extend(list_entities(prob, entity_type).index.tolist())

    result_cache = {}
    for entity in entities:
        result_cache[entity] = get_entity(prob, entity)
    return result_cache


def save(prob, filename):
    """Save urbs model input and result cache to a HDF5 store file.

    Args:
        - prob: a urbs model instance containing a solution
        - filename: HDF5 store file to be written

    Returns:
        Nothing
    """
    import warnings
    import tables
    warnings.filterwarnings('ignore',
                            category=pd.io.pytables.PerformanceWarning)
    warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)

    if not hasattr(prob, '_result'):
        prob._result = create_result_cache(prob)

    with pd.HDFStore(filename, mode='w') as store:
        for name in prob._data.keys():
            store['data/'+name] = prob._data[name]

        for name in prob._result.keys():
            store['result/'+name] = prob._result[name]


class ResultContainer(object):
    """ Result/input data container for reporting functions. """
    def __init__(self, data, result):
        self._data = data
        self._result = result


def load(filename):
    """Load a urbs model result container from a HDF5 store file.

    Args:
        filename: an existing HDF5 store file

    Returns:
        prob: the modified instance containing the result cache
    """
    with pd.HDFStore(filename, mode='r') as store:
        data_cache = {}
        for group in store.get_node('data'):
            data_cache[group._v_name] = store[group._v_pathname]

        result_cache = {}
        for group in store.get_node('result'):
            result_cache[group._v_name] = store[group._v_pathname]

    return ResultContainer(data_cache, result_cache)
gpl-3.0
-8,746,768,734,882,240,000
28.25
72
0.616809
false
4.065637
false
false
false
eli-schwartz/pacman
test/pacman/util.py
1
4814
# Copyright (c) 2006 by Aurelien Foret <orelien@chez.com> # Copyright (c) 2006-2018 Pacman Development Team <pacman-dev@archlinux.org> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import re import hashlib import tap SELFPATH = os.path.abspath(os.path.dirname(__file__)) # ALPM PM_ROOT = "/" PM_DBPATH = "var/lib/pacman" PM_SYNCDBPATH = "var/lib/pacman/sync" PM_LOCK = "var/lib/pacman/db.lck" PM_CACHEDIR = "var/cache/pacman/pkg" PM_EXT_PKG = ".pkg.tar.gz" PM_HOOKDIR = "etc/pacman.d/hooks" # Pacman PACCONF = "etc/pacman.conf" # Pactest TMPDIR = "tmp" SYNCREPO = "var/pub" LOGFILE = "var/log/pactest.log" verbose = 0 def vprint(msg): if verbose: tap.diag(msg) # # Methods to generate files # def getfileinfo(filename): data = { 'changed': False, 'isdir': False, 'islink': False, 'link': None, 'hasperms': False, 'perms': None, } if filename[-1] == "*": data["changed"] = True filename = filename.rstrip("*") if filename.find(" -> ") != -1: filename, link = filename.split(" -> ") data["islink"] = True data["link"] = link elif filename.find("|") != -1: filename, perms = filename.split("|") data["hasperms"] = True data["perms"] = int(perms, 8) if filename[-1] == "/": data["isdir"] = True data["filename"] = filename return data def mkfile(base, name, data=""): info = getfileinfo(name) filename = info["filename"] path = os.path.join(base, filename) if info["isdir"]: if not os.path.isdir(path): os.makedirs(path, 0o755) return path dir_path = os.path.dirname(path) if dir_path and not os.path.isdir(dir_path): os.makedirs(dir_path, 0o755) if info["islink"]: os.symlink(info["link"], path) else: writedata(path, data) if info["perms"]: os.chmod(path, info["perms"]) return path def writedata(filename, data): if isinstance(data, list): data = "\n".join(data) fd = open(filename, "w") if data: fd.write(data) if data[-1] != "\n": fd.write("\n") fd.close() def mkcfgfile(filename, root, option, db): # Options data = ["[options]"] for key, value in option.items(): data.extend(["%s = %s" % (key, j) for j in value]) # Repositories # sort by repo name so tests can predict repo order, rather than be # subjects to the whims of python dict() ordering for key in sorted(db.keys()): if key != "local": value = db[key] data.append("[%s]\n" \ "SigLevel = %s\n" \ "Server = file://%s" \ % (value.treename, value.getverify(), \ os.path.join(root, SYNCREPO, value.treename))) for optkey, optval in value.option.items(): data.extend(["%s = %s" % (optkey, j) for j in optval]) mkfile(root, filename, "\n".join(data)) # # MD5 helpers # def getmd5sum(filename): if not os.path.isfile(filename): return "" fd = open(filename, "rb") checksum = hashlib.md5() while 1: block = fd.read(32 * 1024) if not block: break checksum.update(block) fd.close() return checksum.hexdigest() def mkmd5sum(data): checksum = hashlib.md5() checksum.update(("%s\n" % data).encode('utf8')) return checksum.hexdigest() # # Miscellaneous # def which(filename, path=None): if not path: path = 
os.environ["PATH"].split(os.pathsep) for p in path: f = os.path.join(p, filename) if os.access(f, os.F_OK): return f return None def grep(filename, pattern): pat = re.compile(pattern) myfile = open(filename, 'r') for line in myfile: if pat.search(line): myfile.close() return True myfile.close() return False def mkdir(path): if os.path.isdir(path): return elif os.path.isfile(path): raise OSError("'%s' already exists and is not a directory" % path) os.makedirs(path, 0o755)
gpl-2.0
4,919,808,286,097,878,000
24.743316
77
0.583922
false
3.361732
false
false
false
maferelo/saleor
saleor/product/migrations/0072_auto_20180925_1048.py
3
1358
# Generated by Django 2.0.8 on 2018-09-25 15:48

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [("product", "0071_attributechoicevalue_value")]

    operations = [
        migrations.RenameModel(old_name="ProductAttribute", new_name="Attribute"),
        migrations.RenameModel(
            old_name="AttributeChoiceValueTranslation",
            new_name="AttributeValueTranslation",
        ),
        migrations.RenameModel(
            old_name="AttributeChoiceValue", new_name="AttributeValue"
        ),
        migrations.RenameModel(
            old_name="ProductAttributeTranslation", new_name="AttributeTranslation"
        ),
        migrations.RenameField(
            model_name="attributetranslation",
            old_name="product_attribute",
            new_name="attribute",
        ),
        migrations.RenameField(
            model_name="attributevaluetranslation",
            old_name="attribute_choice_value",
            new_name="attribute_value",
        ),
        migrations.AlterUniqueTogether(
            name="attributetranslation",
            unique_together={("language_code", "attribute")},
        ),
        migrations.AlterUniqueTogether(
            name="attributevaluetranslation",
            unique_together={("language_code", "attribute_value")},
        ),
    ]
bsd-3-clause
-7,478,345,584,148,143,000
32.95
83
0.613402
false
4.85
false
false
false
macosforge/ccs-calendarserver
calendarserver/tools/dkimtool.py
1
8493
#!/usr/bin/env python ## # Copyright (c) 2012-2017 Apple Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## import os import sys from Crypto.PublicKey import RSA from StringIO import StringIO from twisted.internet import reactor from twisted.internet.defer import inlineCallbacks from twisted.logger import LogLevel, STDLibLogObserver from twisted.python.usage import Options from twext.python.log import Logger from txweb2.http_headers import Headers from txdav.caldav.datastore.scheduling.ischedule.dkim import RSA256, DKIMRequest, \ PublicKeyLookup, DKIMVerifier, DKIMVerificationError log = Logger() def _doKeyGeneration(options): key = RSA.generate(options["key-size"]) output = key.exportKey() lineBreak = False if options["key"]: with open(options["key"], "w") as f: f.write(output) else: print(output) lineBreak = True output = key.publickey().exportKey() if options["pub-key"]: with open(options["pub-key"], "w") as f: f.write(output) else: if lineBreak: print print(output) lineBreak = True if options["txt"]: output = "".join(output.splitlines()[1:-1]) txt = "v=DKIM1; p=%s" % (output,) if lineBreak: print print(txt) @inlineCallbacks def _doRequest(options): if options["verbose"]: log.levels().setLogLevelForNamespace("txdav.caldav.datastore.scheduling.ischedule.dkim", LogLevel.debug) # Parse the HTTP file with open(options["request"]) as f: request = f.read() method, uri, headers, stream = _parseRequest(request) # Setup signing headers sign_headers = options["signing"] if sign_headers is None: sign_headers = [] for hdr in ("Host", "Content-Type", "Originator", "Recipient+"): if headers.hasHeader(hdr.rstrip("+")): sign_headers.append(hdr) else: sign_headers = sign_headers.split(":") dkim = DKIMRequest( method, uri, headers, stream, options["domain"], options["selector"], options["key"], options["algorithm"], sign_headers, True, True, False, int(options["expire"]), ) if options["fake-time"]: dkim.time = "100" dkim.expire = "200" dkim.message_id = "1" yield dkim.sign() s = StringIO() _writeRequest(dkim, s) print(s.getvalue()) @inlineCallbacks def _doVerify(options): # Parse the HTTP file with open(os.path.expanduser(options["verify"])) as f: verify = f.read() _method, _uri, headers, body = _parseRequest(verify) # Check for local public key if options["pub-key"]: PublicKeyLookup_File.pubkeyfile = os.path.expanduser(options["pub-key"]) lookup = (PublicKeyLookup_File,) else: lookup = None dkim = DKIMVerifier(headers, body, lookup) if options["fake-time"]: dkim.time = 0 try: yield dkim.verify() except DKIMVerificationError, e: print("Verification Failed: %s" % (e,)) else: print("Verification Succeeded") def _parseRequest(request): lines = request.splitlines(True) method, uri, _ignore_version = lines.pop(0).split() hdrs = [] body = None for line in lines: if body is not None: body.append(line) elif line.strip() == "": body = [] elif line[0] in (" ", "\t"): hdrs[-1] += line else: hdrs.append(line) headers = Headers() for hdr in hdrs: name, value = hdr.split(':', 1) headers.addRawHeader(name, 
value.strip()) stream = "".join(body) return method, uri, headers, stream def _writeRequest(request, f): f.write("%s %s HTTP/1.1\r\n" % (request.method, request.uri,)) for name, valuelist in request.headers.getAllRawHeaders(): for value in valuelist: f.write("%s: %s\r\n" % (name, value)) f.write("\r\n") f.write(request.stream.read()) class PublicKeyLookup_File(PublicKeyLookup): method = "*" pubkeyfile = None def getPublicKey(self): """ Do the key lookup using the actual lookup method. """ with open(self.pubkeyfile) as f: data = f.read() return RSA.importKey(data) def usage(e=None): if e: print(e) print("") try: DKIMToolOptions().opt_help() except SystemExit: pass if e: sys.exit(64) else: sys.exit(0) description = """Usage: dkimtool [options] Options: -h Print this help and exit # Key Generation --key-gen Generate private/public key files --key FILE Private key file to create [stdout] --pub-key FILE Public key file to create [stdout] --key-size SIZE Key size [1024] --txt Also generate the public key TXT record --fake-time Use fake t=, x= values when signing and also ignore expiration on verification # Request --request FILE An HTTP request to sign --algorithm ALGO Signature algorithm [rsa-sha256] --domain DOMAIN Signature domain [example.com] --selector SELECTOR Signature selector [dkim] --key FILE Private key to use --signing HEADERS List of headers to sign [automatic] --expire SECONDS When to expire signature [no expiry] # Verify --verify FILE An HTTP request to verify --pkey FILE Public key to use in place of q= lookup Description: This utility is for testing DKIM signed HTTP requests. Key operations are: --key-gen: generate a private/public RSA key. --request: sign an HTTP request. --verify: verify a signed HTTP request. """ class DKIMToolOptions(Options): """ Command-line options for 'calendarserver_dkimtool' """ synopsis = description optFlags = [ ['verbose', 'v', "Verbose logging."], ['key-gen', 'g', "Generate private/public key files"], ['txt', 't', "Also generate the public key TXT record"], ['fake-time', 'f', "Fake time values for signing/verification"], ] optParameters = [ ['key', 'k', None, "Private key file to create [default: stdout]"], ['pub-key', 'p', None, 'Public key file to create [default: stdout]'], ['key-size', 'x', 1024, 'Key size'], ['request', 'r', None, 'An HTTP request to sign'], ['algorithm', 'a', RSA256, 'Signature algorithm'], ['domain', 'd', 'example.com', 'Signature domain'], ['selector', 's', 'dkim', 'Signature selector'], ['signing', 'h', None, 'List of headers to sign [automatic]'], ['expire', 'e', 3600, 'When to expire signature'], ['verify', 'w', None, 'An HTTP request to verify'], ] def __init__(self): super(DKIMToolOptions, self).__init__() self.outputName = '-' @inlineCallbacks def _runInReactor(fn, options): try: yield fn(options) except Exception, e: print(e) finally: reactor.stop() def main(argv=sys.argv, stderr=sys.stderr): options = DKIMToolOptions() options.parseOptions(argv[1:]) # # Send logging output to stdout # observer = STDLibLogObserver() observer.start() if options["verbose"]: log.levels().setLogLevelForNamespace("txdav.caldav.datastore.scheduling.ischedule.dkim", LogLevel.debug) if options["key-gen"]: _doKeyGeneration(options) elif options["request"]: reactor.callLater(0, _runInReactor, _doRequest, options) reactor.run() elif options["verify"]: reactor.callLater(0, _runInReactor, _doVerify, options) reactor.run() else: usage("Invalid options") if __name__ == '__main__': main()
apache-2.0
2,544,725,631,419,095,600
26.134185
112
0.602849
false
3.837777
false
false
false
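The _parseRequest helper in the dkimtool listing above splits a stored HTTP request into a request line, a header block in which continuation lines start with whitespace, and a body separated by a blank line. As a rough standalone illustration of that file format (not the tool itself, and without the twisted Headers class), the same split can be reproduced like this:

# Minimal sketch of the request-file layout expected by --request/--verify:
# request line, headers (folded lines start with a space or tab), blank line, body.
def parse_request_text(text):
    lines = text.splitlines(True)
    method, uri, _version = lines.pop(0).split()
    headers, body = [], None
    for line in lines:
        if body is not None:
            body.append(line)
        elif line.strip() == "":
            body = []                      # blank line ends the header block
        elif line[0] in (" ", "\t"):
            headers[-1] += line            # folded header continuation
        else:
            headers.append(line)
    pairs = [h.split(":", 1) for h in headers]
    return method, uri, {k: v.strip() for k, v in pairs}, "".join(body or [])

example = (
    "POST /ischedule HTTP/1.1\r\n"
    "Host: example.com\r\n"
    "Content-Type: text/calendar;\r\n"
    " charset=utf-8\r\n"
    "\r\n"
    "BEGIN:VCALENDAR\r\n"
)
print(parse_request_text(example))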
cfe-lab/Umberjack
test/simulations/sim_pipeline.py
1
7917
""" The full pipeline for generating simulated population reads for unit testing. Usage: python sim_pipeline.py [config file] """ import subprocess import os import logging import sys import ConfigParser import hyphy.hyphy_handler as hyphy_handler import fasttree.fasttree_handler as fasttree_handler import config.settings as settings settings.setup_logging() LOGGER = logging.getLogger(__name__) def get_path_str(path, pardir): """ If absolute path, then returns the path as is. If relative path, then returns absolute path of concatenated pardir/path :param str path: absolute or relative file or directory path :param str pardir: parent directory to concatenate to path if path is relative directory :return str: absolute resolved path """ if not os.path.isabs(path): return os.path.join(pardir, path) else: return path SECTION = "sim" config_file = sys.argv[1] config = ConfigParser.RawConfigParser() config.read(config_file) OUTDIR = os.path.dirname(config_file) # Output directory for simulated data # Generate Tree SEED = config.getint(SECTION, "SEED") FILENAME_PREFIX = config.get(SECTION, "FILENAME_PREFIX") NUM_CODON_SITES = config.getint(SECTION, "NUM_CODON_SITES") NUM_INDIV = config.getint(SECTION, "NUM_INDIV") treefile = OUTDIR + os.sep + FILENAME_PREFIX + ".nwk" renamed_treefile = OUTDIR + os.sep + FILENAME_PREFIX + ".rename.nwk" if os.path.exists(treefile) and os.path.getsize(treefile) and os.path.exists(renamed_treefile) and os.path.getsize(renamed_treefile): LOGGER.warn("Not regenerating trees {} and {}".format(treefile, renamed_treefile) ) else: asg_driver_exe = os.path.abspath(os.path.dirname(__file__) + os.sep + "asg_driver.py") asg_driver_cmd = ["python", asg_driver_exe, OUTDIR + os.sep + FILENAME_PREFIX, str(NUM_INDIV), str(SEED)] LOGGER.debug("About to execute " + " ".join(asg_driver_cmd)) subprocess.check_call(asg_driver_cmd, env=os.environ) LOGGER.debug("Finished execute ") # Relabel tree nodes to more manageable names. Reformat tree so that indelible can handle it. relabel_phylogeny_exe = os.path.abspath(os.path.dirname(__file__) + os.sep + "relabel_phylogeny.py") relabel_phylogeny_cmd = ["python", relabel_phylogeny_exe, treefile] LOGGER.debug("About to execute " + " ".join(relabel_phylogeny_cmd)) subprocess.check_call(relabel_phylogeny_cmd, env=os.environ) LOGGER.debug("Finished execute ") # Use Indelible to create population sequences at different scaling factors (ie mutation rates) INDELIBLE_BIN_DIR = get_path_str(config.get(SECTION, "INDELIBLE_BIN_DIR"), OUTDIR) INDELIBLE_SCALING_RATES = config.get(SECTION, "INDELIBLE_SCALING_RATES") batch_indelible_exe = os.path.abspath(os.path.dirname(__file__) + "/indelible/batch_indelible.py") indelible_cmd = ["python", batch_indelible_exe, renamed_treefile, # full filepath to tree INDELIBLE_SCALING_RATES, str(SEED), # random seed str(NUM_CODON_SITES), # number of codon sites in genome OUTDIR, # indelible output file directory FILENAME_PREFIX, # Indelible output filename prefix INDELIBLE_BIN_DIR] # indelible bin dir LOGGER.debug("About to execute " + " ".join(indelible_cmd)) subprocess.check_call(indelible_cmd, env=os.environ) LOGGER.debug("Finished execute ") # Create sample genome by concatenating slices of indelible alignments from different mutation rates. 
sample_genomes_fasta = OUTDIR + os.sep + "mixed" + os.sep + FILENAME_PREFIX + ".mixed.fasta" sample_genomes_consensus_fasta = sample_genomes_fasta.replace(".fasta", ".consensus.fasta") if (os.path.exists(sample_genomes_fasta) and os.path.getsize(sample_genomes_fasta) and os.path.exists(sample_genomes_consensus_fasta) and os.path.getsize(sample_genomes_consensus_fasta)): LOGGER.warn("Not regenerating combined sample genome fastas {} and {} ".format(sample_genomes_fasta, sample_genomes_consensus_fasta)) else: sample_genomes_exe = os.path.abspath(os.path.dirname(__file__) + os.sep + "sample_genomes.py") sample_genomes_cmd = ["python", sample_genomes_exe, INDELIBLE_SCALING_RATES, # comma delimited list of mutation scaling rates OUTDIR + os.sep + "mixed", # full filepath of directory for sample_genomes.py output FILENAME_PREFIX + ".mixed", # prefix of sample_genomes.py population sequence output files str(SEED), # random seed str(NUM_CODON_SITES), # number codon sites OUTDIR, # Indelible output directory FILENAME_PREFIX] # INDELible output filename prefix LOGGER.debug("About to execute " + " ".join(sample_genomes_cmd)) subprocess.check_call(sample_genomes_cmd, env=os.environ) LOGGER.debug("Finished execute ") # Simulate MiSeq reads from the population genomes. ART_BIN_DIR = get_path_str(config.get(SECTION, "ART_BIN_DIR"), OUTDIR) ART_QUAL_PROFILE_TSV1 = get_path_str(config.get(SECTION, "ART_QUAL_PROFILE_TSV1"), OUTDIR) ART_QUAL_PROFILE_TSV2 = get_path_str(config.get(SECTION, "ART_QUAL_PROFILE_TSV2"), OUTDIR) ART_FOLD_COVER = config.getint(SECTION, "ART_FOLD_COVER") ART_MEAN_INSERT = config.getint(SECTION, "ART_MEAN_INSERT") ART_STDEV_INSERT = config.getint(SECTION, "ART_STDEV_INSERT") PICARD_BIN_DIR = get_path_str(config.get(SECTION, "PICARD_BIN_DIR"), OUTDIR) BWA_BIN_DIR = get_path_str(config.get(SECTION, "BWA_BIN_DIR"), OUTDIR) PROCS = config.getint(SECTION, "PROCS") art_reads_dir = OUTDIR + os.sep + "mixed" + os.sep + "reads" art_reads_filename_prefix = FILENAME_PREFIX + ".mixed.reads" generate_reads_exe = os.path.abspath(os.path.dirname(__file__) + os.sep + "generate_reads.py") generate_reads_cmd = ["python", generate_reads_exe, ART_BIN_DIR, ART_QUAL_PROFILE_TSV1, ART_QUAL_PROFILE_TSV2, sample_genomes_fasta, sample_genomes_consensus_fasta, art_reads_dir + os.sep + art_reads_filename_prefix, # dir and filename prefix of ART output str(ART_FOLD_COVER), str(ART_MEAN_INSERT), str(ART_STDEV_INSERT), PICARD_BIN_DIR, BWA_BIN_DIR, OUTDIR + os.sep + "mixed" + os.sep + "aln", # BWA output dir str(PROCS), str(SEED), OUTDIR + os.sep + "mixed" + os.sep + FILENAME_PREFIX + ".mixed.rates.csv"] # Indelible mixed mutation rates csv LOGGER.debug("About to execute " + " ".join(generate_reads_cmd)) subprocess.check_call(generate_reads_cmd, env=os.environ) LOGGER.debug("Finished execute ") # For the sample_genomes populations, we lose the true tree branch lengths when we concatenate multiple populations at different scalings together. # Get FastTree to approximate tree for concatenated population sequences. 
FASTTREE_EXE = get_path_str(config.get(SECTION, "FASTTREE_EXE"), OUTDIR) sample_genomes_tree_fname = fasttree_handler.make_tree_repro(fasta_fname=sample_genomes_fasta, intree_fname=renamed_treefile, fastree_exe=FASTTREE_EXE) # Calculate HyPhy dN/dS for the full sample_genomes population fasta HYPHY_EXE = get_path_str(config.get(SECTION, "HYPHY_EXE"), OUTDIR) HYPHY_BASEPATH = get_path_str(config.get(SECTION, "HYPHY_BASEPATH"), OUTDIR) hyphy_handler.calc_dnds(codon_fasta_filename=sample_genomes_fasta, tree_filename=sample_genomes_tree_fname, hyphy_exe=HYPHY_EXE, hyphy_basedir=HYPHY_BASEPATH, threads=PROCS)
bsd-2-clause
-6,855,658,814,491,119,000
46.981818
147
0.664898
false
3.418394
true
false
false
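Each stage of the sim_pipeline script above follows the same guard: skip the subprocess call when its output files already exist and are non-empty, otherwise log the command and run it. A minimal sketch of that pattern, with placeholder file and script names rather than the pipeline's real ones:

import os
import subprocess

def outputs_ready(*paths):
    # Treat a stage as done only if every output exists and has content.
    return all(os.path.exists(p) and os.path.getsize(p) for p in paths)

def run_stage(cmd, *outputs):
    if outputs_ready(*outputs):
        print("Not regenerating " + ", ".join(outputs))
        return
    print("About to execute " + " ".join(cmd))
    subprocess.check_call(cmd, env=os.environ)

# e.g. run_stage(["python", "asg_driver.py", "prefix", "100", "42"], "prefix.nwk")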
pgoeser/gnuradio-mlse
python/testapp.py
1
15170
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2011 Paul Goeser # # This is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr import mlse_swig as mlse import time, random, cmath, math, operator import signal # for correct KeyboardInterrupt redirection import threading import testapp_framework from testapp_framework import * from math import sqrt # this file contains a small testapp that can run simulations of # the mlse equalizer. # you might want to grab the PYTHONPATH that run_tests uses. # Or run it via run_testapp. if __name__=="__main__": import sys, threading try: action = sys.argv[1] except: action = "default" try: if(action=="rewrite"): params = {"simu_length":2000} snrs = range(0,21,2) dispatcher = TestDispatcher(4,params, reuse=False) var = {} var = add_var(var,"noise_amplitude",snr_to_ampl(snrs)) # var = add_var(var,"noise_amplitude",[0.2]*2) results = dispatcher.runjob(var) # bers = extract_results(results,"ber") bers = [ r["ber"] for r in results ] print bers berplot(snrs, bers, "SNR/BER", True) if(action=="cmd"): try: from IPython import embed except: raise RuntimeError("Commandline needs ipython 0.11") myns={} embed(user_ns=myns) locals().update(myns) if(action=="debug"): try: from IPython import embed except: print "This needs ipython 0.11" raise params = {"simu_length":2000} dispatcher = TestDispatcher(1,params, reuse=False) var = [{}] results = dispatcher.runjob(var) results = dispatcher.runjob(var) print results[0]["ber"] myns={} embed(user_ns=myns) locals().update(myns) if(action=="filtercompare"): import filters params = {"simu_length":40000, "channel_delay":8} snrs = range(0,18+1,1) dispatcher = TestDispatcher(4,params, reuse=False) data = [] # correction_factor = {"laur":0.11671196828764578, "proj":0.15779756319255925, "gauss":0.13329449537101956, "dirac":1} # those numbers are the noise power that gets through the filter at 8 samples/symbol names = {"laur":"Laurent","gauss":"Gauss","rrcos":"Wurzel-Nyquist","proj":"Projektion"} for filt in ["laur","proj","gauss", "rrcos"]: print "running",filt var = {"filter_taps":eval("filters."+filt)} # var = add_var(var,"noise_amplitude",snr_to_ampl(snrs, math.sqrt(1/correction_factor[filt]))) var = add_var(var,"noise_amplitude",snr_to_ampl(snrs, 7.97)) results = dispatcher.runjob(var) bers = [ r["ber"] for r in results ] data.append({"legend":names[filt], "bers":bers, "snrs":snrs}) print bers # print results[-1]["gr_vector_sink_x_0"][-6:] # import matplotlib.pyplot as plt # plt.figure(None, figsize=(6,4.2), dpi=300) # important: figure size (in inches) to make it look good # plt.plot(map(lambda x:x.real,results[-1]["gr_vector_sink_x_0"][-6:])) # plt.plot(map(lambda x:x.imag,results[-1]["gr_vector_sink_x_0"][-6:])) # plt.show() berplot2(data, "Vergleich der Empfangsfilter", True, True) if(action=="noisesimu"): import filters params = {} dispatcher = 
TestDispatcher(4,params, reuse=False, topblock="noise_simulation") snrs=[0,20,100] for filt in ["laur","proj","gauss"]: print "running",filt var = [{"filter_taps":eval("filters."+filt)}] var = add_var(var,"noise_amplitude",snr_to_ampl(snrs)) results = dispatcher.runjob(var) data = [i["sink"] for i in results] power= [ sum(d)/len(d) for d in data ] print filt, ": ", power if(action=="default"): import filters params = {"simu_length":4000, "channel_delay":8} snrs = range(0,18+1,1) dispatcher = TestDispatcher(2,params, reuse=False) data = [] var = {"filter_taps":filters.rrcos} var = add_var(var,"noise_amplitude",snr_to_ampl(snrs, 7.97)) results = dispatcher.runjob(var) bers = [ r["ber"] for r in results ] data.append({"legend":"default", "bers":bers, "snrs":snrs}) print bers plt = berplot2(data, "BER simulation", True, True) if(action=="chantaptest"): import filters params = {"simu_length":1000, "channel_delay":0} snrs = [20] dispatcher = TestDispatcher(4,params, reuse=False) data = [] chantaps = ([1]+[0]*7)*6 var = {"filter_taps":filters.proj[16:48], "channel_taps":chantaps} var = add_var(var,"noise_amplitude",snr_to_ampl(snrs, 8)) results = dispatcher.runjob(var) bers = [ r["ber"] for r in results ] data.append({"legend":"default", "bers":bers, "snrs":snrs}) print bers import matplotlib.pyplot as plt plt.figure(None, figsize=(6,4.2), dpi=200) # important: figure size (in inches) to make it look good plt.stem(range(6),map(lambda x:x.real,results[-1]["gr_vector_sink_x_0"][-6:]),"bo-") plt.stem(range(6),map(lambda x:x.imag,results[-1]["gr_vector_sink_x_0"][-6:]),"go-") plt.show() #plt = berplot2(data, "BER simulation", True, True) if(action=="synctest"): # synchronisation test without channel import filters data = [] for delay in range(4,12+1): print "delay: %i"%delay params = {"simu_length":10000, "channel_delay":delay} snrs = range(0,18+1,1) dispatcher = TestDispatcher(4,params, reuse=False) var = {"filter_taps":filters.rrcos} var = add_var(var,"noise_amplitude",snr_to_ampl(snrs, 8)) results = dispatcher.runjob(var) bers = [ r["ber"] for r in results ] data.append({"bers":bers, "snrs":snrs}) print bers plt = berplot2(data, "Synchronisation test",) save(action) if(action=="synctest2"): import filters data = [] for delay in range(5,11+1): print "delay: %i"%delay params = {"simu_length":10000, "channel_delay":delay} channel_taps = ([1]+[0]*7)*5+[1] snrs = range(12,20+1,1) dispatcher = TestDispatcher(2,params, reuse=False) var = {"filter_taps":filters.rrcos[16:48], "channel_taps":channel_taps} var = add_var(var,"noise_amplitude",snr_to_ampl(snrs, 8*6)) results = dispatcher.runjob(var) bers = [ r["ber"] for r in results ] data.append({"bers":bers, "snrs":snrs, "legend":delay}) print bers plt = berplot2(data, "Synchronisation test", bermin=1e-6) save(action) if(action=="entzerrertest"): import filters data = [] channels_per_point = 500 #amount of different channels simulated for each plot-point for chanlen in range(1,6+1): params = {"simu_length":1000, "channel_delay":(8-chanlen)*4} # this centers the measured impulse response in the measuring window snrs = range(0,18+1,1) dispatcher = TestDispatcher(2,params, reuse=False) bers = [0.]*len(snrs) for n in range(channels_per_point): print "still chanlen: %i, channel #%i"%(chanlen, n) channel_taps = [0.]*(chanlen*8 - 7) # "allocate" vector for i in range(chanlen): #channel_taps[i*8] = cmath.rect(1.,2*math.pi*random.random()) # fill it, correctly spaced channel_taps[i*8] = complex(random.gauss(0,1),random.gauss(0,1)) # fill it, correctly spaced # 
calculate energy of the signal after channel import numpy #Eb = 8 * numpy.sum(numpy.absolute(numpy.convolve(filters.rrcos, channel_taps))) f = numpy.convolve(filters.laur, channel_taps) Eb = numpy.sum(numpy.multiply(numpy.conj(f),f)).real # Eb = 8 #TODO: ist das korrekt? print "Eb: %s"%Eb var = {"filter_taps":filters.rrcos[16:48], "channel_taps":channel_taps} var = add_var(var,"noise_amplitude",snr_to_ampl(snrs, Eb)) # here we need correction for the higher signal energy due to the filter results = dispatcher.runjob(var) newbers = [ r["ber"] for r in results ] bers = map(sum,zip(bers, newbers)) # accumulate bers bers = [i/channels_per_point for i in bers] print bers data.append({"legend":chanlen, "bers":bers, "snrs":snrs}) # import matplotlib.pyplot as plt # plt.figure(None, figsize=(6,4.2), dpi=200) # important: figure size (in inches) to make it look good # plt.stem(range(6),map(lambda x:x.real,results[-1]["gr_vector_sink_x_0"][-6:]),"bo-") # plt.stem(range(6),map(lambda x:x.imag,results[-1]["gr_vector_sink_x_0"][-6:]),"go-") # plt.show() # plt = berplot2(data, "Entzerrertest", True, cmdline=False) setdata(data, u"Entzerrertest mit Kanallängen 1-6") savedt(action) action=action+"-plot" if(action=="entzerrertest-plot"): filename="entzerrertest" load(filename) global savedata savedata["title"]=u"Entzerrertest mit Kanallängen 1-6" plt = berplot2() from ber_awgn import ber_awgn, ebno_awgn plt.plot(ebno_awgn, ber_awgn,"k--") plt.legend(title=u"Kanallänge") # repaints legend plt.annotate("AWGN (theoretisch)",(7.2,1e-3), rotation=-52, ha="center", va="center") # a = list(plt.axis()) # a[1] += 1 # plt.axis(a) # make some space for legend saveplt(filename) if(action=="plot"): dispatcher = TestDispatcher(4, blocksize=1000) channel = [0,0,0,1,0,0,0] snrlist = range(0,21,2) res = dispatcher.runjob(snrlist, channel, 1000) print res del dispatcher import matplotlib.pyplot as plt plt.figure(None, figsize=(6,4.2), dpi=300) # important: figure size (in inches) to make it look good plt.semilogy() plt.plot(snrlist, res) plt.grid("on") plt.axis([min(snrlist),max(snrlist),1e-5,1]) plt.title("BER/SNR ohne Mehrwegeausbreitung") plt.xlabel("SNR / dB") plt.ylabel("$p_{BE}$") plt.savefig("ber-snr_nochannel.eps") if(action=="plot_nodecorr"): dispatcher = TestDispatcher(4, blocksize=1000) channel = [0,0,0,1,0,0,0] snrlist = range(0,21) res1 = dispatcher.runjob(snrlist, channel, 2000) dispatcher = TestDispatcher(4, blocksize=1000, no_decorr=True) res2 = dispatcher.runjob(snrlist, channel, 2000) print res1 print res2 del dispatcher import matplotlib.pyplot as plt plt.figure(None, figsize=(6,4.2), dpi=300) # important: figure size (in inches) to make it look good plt.semilogy() plt.plot(snrlist, res1) plt.plot(snrlist, res2) plt.grid("on") plt.axis([min(snrlist),max(snrlist),1e-5,1]) plt.title("BER/SNR ohne Kanal, mit/ohne MLSE") plt.xlabel("SNR / dB") plt.ylabel("$p_{BE}$") plt.savefig("ber-snr_nochannel_decorrP.eps") elif(action=="plot2"): numchan=30 dispatcher = TestDispatcher(4, blocksize=100) snrlist = range(0,21) res=[] import matplotlib.pyplot as plt plt.figure(None, figsize=(6,4.2), dpi=80) # important: figure size (in inches) to make it look good plt.semilogy() plt.grid("on") plt.axis([min(snrlist),max(snrlist),1e-5,1]) plt.title(u'BER/SNR mit Echos zufälliger Phase') plt.xlabel("SNR / dB") plt.ylabel("$p_{BE}$") plt.interactive(True) for numpath in range(1,6+1): chanlist = [ ([0]*((7-numpath)/2)+ [ cmath.rect(1,2*math.pi*random.random()) for i in xrange(numpath)] + [0])[:7] for i in xrange(numchan) ] r = 
[0]*len(snrlist) for channel in chanlist: r = map(operator.add, r, dispatcher.runjob(snrlist, channel, 100)) r = map(lambda x:x/numchan, r) res.append(r) plt.plot(snrlist, r) plt.grid() plt.grid("on") plt.axis([min(snrlist),max(snrlist),1e-5,1]) plt.draw() print res plt.legend((u"Kanallänge 1", u"Kanallänge 2", u"Kanallänge 3", u"Kanallänge 4",u"Kanallänge 5",u"Kanallänge 6"),loc=3) plt.savefig("ber-snr_manual_channel.eps") del dispatcher elif(action=="short"): tester = Tester(blocksize=1000) tester.run(10,1000) finally: print "quitting" # del dispatcher #destructor, if it didn't happen already # clean up any further dispatchers for i in locals().values(): if isinstance(i,TestDispatcher): i.stopworkers()
gpl-3.0
5,299,925,792,086,577,000
42.314286
214
0.53252
false
3.626794
true
false
false
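Most of the actions in the testapp above end with the same matplotlib figure: bit error rate over SNR on a semilog axis. A compressed sketch of that plot, with BER values invented purely so the snippet runs on its own:

import matplotlib.pyplot as plt

snrs = list(range(0, 21, 2))
bers = [8e-2, 5e-2, 3e-2, 1.5e-2, 7e-3, 3e-3, 1e-3, 3e-4, 8e-5, 2e-5, 5e-6]
plt.figure(None, figsize=(6, 4.2), dpi=80)  # figure size in inches
plt.semilogy()
plt.plot(snrs, bers)
plt.grid(True)
plt.axis([min(snrs), max(snrs), 1e-6, 1])
plt.title("BER/SNR example")
plt.xlabel("SNR / dB")
plt.ylabel("$p_{BE}$")
plt.savefig("ber-snr_example.eps")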
clawpack/clawpack-4.x
apps/tsunami/bowl-slosh/setrun.py
1
8919
""" Module to set up run time parameters for Clawpack. The values set in the function setrun are then written out to data files that will be read in by the Fortran code. """ import os from pyclaw import data #------------------------------ def setrun(claw_pkg='geoclaw'): #------------------------------ """ Define the parameters used for running Clawpack. INPUT: claw_pkg expected to be "geoclaw" for this setrun. OUTPUT: rundata - object of class ClawRunData """ assert claw_pkg.lower() == 'geoclaw', "Expected claw_pkg = 'geoclaw'" ndim = 2 rundata = data.ClawRunData(claw_pkg, ndim) #------------------------------------------------------------------ # GeoClaw specific parameters: #------------------------------------------------------------------ rundata = setgeo(rundata) # Defined below #------------------------------------------------------------------ # Standard Clawpack parameters to be written to claw.data: # (or to amr2ez.data for AMR) #------------------------------------------------------------------ clawdata = rundata.clawdata # initialized when rundata instantiated # Set single grid parameters first. # See below for AMR parameters. # --------------- # Spatial domain: # --------------- # Number of space dimensions: clawdata.ndim = ndim # Lower and upper edge of computational domain: clawdata.xlower = -2. clawdata.xupper = 2. clawdata.ylower = -2. clawdata.yupper = 2. # Number of grid cells: clawdata.mx = 41 clawdata.my = 41 # --------------- # Size of system: # --------------- # Number of equations in the system: clawdata.meqn = 3 # Number of auxiliary variables in the aux array (initialized in setaux) clawdata.maux = 3 # Index of aux array corresponding to capacity function, if there is one: clawdata.mcapa = 0 # ------------- # Initial time: # ------------- clawdata.t0 = 0.0 # ------------- # Output times: #-------------- # Specify at what times the results should be written to fort.q files. # Note that the time integration stops after the final output time. # The solution at initial time t0 is always written in addition. clawdata.outstyle = 1 if clawdata.outstyle==1: # Output nout frames at equally spaced times up to tfinal: clawdata.nout = 16 clawdata.tfinal = 4.4857014654663745 elif clawdata.outstyle == 2: # Specify a list of output times. clawdata.tout = [0.5, 1.0] # used if outstyle == 2 clawdata.nout = len(clawdata.tout) elif clawdata.outstyle == 3: # Output every iout timesteps with a total of ntot time steps: iout = 5 ntot = 50 clawdata.iout = [iout, ntot] # --------------------------------------------------- # Verbosity of messages to screen during integration: # --------------------------------------------------- # The current t, dt, and cfl will be printed every time step # at AMR levels <= verbosity. Set verbosity = 0 for no printing. # (E.g. verbosity == 2 means print only on levels 1 and 2.) clawdata.verbosity = 3 # -------------- # Time stepping: # -------------- # if dt_variable==1: variable time steps used based on cfl_desired, # if dt_variable==0: fixed time steps dt = dt_initial will always be used. clawdata.dt_variable = 1 # Initial time step for variable dt. 
# If dt_variable==0 then dt=dt_initial for all steps: clawdata.dt_initial = 0.0001 # Max time step to be allowed if variable dt used: clawdata.dt_max = 1e+99 # Desired Courant number if variable dt used, and max to allow without # retaking step with a smaller dt: clawdata.cfl_desired = 0.75 clawdata.cfl_max = 1.0 # Maximum number of time steps to allow between output times: clawdata.max_steps = 5000 # ------------------ # Method to be used: # ------------------ # Order of accuracy: 1 => Godunov, 2 => Lax-Wendroff plus limiters clawdata.order = 2 # Transverse order for 2d or 3d (not used in 1d): clawdata.order_trans = 2 # Number of waves in the Riemann solution: clawdata.mwaves = 3 # List of limiters to use for each wave family: # Required: len(mthlim) == mwaves clawdata.mthlim = [3,3,3] # Source terms splitting: # src_split == 0 => no source term (src routine never called) # src_split == 1 => Godunov (1st order) splitting used, # src_split == 2 => Strang (2nd order) splitting used, not recommended. clawdata.src_split = 1 # -------------------- # Boundary conditions: # -------------------- # Number of ghost cells (usually 2) clawdata.mbc = 2 # Choice of BCs at xlower and xupper: # 0 => user specified (must modify bcN.f to use this option) # 1 => extrapolation (non-reflecting outflow) # 2 => periodic (must specify this at both boundaries) # 3 => solid wall for systems where q(2) is normal velocity clawdata.mthbc_xlower = 1 clawdata.mthbc_xupper = 1 clawdata.mthbc_ylower = 1 clawdata.mthbc_yupper = 1 # --------------- # AMR parameters: # --------------- # max number of refinement levels: mxnest = 2 clawdata.mxnest = -mxnest # negative ==> anisotropic refinement in x,y,t # List of refinement ratios at each level (length at least mxnest-1) clawdata.inratx = [4,4] clawdata.inraty = [4,4] clawdata.inratt = [2,6] # Instead of setting these ratios, set: # geodata.variable_dt_refinement_ratios = True # in setgeo. # to automatically choose refinement ratios in time based on estimate # of maximum wave speed on all grids at each level. # Specify type of each aux variable in clawdata.auxtype. # This must be a list of length maux, each element of which is one of: # 'center', 'capacity', 'xleft', or 'yleft' (see documentation). clawdata.auxtype = ['center','center','yleft'] clawdata.tol = -1.0 # negative ==> don't use Richardson estimator clawdata.tolsp = 0.5 # used in default flag2refine subroutine # (Not used in geoclaw!) clawdata.kcheck = 3 # how often to regrid (every kcheck steps) clawdata.ibuff = 2 # width of buffer zone around flagged points # More AMR parameters can be set -- see the defaults in pyclaw/data.py return rundata # end of function setrun # ---------------------- #------------------- def setgeo(rundata): #------------------- """ Set GeoClaw specific runtime parameters. For documentation see .... """ try: geodata = rundata.geodata except: print "*** Error, this rundata has no geodata attribute" raise AttributeError("Missing geodata attribute") # == setgeo.data values == geodata.variable_dt_refinement_ratios = True geodata.igravity = 1 geodata.gravity = 9.81 geodata.icoordsys = 1 # == settsunami.data values == geodata.sealevel = -10. geodata.drytolerance = 1.e-3 geodata.wavetolerance = 1.e-2 geodata.depthdeep = 1.e2 geodata.maxleveldeep = 3 geodata.ifriction = 1 geodata.coeffmanning = 0. 
geodata.frictiondepth = 1.e6 # == settopo.data values == geodata.topofiles = [] # for topography, append lines of the form # [topotype, minlevel, maxlevel, t1, t2, fname] geodata.topofiles.append([2, 1, 10, 0., 1.e10, 'bowl.topotype2']) # == setdtopo.data values == geodata.dtopofiles = [] # for moving topography, append lines of the form: (<= 1 allowed for now!) # [minlevel,maxlevel,fname] # == setqinit.data values == geodata.iqinit = 0 geodata.qinitfiles = [] # for qinit perturbations, append lines of the form: (<= 1 allowed for now!) # [minlev, maxlev, fname] # == setregions.data values == geodata.regions = [] # to specify regions of refinement append lines of the form # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2] # == setgauges.data values == geodata.gauges = [] # for gauges append lines of the form [gaugeno, x, y, t1, t2] # == setfixedgrids.data values == geodata.fixedgrids = [] # for fixed grids append lines of the form # [t1,t2,noutput,x1,x2,y1,y2,xpoints,ypoints,\ # ioutarrivaltimes,ioutsurfacemax] #geodata.fixedgrids.append([1., 2., 4, 0., 100., 0., 100., 11, 11, 0, 0]) return rundata # end of function setgeo # ---------------------- if __name__ == '__main__': # Set up run-time parameters and write all data files. import sys if len(sys.argv) == 2: rundata = setrun(sys.argv[1]) else: rundata = setrun() rundata.write()
bsd-3-clause
-2,397,011,731,965,025,000
26.358896
80
0.575737
false
3.542097
false
false
false
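With clawdata.outstyle == 1, as configured in the setrun above, nout frames are written at equally spaced times between t0 and tfinal (plus the always-written frame at t0 itself). The output times implied by this particular configuration can be checked with a short snippet:

t0, tfinal, nout = 0.0, 4.4857014654663745, 16
times = [t0 + (i + 1) * (tfinal - t0) / nout for i in range(nout)]
print(times)  # 16 frame times, the last one equal to tfinal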
zsjohny/jumpserver
apps/users/views/profile/pubkey.py
1
1689
# ~*~ coding: utf-8 ~*~ from django.http import HttpResponse from django.urls import reverse_lazy from django.utils.translation import ugettext as _ from django.views import View from django.views.generic.edit import UpdateView from common.utils import get_logger, ssh_key_gen from common.permissions import ( PermissionsMixin, IsValidUser, UserCanUpdateSSHKey, ) from ... import forms from ...models import User __all__ = [ 'UserPublicKeyUpdateView', 'UserPublicKeyGenerateView', ] logger = get_logger(__name__) class UserPublicKeyUpdateView(PermissionsMixin, UpdateView): template_name = 'users/user_pubkey_update.html' model = User form_class = forms.UserPublicKeyForm permission_classes = [IsValidUser, UserCanUpdateSSHKey] success_url = reverse_lazy('users:user-profile') def get_object(self, queryset=None): return self.request.user def get_context_data(self, **kwargs): context = { 'app': _('Users'), 'action': _('Public key update'), } kwargs.update(context) return super().get_context_data(**kwargs) class UserPublicKeyGenerateView(PermissionsMixin, View): permission_classes = [IsValidUser] def get(self, request, *args, **kwargs): username = request.user.username private, public = ssh_key_gen(username=username, hostname='jumpserver') request.user.public_key = public request.user.save() response = HttpResponse(private, content_type='text/plain') filename = "{0}-jumpserver.pem".format(username) response['Content-Disposition'] = 'attachment; filename={}'.format(filename) return response
gpl-2.0
-3,068,625,369,736,953,300
30.277778
84
0.686797
false
3.992908
false
false
false
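The ssh_key_gen helper used by UserPublicKeyGenerateView is imported from common.utils and is not part of this file. As an assumption about what such a helper produces (an unencrypted RSA private key plus an OpenSSH-format public key), one way to build a comparable pair with the third-party cryptography package is:

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

def generate_ssh_keypair(comment="user@jumpserver"):
    # Hypothetical stand-in for ssh_key_gen(); the real helper may differ.
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    private_pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    ).decode()
    public_ssh = key.public_key().public_bytes(
        encoding=serialization.Encoding.OpenSSH,
        format=serialization.PublicFormat.OpenSSH,
    ).decode() + " " + comment
    return private_pem, public_ssh

private, public = generate_ssh_keypair("gandalf@jumpserver")
print(public)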
coolhacks/python-hacks
examples/codebreaker/vigenereDictionaryHacker.py
1
1270
# Vigenere Cipher Dictionary Hacker # http://inventwithpython.com/hacking (BSD Licensed) import detectEnglish, vigenereCipher, pyperclip def main(): ciphertext = """Tzx isnz eccjxkg nfq lol mys bbqq I lxcz.""" hackedMessage = hackVigenere(ciphertext) if hackedMessage != None: print('Copying hacked message to clipboard:') print(hackedMessage) pyperclip.copy(hackedMessage) else: print('Failed to hack encryption.') def hackVigenere(ciphertext): fo = open('dictionary.txt') words = fo.readlines() fo.close() for word in words: word = word.strip() # remove the newline at the end decryptedText = vigenereCipher.decryptMessage(word, ciphertext) if detectEnglish.isEnglish(decryptedText, wordPercentage=40): # Check with user to see if the decrypted key has been found. print() print('Possible encryption break:') print('Key ' + str(word) + ': ' + decryptedText[:100]) print() print('Enter D for done, or just press Enter to continue breaking:') response = input('> ') if response.upper().startswith('D'): return decryptedText if __name__ == '__main__': main()
mit
432,503,798,185,404,800
31.564103
80
0.624409
false
3.956386
false
false
false
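The vigenereCipher and detectEnglish modules imported above are companion files from the same book and are not shown here. For reference, a textbook letters-only Vigenere decryption of the sample ciphertext, assuming the conventional algorithm those companion modules implement, looks like this:

LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

def decrypt_vigenere(key, ciphertext):
    decrypted = []
    key_index = 0
    key = key.upper()
    for ch in ciphertext:
        num = LETTERS.find(ch.upper())
        if num == -1:
            decrypted.append(ch)           # non-letters pass through unchanged
            continue
        num = (num - LETTERS.find(key[key_index])) % len(LETTERS)
        decrypted.append(LETTERS[num].lower() if ch.islower() else LETTERS[num])
        key_index = (key_index + 1) % len(key)
    return ''.join(decrypted)

# The dictionary word ASTRONOMY turns the sample ciphertext into readable English:
print(decrypt_vigenere('ASTRONOMY', 'Tzx isnz eccjxkg nfq lol mys bbqq I lxcz.'))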
denys-duchier/kivy
kivy/uix/dropdown.py
1
11698
''' Drop-Down List ============== .. versionadded:: 1.4.0 A versatile drop-down list that can be used with custom widgets. It allows you to display a list of widgets under a displayed widget. Unlike other toolkits, the list of widgets can contain any type of widget: simple buttons, images etc. The positioning of the drop-down list is fully automatic: we will always try to place the dropdown list in a way that the user can select an item in the list. Basic example ------------- A button with a dropdown list of 10 possible values. All the buttons within the dropdown list will trigger the dropdown :meth:`DropDown.select` method. After being called, the main button text will display the selection of the dropdown. :: from kivy.uix.dropdown import DropDown from kivy.uix.button import Button from kivy.base import runTouchApp # create a dropdown with 10 buttons dropdown = DropDown() for index in range(10): # when adding widgets, we need to specify the height manually (disabling # the size_hint_y) so the dropdown can calculate the area it needs. btn = Button(text='Value %d' % index, size_hint_y=None, height=44) # for each button, attach a callback that will call the select() method # on the dropdown. We'll pass the text of the button as the data of the # selection. btn.bind(on_release=lambda btn: dropdown.select(btn.text)) # then add the button inside the dropdown dropdown.add_widget(btn) # create a big main button mainbutton = Button(text='Hello', size_hint=(None, None)) # show the dropdown menu when the main button is released # note: all the bind() calls pass the instance of the caller (here, the # mainbutton instance) as the first argument of the callback (here, # dropdown.open.). mainbutton.bind(on_release=dropdown.open) # one last thing, listen for the selection in the dropdown list and # assign the data to the button text. dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x)) runTouchApp(mainbutton) Extending dropdown in Kv ------------------------ You could create a dropdown directly from your kv:: #:kivy 1.4.0 <CustomDropDown>: Button: text: 'My first Item' size_hint_y: None height: 44 on_release: root.select('item1') Label: text: 'Unselectable item' size_hint_y: None height: 44 Button: text: 'My second Item' size_hint_y: None height: 44 on_release: root.select('item2') And then, create the associated python class and use it:: class CustomDropDown(DropDown): pass dropdown = CustomDropDown() mainbutton = Button(text='Hello', size_hint=(None, None)) mainbutton.bind(on_release=dropdown.open) dropdown.bind(on_select=lambda instance, x: setattr(mainbutton, 'text', x)) ''' __all__ = ('DropDown', ) from kivy.uix.scrollview import ScrollView from kivy.properties import ObjectProperty, NumericProperty, BooleanProperty from kivy.core.window import Window from kivy.lang import Builder _grid_kv = ''' GridLayout: size_hint_y: None height: self.minimum_size[1] cols: 1 ''' class DropDownException(Exception): '''DropDownException class. ''' pass class DropDown(ScrollView): '''DropDown class. See module documentation for more information. :Events: `on_select`: data Fired when a selection is done. The data of the selection is passed in as the first argument and is what you pass in the :meth:`select` method as the first argument. `on_dismiss`: .. versionadded:: 1.8.0 Fired when the DropDown is dismissed, either on selection or on touching outside the widget. 
''' auto_width = BooleanProperty(True) '''By default, the width of the dropdown will be the same as the width of the attached widget. Set to False if you want to provide your own width. ''' max_height = NumericProperty(None, allownone=True) '''Indicate the maximum height that the dropdown can take. If None, it will take the maximum height available until the top or bottom of the screen is reached. :attr:`max_height` is a :class:`~kivy.properties.NumericProperty` and defaults to None. ''' dismiss_on_select = BooleanProperty(True) '''By default, the dropdown will be automatically dismissed when a selection has been done. Set to False to prevent the dismiss. :attr:`dismiss_on_select` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' auto_dismiss = BooleanProperty(True) '''By default, the dropdown will be automatically dismissed when a touch happens outside of it, this option allow to disable this feature :attr:`auto_dismiss` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. .. versionadded:: 1.8.0 ''' attach_to = ObjectProperty(allownone=True) '''(internal) Property that will be set to the widget to which the drop down list is attached. The :meth:`open` method will automatically set this property whilst :meth:`dismiss` will set it back to None. ''' container = ObjectProperty() '''(internal) Property that will be set to the container of the dropdown list. It is a :class:`~kivy.uix.gridlayout.GridLayout` by default. ''' __events__ = ('on_select', 'on_dismiss') def __init__(self, **kwargs): self._win = None if 'container' not in kwargs: c = self.container = Builder.load_string(_grid_kv) else: c = None kwargs.setdefault('do_scroll_x', False) if 'size_hint' not in kwargs: kwargs.setdefault('size_hint_x', None) kwargs.setdefault('size_hint_y', None) super(DropDown, self).__init__(**kwargs) if c is not None: super(DropDown, self).add_widget(c) self.on_container(self, c) Window.bind(on_key_down=self.on_key_down) self.fast_bind('size', self._reposition) def on_key_down(self, instance, key, scancode, codepoint, modifiers): if key == 27 and self.get_parent_window(): self.dismiss() return True def on_container(self, instance, value): if value is not None: self.container.bind(minimum_size=self._container_minimum_size) def open(self, widget): '''Open the dropdown list and attach it to a specific widget. Depending on the position of the widget within the window and the height of the dropdown, the dropdown might be above or below that widget. ''' # ensure we are not already attached if self.attach_to is not None: self.dismiss() # we will attach ourself to the main window, so ensure the # widget we are looking for have a window self._win = widget.get_parent_window() if self._win is None: raise DropDownException( 'Cannot open a dropdown list on a hidden widget') self.attach_to = widget widget.bind(pos=self._reposition, size=self._reposition) self._reposition() # attach ourself to the main window self._win.add_widget(self) def dismiss(self, *largs): '''Remove the dropdown widget from the window and detach it from the attached widget. ''' if self.parent: self.parent.remove_widget(self) if self.attach_to: self.attach_to.unbind(pos=self._reposition, size=self._reposition) self.attach_to = None self.dispatch('on_dismiss') def on_dismiss(self): pass def select(self, data): '''Call this method to trigger the `on_select` event with the `data` selection. The `data` can be anything you want. 
''' self.dispatch('on_select', data) if self.dismiss_on_select: self.dismiss() def on_select(self, data): pass def _container_minimum_size(self, instance, size): if self.max_height: self.height = min(size[1], self.max_height) self.do_scroll_y = size[1] > self.max_height else: self.height = size[1] self.do_scroll_y = True def add_widget(self, *largs): if self.container: return self.container.add_widget(*largs) return super(DropDown, self).add_widget(*largs) def remove_widget(self, *largs): if self.container: return self.container.remove_widget(*largs) return super(DropDown, self).remove_widget(*largs) def clear_widgets(self): if self.container: return self.container.clear_widgets() return super(DropDown, self).clear_widgets() def on_touch_down(self, touch): if super(DropDown, self).on_touch_down(touch): return True if self.collide_point(*touch.pos): return True if self.attach_to and self.attach_to.collide_point(*touch.pos): return True if self.auto_dismiss: self.dismiss() def on_touch_up(self, touch): if super(DropDown, self).on_touch_up(touch): return True if 'button' in touch.profile and touch.button.startswith('scroll'): return if self.auto_dismiss: self.dismiss() def _reposition(self, *largs): # calculate the coordinate of the attached widget in the window # coordinate system win = self._win widget = self.attach_to if not widget or not win: return wx, wy = widget.to_window(*widget.pos) wright, wtop = widget.to_window(widget.right, widget.top) # set width and x if self.auto_width: self.width = wright - wx # ensure the dropdown list doesn't get out on the X axis, with a # preference to 0 in case the list is too wide. x = wx if x + self.width > win.width: x = win.width - self.width if x < 0: x = 0 self.x = x # determine if we display the dropdown upper or lower to the widget h_bottom = wy - self.height h_top = win.height - (wtop + self.height) if h_bottom > 0: self.top = wy elif h_top > 0: self.y = wtop else: # none of both top/bottom have enough place to display the # widget at the current size. Take the best side, and fit to # it. height = max(h_bottom, h_top) if height == h_bottom: self.top = wy self.height = wy else: self.y = wtop self.height = win.height - wtop if __name__ == '__main__': from kivy.uix.button import Button from kivy.base import runTouchApp def show_dropdown(button, *largs): dp = DropDown() dp.bind(on_select=lambda instance, x: setattr(button, 'text', x)) for i in range(10): item = Button(text='hello %d' % i, size_hint_y=None, height=44) item.bind(on_release=lambda btn: dp.select(btn.text)) dp.add_widget(item) dp.open(button) def touch_move(instance, touch): instance.center = touch.pos btn = Button(text='SHOW', size_hint=(None, None), pos=(300, 200)) btn.bind(on_release=show_dropdown, on_touch_move=touch_move) runTouchApp(btn)
mit
86,564,783,490,734,420
32.232955
80
0.617712
false
3.949359
false
false
false
praw-dev/praw
praw/models/subreddits.py
1
6211
"""Provide the Subreddits class.""" from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Union from warnings import warn from ..const import API_PATH from . import Subreddit from .base import PRAWBase from .listing.generator import ListingGenerator from .util import stream_generator if TYPE_CHECKING: # pragma: no cover from ... import praw class Subreddits(PRAWBase): """Subreddits is a Listing class that provides various subreddit lists.""" @staticmethod def _to_list(subreddit_list): return ",".join([str(x) for x in subreddit_list]) def default( self, **generator_kwargs: Union[str, int, Dict[str, str]] ) -> Iterator["praw.models.Subreddit"]: """Return a :class:`.ListingGenerator` for default subreddits. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. """ return ListingGenerator( self._reddit, API_PATH["subreddits_default"], **generator_kwargs ) def gold(self, **generator_kwargs) -> Iterator["praw.models.Subreddit"]: """Alias for :meth:`.premium` to maintain backwards compatibility.""" warn( "`subreddits.gold` has be renamed to `subreddits.premium`.", category=DeprecationWarning, stacklevel=2, ) return self.premium(**generator_kwargs) def premium( self, **generator_kwargs: Union[str, int, Dict[str, str]] ) -> Iterator["praw.models.Subreddit"]: """Return a :class:`.ListingGenerator` for premium subreddits. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. """ return ListingGenerator( self._reddit, API_PATH["subreddits_gold"], **generator_kwargs ) def new( self, **generator_kwargs: Union[str, int, Dict[str, str]] ) -> Iterator["praw.models.Subreddit"]: """Return a :class:`.ListingGenerator` for new subreddits. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. """ return ListingGenerator( self._reddit, API_PATH["subreddits_new"], **generator_kwargs ) def popular( self, **generator_kwargs: Union[str, int, Dict[str, str]] ) -> Iterator["praw.models.Subreddit"]: """Return a :class:`.ListingGenerator` for popular subreddits. Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. """ return ListingGenerator( self._reddit, API_PATH["subreddits_popular"], **generator_kwargs ) def recommended( self, subreddits: List[Union[str, "praw.models.Subreddit"]], omit_subreddits: Optional[List[Union[str, "praw.models.Subreddit"]]] = None, ) -> List["praw.models.Subreddit"]: """Return subreddits recommended for the given list of subreddits. :param subreddits: A list of Subreddit instances and/or subreddit names. :param omit_subreddits: A list of Subreddit instances and/or subreddit names to exclude from the results (Reddit's end may not work as expected). """ if not isinstance(subreddits, list): raise TypeError("subreddits must be a list") if omit_subreddits is not None and not isinstance(omit_subreddits, list): raise TypeError("omit_subreddits must be a list or None") params = {"omit": self._to_list(omit_subreddits or [])} url = API_PATH["sub_recommended"].format(subreddits=self._to_list(subreddits)) return [ Subreddit(self._reddit, sub["sr_name"]) for sub in self._reddit.get(url, params=params) ] def search( self, query: str, **generator_kwargs: Union[str, int, Dict[str, str]] ) -> Iterator["praw.models.Subreddit"]: """Return a :class:`.ListingGenerator` of subreddits matching ``query``. Subreddits are searched by both their title and description. :param query: The query string to filter subreddits by. 
Additional keyword arguments are passed in the initialization of :class:`.ListingGenerator`. .. seealso:: :meth:`~.search_by_name` to search by subreddit names """ self._safely_add_arguments(generator_kwargs, "params", q=query) return ListingGenerator( self._reddit, API_PATH["subreddits_search"], **generator_kwargs ) def search_by_name( self, query: str, include_nsfw: bool = True, exact: bool = False ) -> List["praw.models.Subreddit"]: """Return list of Subreddits whose names begin with ``query``. :param query: Search for subreddits beginning with this string. :param include_nsfw: Include subreddits labeled NSFW (default: True). :param exact: Return only exact matches to ``query`` (default: False). """ result = self._reddit.post( API_PATH["subreddits_name_search"], data={"include_over_18": include_nsfw, "exact": exact, "query": query}, ) return [self._reddit.subreddit(x) for x in result["names"]] def search_by_topic(self, query: str) -> List["praw.models.Subreddit"]: """Return list of Subreddits whose topics match ``query``. :param query: Search for subreddits relevant to the search topic. .. note:: As of 09/01/2020, this endpoint always returns 404. """ result = self._reddit.get( API_PATH["subreddits_by_topic"], params={"query": query} ) return [self._reddit.subreddit(x["name"]) for x in result if x.get("name")] def stream( self, **stream_options: Union[str, int, Dict[str, str]] ) -> Iterator["praw.models.Subreddit"]: """Yield new subreddits as they are created. Subreddits are yielded oldest first. Up to 100 historical subreddits will initially be returned. Keyword arguments are passed to :func:`.stream_generator`. """ return stream_generator(self.new, **stream_options)
bsd-2-clause
-2,262,619,277,718,906,000
35.321637
87
0.628884
false
4.105089
false
false
false
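A hypothetical way to exercise the Subreddits helper above from a PRAW script; the credentials are placeholders and must be replaced with a real application's values:

import praw

reddit = praw.Reddit(
    client_id="my-client-id",
    client_secret="my-client-secret",
    user_agent="subreddit-listing-example",
)

# The listing helpers return ListingGenerators, so they can be capped with limit=...
for subreddit in reddit.subreddits.popular(limit=5):
    print(subreddit.display_name)

for subreddit in reddit.subreddits.search("data visualization", limit=5):
    print(subreddit.display_name, subreddit.subscribers)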
jomyhuang/sdwle
SDWLE/agents/trade/possible_play.py
1
6263
from SDWLE.agents.trade.util import Util from functools import reduce class PossiblePlay: def __init__(self, cards, available_mana): if len(cards) == 0: raise Exception("PossiblePlay cards is empty") self.cards = cards self.available_mana = available_mana def card_mana(self): def eff_mana(card): if card.name == "The Coin": return -1 else: return card.mana_cost() return reduce(lambda s, c: s + eff_mana(c), self.cards, 0) def sorted_mana(self): return Util.reverse_sorted(map(lambda c: c.mana_cost(), self.cards)) def wasted(self): return self.available_mana - self.card_mana() def value(self): res = self.card_mana() wasted = self.wasted() if wasted < 0: raise Exception("Too Much Mana") res += wasted * -100000000000 factor = 100000000 for card_mana in self.sorted_mana(): res += card_mana * factor factor = factor / 10 if self.has_hero_power() and self.available_mana < 6: res -= 10000000000000000 if any(map(lambda c: c.name == "The Coin", self.cards)): res -= 100 return res def has_hero_power(self): for card in self.cards: if card.name == 'Hero Power': return True return False def first_card(self): if self.has_hero_power(): for card in self.cards: if card.name == 'Hero Power': return card raise Exception("bad") else: return self.cards[0] def __str__(self): names = [c.name for c in self.cards] s = str(names) return "{} {}".format(s, self.value()) class CoinPlays: def coin(self): cards = [c for c in filter(lambda c: c.name == 'The Coin', self.cards)] return cards[0] def raw_plays_with_coin(self): res = [] if self.has_coin(): coinPlays = self.after_coin().raw_plays() for play in coinPlays: cards = [self.coin()] + play res.append(cards) return res def raw_plays(self): res = [] for play in self.raw_plays_without_coin(): res.append(play) for play in self.raw_plays_with_coin(): res.append(play) return res def has_coin(self): return any(map(lambda c: c.name == "The Coin", self.cards)) def cards_without_coin(self): return Util.filter_out_one(self.cards, lambda c: c.name == "The Coin") def after_coin(self): return PossiblePlays(self.cards_without_coin(), self.mana + 1) def without_coin(self): return PossiblePlays(self.cards_without_coin(), self.mana) class HeroPowerCard: def __init__(self): self.mana = 2 self.name = "Hero Power" self.player = None def can_use(self, player, game): return True def mana_cost(self): return 2 class PossiblePlays(CoinPlays): def __init__(self, cards, mana, allow_hero_power=True): self.cards = cards self.mana = mana self.allow_hero_power = allow_hero_power def possible_is_pointless_coin(self, possible): if len(possible) != 1 or possible[0].name != "The Coin": return False cards_playable_after_coin = [card for card in filter(lambda c: c.mana - 1 == self.mana, self.cards)] return len(cards_playable_after_coin) == 0 def raw_plays_without_coin(self): res = [] def valid_card(card): saved_mana = card.player.mana card.player.mana = self.mana usable = card.can_use(card.player, card.player.game) card.player.mana = saved_mana return usable possible = [card for card in filter(valid_card, self.cards)] if self.possible_is_pointless_coin(possible): possible = [] if self.mana >= 2 and self.allow_hero_power: possible.append(HeroPowerCard()) if len(possible) == 0: return [[]] for card in possible: rest = self.cards[0:99999] if card.name == 'Hero Power': f_plays = PossiblePlays(rest, self.mana - card.mana_cost(), allow_hero_power=False).raw_plays() else: rest.remove(card) f_plays = PossiblePlays(rest, self.mana - card.mana_cost(), allow_hero_power=self.allow_hero_power).raw_plays() for 
following_play in f_plays: combined = [card] + following_play res.append(combined) res = Util.uniq_by_sorted(res) return res def plays_inner(self): res = [PossiblePlay(raw, self.mana) for raw in self.raw_plays() if len(raw) > 0] res = sorted(res, key=PossiblePlay.value) res.reverse() return res def plays(self): return self.plays_inner() def __str__(self): res = [] for play in self.plays(): res.append(play.__str__()) return str.join("\n", res) class PlayMixin: def play_one_card(self, player): if len(player.minions) == 7: return if player.game.game_ended: return allow_hero_power = (not player.hero.power.used) and player.hero.health > 2 plays = PossiblePlays(player.hand, player.mana, allow_hero_power=allow_hero_power).plays() if len(plays) > 0: play = plays[0] if len(play.cards) == 0: raise Exception("play has no cards") card = play.first_card() if card.name == 'Hero Power': player.hero.power.use() else: self.last_card_played = card player.game.play_card(card) return card def play_cards(self, player): card = self.play_one_card(player) if card: self.play_cards(player)
mit
-9,081,206,652,314,761,000
27.085202
108
0.531055
false
3.809611
false
false
false
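Stripped of the game objects it normally works on, the recursive enumeration in PossiblePlays.raw_plays_without_coin boils down to searching over the cards that still fit the remaining mana. A purely illustrative sketch of that idea with plain (name, cost) tuples, not the agent's real card objects or scoring:

def raw_plays(hand, mana):
    playable = [card for card in hand if card[1] <= mana]
    if not playable:
        return [[]]
    plays = []
    for card in playable:
        rest = [c for c in hand if c is not card]
        for following in raw_plays(rest, mana - card[1]):
            plays.append([card] + following)
    return plays

hand = [("Wisp", 0), ("Frostbolt", 2), ("Chillwind Yeti", 4)]
for play in raw_plays(hand, 4):
    print([name for name, _ in play], "uses", sum(cost for _, cost in play), "mana")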
strummerTFIU/TFG-IsometricMaps
LAStools/ArcGIS_toolbox/scripts_production/las2demPro.py
1
8098
# # las2demPro.py # # (c) 2013, martin isenburg - http://rapidlasso.com # rapidlasso GmbH - fast tools to catch reality # # uses las2dem.exe to raster a folder of LiDAR files # # LiDAR input: LAS/LAZ/BIN/TXT/SHP/BIL/ASC/DTM # raster output: BIL/ASC/IMG/TIF/DTM/PNG/JPG # # for licensing see http://lastools.org/LICENSE.txt # import sys, os, arcgisscripting, subprocess def check_output(command,console): if console == True: process = subprocess.Popen(command) else: process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) output,error = process.communicate() returncode = process.poll() return returncode,output ### create the geoprocessor object gp = arcgisscripting.create(9.3) ### report that something is happening gp.AddMessage("Starting las2dem production ...") ### get number of arguments argc = len(sys.argv) ### report arguments (for debug) #gp.AddMessage("Arguments:") #for i in range(0, argc): # gp.AddMessage("[" + str(i) + "]" + sys.argv[i]) ### get the path to LAStools lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0]))) ### make sure the path does not contain spaces if lastools_path.count(" ") > 0: gp.AddMessage("Error. Path to .\\lastools installation contains spaces.") gp.AddMessage("This does not work: " + lastools_path) gp.AddMessage("This would work: C:\\software\\lastools") sys.exit(1) ### complete the path to where the LAStools executables are lastools_path = lastools_path + "\\bin" ### check if path exists if os.path.exists(lastools_path) == False: gp.AddMessage("Cannot find .\\lastools\\bin at " + lastools_path) sys.exit(1) else: gp.AddMessage("Found " + lastools_path + " ...") ### create the full path to the las2dem executable las2dem_path = lastools_path+"\\las2dem.exe" ### check if executable exists if os.path.exists(lastools_path) == False: gp.AddMessage("Cannot find las2dem.exe at " + las2dem_path) sys.exit(1) else: gp.AddMessage("Found " + las2dem_path + " ...") ### create the command string for las2dem.exe command = ['"'+las2dem_path+'"'] ### maybe use '-verbose' option if sys.argv[argc-1] == "true": command.append("-v") ### counting up the arguments c = 1 ### add input LiDAR wildcards = sys.argv[c+1].split() for wildcard in wildcards: command.append("-i") command.append('"' + sys.argv[c] + "\\" + wildcard + '"') c = c + 2 ### maybe use a user-defined step size if sys.argv[c] != "1": command.append("-step") command.append(sys.argv[c].replace(",",".")) c = c + 1 ### maybe use a user-defined kill if sys.argv[c] != "100": command.append("-kill") command.append(sys.argv[c].replace(",",".")) c = c + 1 ### what should we raster if sys.argv[c] == "slope": command.append("-slope") elif sys.argv[c] == "intensity": command.append("-intensity") elif sys.argv[c] == "rgb": command.append("-rgb") c = c + 1 ### what should we output if sys.argv[c] == "hillshade": command.append("-hillshade") elif sys.argv[c] == "gray ramp": command.append("-gray") elif sys.argv[c] == "false colors": command.append("-false") ### do we have special lighting for hillshade if sys.argv[c] == "hillshade": if (sys.argv[c+1] != "north east") or (sys.argv[c+2] != "1 pm"): command.append("-light") if sys.argv[c+1] == "north": command.append("0") command.append("1.41421") elif sys.argv[c+1] == "south": command.append("0") command.append("-1.41421") elif sys.argv[c+1] == "east": command.append("1.41421") command.append("0") elif sys.argv[c+1] == "west": command.append("-1.41421") command.append("0") elif sys.argv[c+1] == 
"north east": command.append("1") command.append("1") elif sys.argv[c+1] == "south east": command.append("1") command.append("-1") elif sys.argv[c+1] == "north west": command.append("-1") command.append("1") else: ### if sys.argv[c+1] == "south west" command.append("-1") command.append("-1") if sys.argv[c+2] == "noon": command.append("100") elif sys.argv[c+2] == "1 pm": command.append("2") elif sys.argv[c+2] == "3 pm": command.append("1") elif sys.argv[c+2] == "6 pm": command.append("0.5") else: ### if sys.argv[c+2] == "9 pm" command.append("0.1") ### do we have a min max value for colors if (sys.argv[c] == "gray ramp") or (sys.argv[c] == "false colors"): if (sys.argv[c+3] != "#") and (sys.argv[c+4] != "#"): command.append("-set_min_max") command.append(sys.argv[c+3].replace(",",".")) command.append(sys.argv[c+4].replace(",",".")) c = c + 5 ### what should we triangulate if sys.argv[c] == "ground points only": command.append("-keep_class") command.append("2") command.append("-extra_pass") elif sys.argv[c] == "ground and keypoints": command.append("-keep_class") command.append("2") command.append("8") command.append("-extra_pass") elif sys.argv[c] == "ground and buildings": command.append("-keep_class") command.append("2") command.append("6") command.append("-extra_pass") elif sys.argv[c] == "ground and vegetation": command.append("-keep_class") command.append("2") command.append("3") command.append("4") command.append("5") command.append("-extra_pass") elif sys.argv[c] == "ground and objects": command.append("-keep_class") command.append("2") command.append("3") command.append("4") command.append("5") command.append("6") command.append("-extra_pass") elif sys.argv[c] == "last return only": command.append("-last_only") command.append("-extra_pass") elif sys.argv[c] == "first return only": command.append("-first_only") command.append("-extra_pass") c = c + 1 ### should we use the tile bounding box if sys.argv[c] == "true": command.append("-use_tile_bb") c = c + 1 ### do we have lakes if sys.argv[c] != "#": command.append("-lakes") command.append('"'+sys.argv[c]+'"') c = c + 1 ### do we have creeks if sys.argv[c] != "#": command.append("-creeks") command.append('"'+sys.argv[c]+'"') c = c + 1 ### maybe an output format was selected if sys.argv[c] != "#": command.append("-o" + sys.argv[c]) c = c + 1 ### maybe an output directory was selected if sys.argv[c] != "#": command.append("-odir") command.append('"'+sys.argv[c]+'"') c = c + 1 ### maybe an output appendix was selected if sys.argv[c] != "#": command.append("-odix") command.append('"'+sys.argv[c]+'"') c = c + 1 ### maybe we should run on multiple cores if sys.argv[c] != "1": command.append("-cores") command.append(sys.argv[c]) c = c + 1 ### maybe there are additional input options if sys.argv[c] != "#": additional_options = sys.argv[c].split() for option in additional_options: command.append(option) ### report command string gp.AddMessage("LAStools command line:") command_length = len(command) command_string = str(command[0]) command[0] = command[0].strip('"') for i in range(1, command_length): command_string = command_string + " " + str(command[i]) command[i] = command[i].strip('"') gp.AddMessage(command_string) ### run command returncode,output = check_output(command, False) ### report output of las2dem gp.AddMessage(str(output)) ### check return code if returncode != 0: gp.AddMessage("Error. las2dem failed.") sys.exit(1) ### report happy end gp.AddMessage("Success. las2dem done.")
mit
-6,832,320,602,952,765,000
28.104089
130
0.587059
false
3.131477
false
false
false
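The las2demPro.py entry above assembles the las2dem.exe command line argument by argument and runs it through a small check_output() helper wrapping subprocess.Popen. Below is a minimal, self-contained sketch of that wrapper pattern; run_command and the smoke-test invocation are hypothetical simplifications (the original helper additionally supports attaching to the console and passes shell=True):

import subprocess

def run_command(command):
    # Capture combined stdout/stderr plus the exit code of a command,
    # simplified from the check_output() helper in the entry above.
    process = subprocess.Popen(command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True)
    output, _ = process.communicate()
    return process.returncode, output

# Hypothetical smoke test; las2dem.exe itself is not required.
returncode, output = run_command(["python", "--version"])
print(returncode)
print(output)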
mozilla/kuma
kuma/users/adapters.py
1
13369
from allauth.account.adapter import DefaultAccountAdapter, get_adapter from allauth.account.models import EmailAddress from allauth.account.utils import cleanup_email_addresses from allauth.exceptions import ImmediateHttpResponse from allauth.socialaccount.adapter import DefaultSocialAccountAdapter from allauth.socialaccount.models import SocialLogin from django import forms from django.contrib import messages from django.contrib.auth import get_user_model from django.db.models import Q from django.shortcuts import redirect, render from django.utils.cache import add_never_cache_headers from django.utils.translation import ugettext_lazy as _ from waffle import switch_is_active from kuma.core.urlresolvers import reverse from .constants import USERNAME_CHARACTERS, USERNAME_REGEX from .models import UserBan REMOVE_BUG_URL = ( 'https://bugzilla.mozilla.org/enter_bug.cgi?' '&product=developer.mozilla.org' '&component=User%20management' '&short_desc=Account%20deletion%20request%20for%20[username]' '&comment=Please%20delete%20my%20MDN%20account.%20My%20username%20is%3A' '%0D%0A%0D%0A[username]' '&status_whiteboard=[account-mod]' '&defined_groups=1' '&groups=mozilla-employee-confidential') REMOVE_MESSAGE = _("Sorry, you must have at least one connected account so " "you can sign in. To disconnect this account connect a " "different one first. To delete your MDN profile please " '<a href="%(bug_form_url)s" rel="nofollow">file a bug</a>.') USERNAME_EMAIL = _('An email address cannot be used as a username.') class KumaAccountAdapter(DefaultAccountAdapter): def is_open_for_signup(self, request): """ We disable the signup with regular accounts as we require GitHub (for now) """ return False def clean_username(self, username): """ When signing up make sure the username isn't already used by a different user, and doesn't contain invalid characters. """ # We have stricter username requirements than django-allauth, # because we don't want to allow '@' in usernames. So we check # that before calling super() to make sure we catch those # problems and show our error messages. if '@' in username: raise forms.ValidationError(USERNAME_EMAIL) if not USERNAME_REGEX.match(username): raise forms.ValidationError(USERNAME_CHARACTERS) username = super(KumaAccountAdapter, self).clean_username(username) if get_user_model().objects.filter(username=username).exists(): raise forms.ValidationError(_('The username you entered ' 'already exists.')) return username def message_templates(self, *names): return tuple('messages/%s.txt' % name for name in names) def add_message(self, request, level, message_template, message_context={}, extra_tags='', *args, **kwargs): """ Adds an extra "account" tag to the success and error messages. """ # let's ignore some messages if message_template.endswith(self.message_templates('logged_in', 'logged_out')): return # promote the "account_connected" message to success if message_template.endswith(self.message_templates('account_connected')): level = messages.SUCCESS # when a next URL is set because of a multi step sign-in # (e.g. 
sign-in with github, verified mail is found in other # social accounts, agree to first log in with other to connect # instead) and the next URL is not the edit profile page (which # would indicate the start of the sign-in process from the edit # profile page) we ignore the message "account connected" message # as it would be misleading # Bug 1229906#c2 - need from "create new account" page user_url = reverse('users.user_edit', kwargs={'username': request.user.username}) next_url = request.session.get('sociallogin_next_url', None) if next_url != user_url: return # and add an extra tag to the account messages extra_tag = 'account' if extra_tags: extra_tags += ' ' extra_tags += extra_tag super(KumaAccountAdapter, self).add_message(request, level, message_template, message_context, extra_tags, *args, **kwargs) def save_user(self, request, user, form, commit=True): super(KumaAccountAdapter, self).save_user(request, user, form, commit=False) is_github_url_public = form.cleaned_data.get('is_github_url_public') user.is_github_url_public = is_github_url_public if commit: # pragma: no cover # commit will be True, unless extended by a derived class user.save() return user class KumaSocialAccountAdapter(DefaultSocialAccountAdapter): def is_open_for_signup(self, request, sociallogin): """ We specifically enable social accounts as a way to signup because the default adapter uses the account adpater above as the default. """ allowed = True if switch_is_active('registration_disabled'): allowed = False # bug 1291892: Don't confuse next login with connecting accounts if not allowed: for key in ('socialaccount_sociallogin', 'sociallogin_provider'): try: del request.session[key] except KeyError: # pragma: no cover pass return allowed def validate_disconnect(self, account, accounts): """ Validate whether or not the socialaccount account can be safely disconnected. """ if len(accounts) == 1: raise forms.ValidationError(REMOVE_MESSAGE % {'bug_form_url': REMOVE_BUG_URL}) def pre_social_login(self, request, sociallogin): """ Invoked just after a user successfully authenticates via a social provider, but before the login is actually processed. We use it to: 1. Check if the user is connecting accounts via signup page 2. store the name of the socialaccount provider in the user's session. TODO: When legacy Persona sessions are cleared (Nov 1 2016), this function can be simplified. """ session_login_data = request.session.get('socialaccount_sociallogin', None) request_login = sociallogin # Is there already a sociallogin_provider in the session? if session_login_data: session_login = SocialLogin.deserialize(session_login_data) # If the provider in the session is different from the provider in the # request, the user is connecting a new provider to an existing account if session_login.account.provider != request_login.account.provider: # Does the request sociallogin match an existing user? if not request_login.is_existing: # go straight back to signup page with an error message # BEFORE allauth over-writes the session sociallogin level = messages.ERROR message = "socialaccount/messages/account_not_found.txt" get_adapter().add_message(request, level, message) raise ImmediateHttpResponse( redirect('socialaccount_signup') ) # Is the user banned? 
if sociallogin.is_existing: bans = UserBan.objects.filter(user=sociallogin.user, is_active=True) if bans.exists(): banned_response = render(request, 'users/user_banned.html', { 'bans': bans, 'path': request.path }) add_never_cache_headers(banned_response) raise ImmediateHttpResponse(banned_response) # sociallogin_provider is used in the UI to indicate what method was # used to login to the website. The session variable # 'socialaccount_sociallogin' has the same data, but will be dropped at # the end of login. request.session['sociallogin_provider'] = (sociallogin .account.provider) request.session.modified = True def get_connect_redirect_url(self, request, socialaccount): """ Returns the default URL to redirect to after successfully connecting a social account. """ assert request.user.is_authenticated user_url = reverse('users.user_edit', kwargs={'username': request.user.username}) return user_url def save_user(self, request, sociallogin, form=None): """ Checks for an existing user (via verified email addresses within the social login object) and, if one is found, associates the incoming social account with that existing user instead of a new user. It also removes the "socialaccount_sociallogin" key from the session. If the "socialaccount_sociallogin" key remains in the session, then the user will be unable to connect a second account unless they log out and log in again. (TODO: Check if this part of the method is still needed/used. I suspect not.) """ # We have to call get_existing_user() again. The result of the earlier # call (within the is_auto_signup_allowed() method), can't be cached as # an attribute on the instance because a different instance of this # class is used when calling this method from the one used when calling # is_auto_signup_allowed(). user = get_existing_user(sociallogin) if user: # We can re-use an existing user instead of creating a new one. # Let's guarantee this user has an unusable password, just in case # we're recovering an old user that has never had this done before. user.set_unusable_password() # This associates this new social account with the existing user. sociallogin.connect(request, user) # Since the "connect" call above does not add any email addresses # from the social login that are missing from the user's current # associated set, let's add them here. add_user_email(request, user, sociallogin.email_addresses) # Now that we've successfully associated a GitHub/Google social # account with this existing user, let's delete all of the user's # associated Persona social accounts (if any). Users may have # multiple associated Persona social accounts (each identified # by a unique email address). user.socialaccount_set.filter(provider='persona').delete() else: user = super().save_user(request, sociallogin, form) try: del request.session['socialaccount_sociallogin'] except KeyError: # pragma: no cover pass return user def is_auto_signup_allowed(self, request, sociallogin): """ We allow "auto-signup" (basically skipping the sign-up form page) only if there is an existing user that we can re-use instead of creating a new one. """ return bool(get_existing_user(sociallogin)) def get_existing_user(sociallogin): """ Attempts to find an existing user that is associated with a verified email address that matches one of the verified email addresses within the "sociallogin" object. 
""" emails = Q() for email_address in sociallogin.email_addresses: if email_address.verified: emails |= Q(emailaddress__email=email_address.email) if emails: # Users can have multiple associated EmailAddress objects, so # let's use "distinct()" to remove any duplicate users. users = list(get_user_model().objects .filter(emails, emailaddress__verified=True) .distinct()) # For now, we're only going to return a user if there's only one. if len(users) == 1: return users[0] return None def add_user_email(request, user, addresses): """ This is based on allauth.account.utils.setup_user_email, but targets the addition of email-address objects to an existing user. """ for a in cleanup_email_addresses(request, addresses)[0]: if not EmailAddress.objects.filter(user=user, email=a.email).exists(): a.user = user a.save()
mpl-2.0
-4,814,583,395,379,713,000
43.862416
83
0.612686
false
4.676111
false
false
false
vponomaryov/rally
tests/unit/plugins/openstack/wrappers/test_keystone.py
1
8687
# Copyright 2014: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystoneclient import exceptions import mock from rally.plugins.openstack.wrappers import keystone from tests.unit import test class KeystoneWrapperTestBase(object): def test_list_services(self): service = mock.MagicMock() service.id = "fake_id" service.name = "Foobar" service.extra_field = "extra_field" self.client.services.list.return_value = [service] result = list(self.wrapped_client.list_services()) self.assertEqual([("fake_id", "Foobar")], result) self.assertEqual("fake_id", result[0].id) self.assertEqual("Foobar", result[0].name) self.assertFalse(hasattr(result[0], "extra_field")) def test_wrap(self): client = mock.MagicMock() client.version = "dummy" self.assertRaises(NotImplementedError, keystone.wrap, client) def test_delete_service(self): self.wrapped_client.delete_service("fake_id") self.client.services.delete.assert_called_once_with("fake_id") def test_list_roles(self): role = mock.MagicMock() role.id = "fake_id" role.name = "Foobar" role.extra_field = "extra_field" self.client.roles.list.return_value = [role] result = list(self.wrapped_client.list_roles()) self.assertEqual([("fake_id", "Foobar")], result) self.assertEqual("fake_id", result[0].id) self.assertEqual("Foobar", result[0].name) self.assertFalse(hasattr(result[0], "extra_field")) def test_delete_role(self): self.wrapped_client.delete_role("fake_id") self.client.roles.delete.assert_called_once_with("fake_id") class KeystoneV2WrapperTestCase(test.TestCase, KeystoneWrapperTestBase): def setUp(self): super(KeystoneV2WrapperTestCase, self).setUp() self.client = mock.MagicMock() self.client.version = "v2.0" self.wrapped_client = keystone.wrap(self.client) def test_create_project(self): self.wrapped_client.create_project("Foobar") self.client.tenants.create.assert_called_once_with("Foobar") def test_create_project_in_non_default_domain_fail(self): self.assertRaises( NotImplementedError, self.wrapped_client.create_project, "Foobar", "non-default-domain") def test_delete_project(self): self.wrapped_client.delete_project("fake_id") self.client.tenants.delete.assert_called_once_with("fake_id") def test_list_projects(self): tenant = mock.MagicMock() tenant.id = "fake_id" tenant.name = "Foobar" tenant.extra_field = "extra_field" self.client.tenants.list.return_value = [tenant] result = list(self.wrapped_client.list_projects()) self.assertEqual([("fake_id", "Foobar", "default")], result) self.assertEqual("fake_id", result[0].id) self.assertEqual("Foobar", result[0].name) self.assertEqual("default", result[0].domain_id) self.assertFalse(hasattr(result[0], "extra_field")) def test_create_user(self): self.wrapped_client.create_user("foo", "bar", email="foo@bar.com", project_id="tenant_id", domain_name="default") self.client.users.create.assert_called_once_with( "foo", "bar", "foo@bar.com", "tenant_id") def test_create_user_in_non_default_domain_fail(self): self.assertRaises( NotImplementedError, self.wrapped_client.create_user, "foo", "bar", 
email="foo@bar.com", project_id="tenant_id", domain_name="non-default-domain") def test_delete_user(self): self.wrapped_client.delete_user("fake_id") self.client.users.delete.assert_called_once_with("fake_id") def test_list_users(self): user = mock.MagicMock() user.id = "fake_id" user.name = "foo" user.tenantId = "tenant_id" user.extra_field = "extra_field" self.client.users.list.return_value = [user] result = list(self.wrapped_client.list_users()) self.assertEqual([("fake_id", "foo", "tenant_id", "default")], result) self.assertEqual("fake_id", result[0].id) self.assertEqual("foo", result[0].name) self.assertEqual("tenant_id", result[0].project_id) self.assertEqual("default", result[0].domain_id) self.assertFalse(hasattr(result[0], "extra_field")) class KeystoneV3WrapperTestCase(test.TestCase, KeystoneWrapperTestBase): def setUp(self): super(KeystoneV3WrapperTestCase, self).setUp() self.client = mock.MagicMock() self.client.version = "v3" self.wrapped_client = keystone.wrap(self.client) self.client.domains.get.side_effect = exceptions.NotFound self.client.domains.list.return_value = [ mock.MagicMock(id="domain_id")] def test_create_project(self): self.wrapped_client.create_project("Foobar", "domain") self.client.projects.create.assert_called_once_with( name="Foobar", domain="domain_id") def test_create_project_with_non_existing_domain_fail(self): self.client.domains.list.return_value = [] self.assertRaises(exceptions.NotFound, self.wrapped_client.create_project, "Foobar", "non-existing-domain") def test_delete_project(self): self.wrapped_client.delete_project("fake_id") self.client.projects.delete.assert_called_once_with("fake_id") def test_list_projects(self): project = mock.MagicMock() project.id = "fake_id" project.name = "Foobar" project.domain_id = "domain_id" project.extra_field = "extra_field" self.client.projects.list.return_value = [project] result = list(self.wrapped_client.list_projects()) self.assertEqual([("fake_id", "Foobar", "domain_id")], result) self.assertEqual("fake_id", result[0].id) self.assertEqual("Foobar", result[0].name) self.assertEqual("domain_id", result[0].domain_id) self.assertFalse(hasattr(result[0], "extra_field")) def test_create_user(self): fake_role = mock.MagicMock(id="fake_role_id") fake_role.name = "__member__" self.client.roles.list.return_value = [fake_role] self.client.users.create.return_value = mock.MagicMock( id="fake_user_id") self.wrapped_client.create_user( "foo", "bar", email="foo@bar.com", project_id="project_id", domain_name="domain") self.client.users.create.assert_called_once_with( name="foo", password="bar", email="foo@bar.com", default_project="project_id", domain="domain_id") def test_create_user_with_non_existing_domain_fail(self): self.client.domains.list.return_value = [] self.assertRaises(exceptions.NotFound, self.wrapped_client.create_user, "foo", "bar", email="foo@bar.com", project_id="project_id", domain_name="non-existing-domain") def test_delete_user(self): self.wrapped_client.delete_user("fake_id") self.client.users.delete.assert_called_once_with("fake_id") def test_list_users(self): user = mock.MagicMock() user.id = "fake_id" user.name = "foo" user.default_project_id = "project_id" user.domain_id = "domain_id" user.extra_field = "extra_field" self.client.users.list.return_value = [user] result = list(self.wrapped_client.list_users()) self.assertEqual([("fake_id", "foo", "project_id", "domain_id")], result) self.assertEqual("fake_id", result[0].id) self.assertEqual("foo", result[0].name) self.assertEqual("project_id", 
result[0].project_id) self.assertEqual("domain_id", result[0].domain_id) self.assertFalse(hasattr(result[0], "extra_field"))
apache-2.0
-867,412,063,852,557,000
40.764423
78
0.626453
false
3.749245
true
false
false
xbcsmith/frell
test/json_obj.py
1
1900
import logging

log = logging.getLogger()
logging.basicConfig(level=logging.DEBUG)


class StorageObject(object):
    def __init__(self, storage):
        self._dict = storage

    def get(self, fieldName):
        return self._dict.get(fieldName)

    def set(self, fieldName, value):
        return self._dict.__setitem__(fieldName, value)


class Field(object):
    def __init__(self, fieldName, fieldType):
        self.fieldName = fieldName
        self.fieldType = fieldType

    def __get__(self, instance, owner):
        log.debug("Calling __get__ for %s", self.fieldName)
        val = instance._dict.get(self.fieldName)
        if issubclass(self.fieldType, (int, str)):
            return val
        if val is None:
            val = instance._dict[self.fieldName] = {}
        return self.fieldType(val)

    def __set__(self, instance, value):
        log.debug("Calling __set__ for %s", self.fieldName)
        if isinstance(instance, StorageObject):
            return instance.set(self.fieldName, value)
        instance._dict[self.fieldName] = value


class Location(StorageObject):
    city = Field('city', str)
    zip = Field('zip', str)


class User(StorageObject):
    username = Field('username', str)
    uid = Field('uid', int)
    location = Field('location', Location)


class JsonObject(StorageObject):
    tag = Field('tag', str)
    created_by = Field('created_by', User)
    modified_by = Field('modified_by', User)


j = JsonObject({'a': 1, 'created_by': {'username': 'miiban', 'uid': 500}})
print "Created by:", j.created_by
print "Modified by:", j.modified_by
print "Modified by username:", j.modified_by.username
j.modified_by.username = 'bcsmit'
j.modified_by.uid = 501
print "Modified by username:", j.modified_by.username
print "Modified by zip:", j.modified_by.location.zip
j.modified_by.location.zip = 27511
print "Modified by zip:", j.modified_by.location.zip
apache-2.0
8,738,986,348,364,620,000
31.20339
78
0.647895
false
3.551402
false
false
false
Arkapravo/morse-0.6
src/morse/sensors/stereo_unit.py
1
2329
import logging; logger = logging.getLogger("morse." + __name__)

from morse.core.services import async_service
import morse.core.sensor
import bge
from functools import partial


class StereoUnitClass(morse.core.sensor.MorseSensorClass):
    """ Base for stereo pairs

    It is used to link two camera objects, and export the images
    as a stereo pair.
    """

    def __init__(self, obj, parent=None):
        """ Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info('%s initialization' % obj.name)
        # Call the constructor of the parent class
        super(self.__class__, self).__init__(obj, parent)

        self.num_cameras = 0
        self.camera_list = []
        # Create a list of the cameras attached to this component
        for child in obj.children:
            # Skip this object if it is not a component
            # It is most likely just a geometric shape object
            try:
                child['Component_Tag']
            except KeyError as detail:
                continue

            camera_name = child.name
            # Store only the name of the camera
            # All data from the camera can be accessed later
            # by using bge.logic.componentDict[camera_name],
            # which will return the instance of the camera object
            self.camera_list.append(camera_name)
            self.num_cameras += 1

        logger.info("Stereo Unit has %d cameras" % self.num_cameras)
        logger.info('Component initialized')

    def capture_completion(self, answer):
        self._expected_answer -= 1
        if self._expected_answer == 0:
            status, res = answer
            self.completed(status, res)

    def interrupt(self):
        for camera in self.camera_list:
            camera_instance = bge.logic.componentDict[camera]
            camera_instance.interrupt()

    @async_service
    def capture(self, n):
        self._expected_answer = self.num_cameras
        for camera in self.camera_list:
            camera_instance = bge.logic.componentDict[camera]
            camera_instance.capture(partial(self.capture_completion), n)

    def default_action(self):
        """ Main function of this component. """
        pass
bsd-3-clause
-2,536,339,978,277,546,000
34.287879
72
0.617003
false
4.345149
false
false
false
SumiTomohiko/Yog
tests/test_dict.py
1
2284
# -*- coding: utf-8 -*- from testcase import TestCase class TestDict(TestCase): def test_literal0(self): self._test(""" d = {} puts(d.size) """, """0 """) def test_literal5(self): self._test(""" d = { 42: 26 } puts(d[42]) """, """26 """) def test_literal10(self): self._test(""" d = { 42: 26, } puts(d[42]) """, """26 """) def test_literal20(self): self._test(""" d = { 42: 26, "foo": "bar" } puts(d[42]) """, """26 """) def test_literal30(self): self._test(""" d = { 'foo: "bar" } puts(d['foo]) """, """bar """) def test_dict0(self): self._test(""" def foo(d) puts(d[42]) end d = Dict.new() d[42] = 26 foo(d) """, """26 """) def test_dict10(self): self._test(""" def foo(d) puts(d[4611686018427387904]) end d = Dict.new() d[4611686018427387904] = 42 foo(d) """, """42 """) def test_dict20(self): self._test(""" def foo(d) puts(d["foo"]) end d = Dict.new() d["foo"] = 42 foo(d) """, """42 """) def test_KeyError0(self): def test_stderr(stderr): self._test_regexp(r"""Traceback \(most recent call last\): File "[^"]+", line 3, in <package> KeyError: .* """, stderr) self._test(""" d = Dict.new() puts(d["foo"]) """, stderr=test_stderr) def test_add0(self): self._test(""" print(({} + {}).size) """, "0") def test_add10(self): self._test(""" print(({ 'foo: 42 } + { 'bar: 26 }).size) """, "2") def test_add20(self): self._test(""" print(({ 'foo: 42 } + { 'bar: 26 })['foo]) """, "42") def test_add30(self): self._test(""" print(({ 'foo: 42 } + { 'bar: 26 })['bar]) """, "26") def test_each0(self): self._test(""" d = { 'foo: 'bar } d.each() do |key, value| print(key.inspect()) end """, "'foo") def test_each10(self): self._test(""" d = { 'foo: 'bar } d.each() do |key, value| print(value.inspect()) end """, "'bar") def test_get0(self): self._test(""" print({ 42: 26 }.get(42)) """, "26") def test_get10(self): self._test(""" print({ 42: 26 }.get(\"foo\")) """, "nil") def test_get20(self): self._test(""" print({ 42: 26 }.get(\"foo\", \"bar\")) """, "bar") # vim: tabstop=4 shiftwidth=4 expandtab softtabstop=4
mit
-2,695,812,920,068,500,500
15.198582
70
0.487741
false
2.775213
true
false
false
Mangara/ArboralExplorer
lib/Cmpl/cmplServer/cmplServer/CmplServerTools.py
1
3351
#***********************************************************************
#  This code is part of CmplServer
#
#  Copyright (C) 2013, 2014
#  Mike Steglich - Technical University of Applied Sciences
#  Wildau, Germany
#
#  CmplServer is a project of the Technical University of
#  Applied Sciences Wildau and the Institute for Operations Research
#  and Business Management at the Martin Luther University
#  Halle-Wittenberg.
#  Please visit the project homepage <www.coliop.org>
#
#  CmplServer is free software; you can redistribute it and/or modify it
#  under the terms of the GNU Lesser General Public License as published by
#  the Free Software Foundation; either version 3 of the License, or
#  (at your option) any later version.
#
#  CmplServer is distributed in the hope that it will be useful, but WITHOUT
#  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
#  or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
#  License for more details.
#
#  You should have received a copy of the GNU Lesser General Public License
#  along with this program; if not, see <http://www.gnu.org/licenses/>.
#
#**********************************************************************

#!/usr/bin/python

from SocketServer import ThreadingMixIn
from SimpleXMLRPCServer import SimpleXMLRPCServer
from time import gmtime, strftime

from pyCmpl.CmplDefs import *

import os
import time
import string
import platform


#*************** CmplXmlRpcServer ********************************
class CmplXmlRpcServer(ThreadingMixIn, SimpleXMLRPCServer):

    #*********** process_request **********
    def process_request(self, request, client_address):
        self.client_address = client_address
        return SimpleXMLRPCServer.process_request(self, request, client_address)
    #*********** end process_request ******

#*************** end CmplXmlRpcServer ****************************


#*************** CmplServerTools ***************************************
class CmplServerTools(object):

    #*********** cmplLogging ***********
    @staticmethod
    def cmplLogging(logFile, msg, id=None, name=None):
        try:
            if id==None and name==None:
                logFile.write("[" + strftime("%Y-%m-%d %H:%M:%S", gmtime())+ "] - " + msg+"\n")
            elif name==None:
                #logFile.write( string.split(id,"-")[0] + " - [" + strftime("%Y-%m-%d %H:%M:%S", gmtime())+ "] - " + id + " <"+msg+">\n")
                logFile.write( "[" + strftime("%Y-%m-%d %H:%M:%S", gmtime())+ "] - " + id + " - <"+msg+">\n")
            else:
                #logFile.write( string.split(id,"-")[0] + " - [" + strftime("%Y-%m-%d %H:%M:%S", gmtime())+ "] - " + id + " - " + name + " <"+msg+">\n")
                logFile.write( "[" + strftime("%Y-%m-%d %H:%M:%S", gmtime())+ "] - " + id + " - " + name + " <"+msg+">\n")
            logFile.flush()
        except IOError, e:
            pass
            #raise Exception("IO error for solution or log file ")
    #*********** end cmplLogging *******

    #*********** readFileContent **********
    @staticmethod
    def readFileContent(fileName):
        try:
            f = open(fileName, "r")
            lines = f.read()
            f.close()
            return lines
        except IOError, e:
            raise Exception("IO error for file "+fileName)
    #*********** end readFileContent ******

#*************** end CmplServerTools ***********************************
apache-2.0
539,508,423,461,816,500
31.230769
140
0.563414
false
3.527368
false
false
false
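The CmplServerTools entry above exposes two static helpers, cmplLogging() and readFileContent(). A short usage sketch follows, under the assumption that the module is importable as cmplServer.CmplServerTools (the log file name and job id are made up; Python 2 syntax to match the entry):

from cmplServer.CmplServerTools import CmplServerTools

logFile = open("cmplserver.log", "a")

# Timestamped message, message tagged with a job id, and message with id and problem name.
CmplServerTools.cmplLogging(logFile, "CmplServer started")
CmplServerTools.cmplLogging(logFile, "solving", id="12345-1")
CmplServerTools.cmplLogging(logFile, "finished", id="12345-1", name="diet.cmpl")
logFile.close()

print CmplServerTools.readFileContent("cmplserver.log")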
Lana-B/Pheno4T
madanalysis/install/install_matplotlib.py
1
5864
################################################################################ # # Copyright (C) 2012-2013 Eric Conte, Benjamin Fuks # The MadAnalysis development team, email: <ma5team@iphc.cnrs.fr> # # This file is part of MadAnalysis 5. # Official website: <https://launchpad.net/madanalysis5> # # MadAnalysis 5 is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # MadAnalysis 5 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with MadAnalysis 5. If not, see <http://www.gnu.org/licenses/> # ################################################################################ from madanalysis.install.install_service import InstallService from shell_command import ShellCommand import os import sys import logging class InstallMatplotlib: def __init__(self,main): self.main = main self.installdir = os.path.normpath(self.main.archi_info.ma5dir+'/tools/matplotlib/') self.toolsdir = os.path.normpath(self.main.archi_info.ma5dir+'/tools') self.tmpdir = self.main.session_info.tmpdir self.downloaddir = self.main.session_info.downloaddir self.untardir = os.path.normpath(self.tmpdir + '/MA5_matplotlib/') self.ncores = 1 self.files = {"matplotlib.tar.gz" : "http://sourceforge.net/projects/matplotlib/files/matplotlib/matplotlib-1.3.1/matplotlib-1.3.1.tar.gz"} def Detect(self): if not os.path.isdir(self.toolsdir): logging.debug("The folder '"+self.toolsdir+"' is not found") return False if not os.path.isdir(self.installdir): logging.debug("The folder "+self.installdir+"' is not found") return False return True def Remove(self,question=True): from madanalysis.IOinterface.folder_writer import FolderWriter return FolderWriter.RemoveDirectory(self.installdir,question) def CreatePackageFolder(self): if not InstallService.create_tools_folder(self.toolsdir): return False if not InstallService.create_package_folder(self.toolsdir,'matplotlib'): return False return True def CreateTmpFolder(self): ok = InstallService.prepare_tmp(self.untardir, self.downloaddir) if ok: self.tmpdir=self.untardir return ok def Download(self): # Checking connection with MA5 web site if not InstallService.check_ma5site(): return False # Launching wget logname = os.path.normpath(self.installdir+'/wget.log') if not InstallService.wget(self.files,logname,self.downloaddir): return False # Ok return True def Unpack(self): # Logname logname = os.path.normpath(self.installdir+'/unpack.log') # Unpacking the tarball ok, packagedir = InstallService.untar(logname, self.tmpdir,'matplotlib.tar.gz') if not ok: return False # Ok: returning the good folder self.tmpdir=packagedir return True def Build(self): # Input theCommands=['python','setup.py','build'] logname=os.path.normpath(self.installdir+'/compilation.log') # Execute logging.debug('shell command: '+' '.join(theCommands)) ok, out= ShellCommand.ExecuteWithLog(theCommands,\ logname,\ self.tmpdir,\ silent=False) # return result if not ok: logging.error('impossible to build the project. 
For more details, see the log file:') logging.error(logname) return ok def Install(self): # Input theCommands=['python','setup.py','install','--home='+self.installdir] logname=os.path.normpath(self.installdir+'/compilation.log') # Execute logging.debug('shell command: '+' '.join(theCommands)) ok, out= ShellCommand.ExecuteWithLog(theCommands,\ logname,\ self.tmpdir,\ silent=False) # return result if not ok: logging.error('impossible to build the project. For more details, see the log file:') logging.error(logname) return ok def Check(self): # Check matplotlib downloaded version is in use try: import matplotlib if str(matplotlib.__version__) != "1.3.1": logging.error("Not using the right version of Matplotlib.") self.display_log() return False except: logging.error("Cannot use Matplotlib. Please install it.") self.display_log() return False return True def display_log(self): logging.error("More details can be found into the log files:") logging.error(" - "+os.path.normpath(self.installdir+"/wget.log")) logging.error(" - "+os.path.normpath(self.installdir+"/unpack.log")) logging.error(" - "+os.path.normpath(self.installdir+"/configuration.log")) logging.error(" - "+os.path.normpath(self.installdir+"/compilation.log")) logging.error(" - "+os.path.normpath(self.installdir+"/installation.log")) def NeedToRestart(self): return True
gpl-3.0
4,663,602,692,768,040,000
36.589744
147
0.597715
false
4.280292
false
false
false
dropbox/emmer
emmer/response_router.py
1
5866
import re class ResponseRouter(object): """Handles the passing of control from a conversation to a client app's routes. For read requests and write requests, ResponseRouter maintains two lists of rules, where each rule is a tuple is of the form(filename pattern, action). When a request comes in, the filename given is checked against the list of filename regex patterns, and the first rule that matches invokes the corresponding action. actions are application level functions that take the following argument: client_host: The ip or hostname of the client. client_port: The port of the client filename: The filename included in the client request. Additionally, a write request takes an additional argument: data: The data sent from the client in the tftp conversation. In the case of read requests, actions should return string data that will be served directly back to clients. """ def __init__(self): self.read_rules = [] self.write_rules = [] def append_read_rule(self, filename_pattern, action): """Adds a rule associating a filename pattern with an action for read requests. The action given will execute when a read request is received but before any responses are given. Args: filename_pattern: A string pattern to match future read request filenames against. action: A function to invoke when a later read request arrives matching the given filename_pattern. """ self.read_rules.append((filename_pattern, action)) def append_write_rule(self, filename_pattern, action): """Adds a rule associating a filename pattern with an action for write requests. The action given will execute when a write request is completed and all data received. Args: filename_pattern: A string pattern to match future read request filenames against. action: A function to invoke when a later read request arrives matching the given filename_pattern. """ self.write_rules.append((filename_pattern, action)) def initialize_read(self, filename, client_host, client_port): """For a read request, finds the appropriate action and invokes it. Args: filename: The filename included in the client's request. client_host: The host of the client connecting. client_port: The port of the client connecting. Returns: A ReadBuffer containing the file contents to return. If there is no corresponding action, returns None. """ action = self.find_action(self.read_rules, filename) if action: return ReadBuffer(action(client_host, client_port, filename)) else: return None def initialize_write(self, filename, client_host, client_port): """For a write request, finds the appropriate action and returns it. This is different than a read request in that the action is invoked at the end of the file transfer. Args: filename: The filename included in the client's request. client_host: The host of the client connecting. client_port: The port of the client connecting. Returns: An action that is to be run at the end of a write request file transfer. If there is no corresponding action, returns None. """ return self.find_action(self.write_rules, filename) def find_action(self, rules, filename): """Given a list of rules and a filename to match against them, returns an action stored in one of those rules. The action returned corresponds to the first rule that matches the filename given. Args: rules: A list of tuples, where each tuple is (filename pattern, action). filename: A filename to match against the filename regex patterns. Returns: An action corresponding to the first rule that matches the filename given. If no rules match, returns None. 
""" for (filename_pattern, action) in rules: if re.match(filename_pattern, filename): return action return None class ReadBuffer(object): """A ReadBuffer is used to temporarily store read request data while the transfer has not completely succeeded. It offers an interface for retrieving chunks of data in 512 byte chunks based on block number. """ def __init__(self, data): self.data = data def get_block_count(self): """Returns the amount of blocks that this ReadBuffer can produce This amount is also the largest value that can be passed into get_block. """ return (len(self.data) / 512) + 1 def get_block(self, block_num): """Returns the data corresponding to the given block number Args: block_num: The block number of data to request. By the TFTP protocol, blocks are consecutive 512 byte sized chunks of data with the exception of the final block which may be less than 512 chunks. Return: A 512 byte or less chunk of data corresponding to the given block number. """ return self.data[(block_num - 1) * 512:block_num * 512] class WriteBuffer(object): """A WriteBuffer is used to temporarily store write request data while the transfer has not completely succeeded. Retrieve the data from the `data` property. """ def __init__(self): self.data = "" def receive_data(self, data): """Write some more data to the WriteBuffer """ self.data += data
mit
-3,915,947,709,512,583,700
38.106667
79
0.648483
false
4.900585
false
false
false
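The emmer ResponseRouter entry above routes TFTP requests by matching the requested filename against regex rules; the first matching rule's action runs, and for reads its string result is wrapped in a ReadBuffer of 512-byte blocks. Below is a usage sketch built only from the classes shown in that entry (the route pattern, host, and port are made up, and the import path assumes the package layout implied by the record's path field):

from emmer.response_router import ResponseRouter

router = ResponseRouter()

def serve_hello(client_host, client_port, filename):
    # Read action: return the data that should be served for this filename.
    return "hello from %s for %s\n" % (client_host, filename)

# Any read request whose filename matches this pattern invokes serve_hello.
router.append_read_rule(r"^hello\.txt$", serve_hello)

buf = router.initialize_read("hello.txt", "192.0.2.10", 69)
if buf is not None:
    for block_num in range(1, buf.get_block_count() + 1):
        print(buf.get_block(block_num))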
henrysher/duplicity
duplicity/backends/dpbxbackend.py
1
19561
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright 2013 jno <jno@pisem.net> # Copyright 2016 Dmitry Nezhevenko <dion@dion.org.ua> # # Version: 0.3 # # 0. You can make me happy with https://www.dropbox.com/referrals/NTE2ODA0Mzg5 # 1. Most of the code was taken from cli_client.py. The ftpsbackend.py was used as a template # 2. DPBX & dpbx are used because the use of the actual name is prohibited # # This file is part of duplicity. # # Duplicity is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2 of the License, or (at your # option) any later version. # # Duplicity is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with duplicity; if not, write to the Free Software Foundation, # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import StringIO import os import re import sys import time import traceback import urllib from duplicity import log, globals from duplicity import progress from duplicity.errors import BackendException from duplicity.globals import num_retries from requests.exceptions import ConnectionError import duplicity.backend # This is chunk size for upload using Dpbx chumked API v2. It doesn't # make sense to make it much large since Dpbx SDK uses connection pool # internally. So multiple chunks will sent using same keep-alive socket # Plus in case of network problems we most likely will be able to retry # only failed chunk DPBX_UPLOAD_CHUNK_SIZE = 16 * 1024 * 1024 # Download internal buffer size. Files are downloaded using one request. 
DPBX_DOWNLOAD_BUF_SIZE = 512 * 1024 DPBX_AUTORENAMED_FILE_RE = re.compile(r' \([0-9]+\)\.[^\.]+$') def log_exception(e): log.Error('Exception [%s]:' % (e,)) f = StringIO.StringIO() traceback.print_exc(file=f) f.seek(0) for s in f.readlines(): log.Error('| ' + s.rstrip()) f.close() def command(login_required=True): """a decorator for handling authentication and exceptions""" def decorate(f): def wrapper(self, *args): try: return f(self, *args) except ApiError as e: log_exception(e) raise BackendException('dpbx api error "%s"' % (e,)) except Exception as e: log_exception(e) log.Error('dpbx code error "%s"' % (e,), log.ErrorCode.backend_code_error) raise wrapper.__doc__ = f.__doc__ return wrapper return decorate class DPBXBackend(duplicity.backend.Backend): """Connect to remote store using Dr*pB*x service""" def __init__(self, parsed_url): duplicity.backend.Backend.__init__(self, parsed_url) try: from dropbox import Dropbox from dropbox.exceptions import AuthError, BadInputError, ApiError from dropbox.files import (UploadSessionCursor, CommitInfo, WriteMode, GetMetadataError, DeleteError, UploadSessionLookupError, ListFolderError) from dropbox.oauth import DropboxOAuth2FlowNoRedirect except ImportError as e: raise BackendException("""\ This backend requires the dropbox package version 6.9.0 To install use "sudo pip install dropbox==6.9.0" Exception: %s""" % str(e)) self.api_account = None self.api_client = None self.auth_flow = None self.login() def user_authenticated(self): try: account = self.api_client.users_get_current_account() log.Debug("User authenticated as ,%s" % account) return True except: log.Debug('User not authenticated') return False def load_access_token(self): return os.environ.get('DPBX_ACCESS_TOKEN', None) def save_access_token(self, access_token): raise BackendException('dpbx: Please set DPBX_ACCESS_TOKEN=\"%s\" environment variable' % access_token) def obtain_access_token(self): log.Info("dpbx: trying to obtain access token") for env_var in ['DPBX_APP_KEY', 'DPBX_APP_SECRET']: if env_var not in os.environ: raise BackendException('dpbx: %s environment variable not set' % env_var) app_key = os.environ['DPBX_APP_KEY'] app_secret = os.environ['DPBX_APP_SECRET'] if not sys.stdout.isatty() or not sys.stdin.isatty(): log.FatalError('dpbx error: cannot interact, but need human attention', log.ErrorCode.backend_command_error) auth_flow = DropboxOAuth2FlowNoRedirect(app_key, app_secret) log.Debug('dpbx,auth_flow.start()') authorize_url = auth_flow.start() print print '-' * 72 print "1. Go to: " + authorize_url print "2. Click \"Allow\" (you might have to log in first)." print "3. Copy the authorization code." print '-' * 72 auth_code = raw_input("Enter the authorization code here: ").strip() try: log.Debug('dpbx,auth_flow.finish(%s)' % auth_code) authresult = auth_flow.finish(auth_code) except Exception as e: raise BackendException('dpbx: Unable to obtain access token: %s' % e) log.Info("dpbx: Authentication successfull") self.save_access_token(authresult.access_token) def login(self): if self.load_access_token() is None: self.obtain_access_token() self.api_client = Dropbox(self.load_access_token()) self.api_account = None try: log.Debug('dpbx,users_get_current_account([token])') self.api_account = self.api_client.users_get_current_account() log.Debug("dpbx,%s" % self.api_account) except (BadInputError, AuthError) as e: log.Debug('dpbx,exception: %s' % e) log.Info("dpbx: Authentication failed. 
Trying to obtain new access token") self.obtain_access_token() # We're assuming obtain_access_token will throw exception. # So this line should not be reached raise BackendException("dpbx: Please update DPBX_ACCESS_TOKEN and try again") log.Info("dpbx: Successfully authenticated as %s" % self.api_account.name.display_name) def _error_code(self, operation, e): if isinstance(e, ApiError): err = e.error if isinstance(err, GetMetadataError) and err.is_path(): if err.get_path().is_not_found(): return log.ErrorCode.backend_not_found elif isinstance(err, DeleteError) and err.is_path_lookup(): lookup = e.error.get_path_lookup() if lookup.is_not_found(): return log.ErrorCode.backend_not_found @command() def _put(self, source_path, remote_filename): remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/')) remote_path = '/' + os.path.join(remote_dir, remote_filename).rstrip() file_size = os.path.getsize(source_path.name) progress.report_transfer(0, file_size) if file_size < DPBX_UPLOAD_CHUNK_SIZE: # Upload whole file at once to avoid extra server request res_metadata = self.put_file_small(source_path, remote_path) else: res_metadata = self.put_file_chunked(source_path, remote_path) # A few sanity checks if res_metadata.path_display != remote_path: raise BackendException('dpbx: result path mismatch: %s (expected: %s)' % (res_metadata.path_display, remote_path)) if res_metadata.size != file_size: raise BackendException('dpbx: result size mismatch: %s (expected: %s)' % (res_metadata.size, file_size)) def put_file_small(self, source_path, remote_path): if not self.user_authenticated(): self.login() file_size = os.path.getsize(source_path.name) f = source_path.open('rb') try: log.Debug('dpbx,files_upload(%s, [%d bytes])' % (remote_path, file_size)) res_metadata = self.api_client.files_upload(f.read(), remote_path, mode=WriteMode.overwrite, autorename=False, client_modified=None, mute=True) log.Debug('dpbx,files_upload(): %s' % res_metadata) progress.report_transfer(file_size, file_size) return res_metadata finally: f.close() def put_file_chunked(self, source_path, remote_path): if not self.user_authenticated(): self.login() file_size = os.path.getsize(source_path.name) f = source_path.open('rb') try: buf = f.read(DPBX_UPLOAD_CHUNK_SIZE) log.Debug('dpbx,files_upload_session_start([%d bytes]), total: %d' % (len(buf), file_size)) upload_sid = self.api_client.files_upload_session_start(buf) log.Debug('dpbx,files_upload_session_start(): %s' % upload_sid) upload_cursor = UploadSessionCursor(upload_sid.session_id, f.tell()) commit_info = CommitInfo(remote_path, mode=WriteMode.overwrite, autorename=False, client_modified=None, mute=True) res_metadata = None progress.report_transfer(f.tell(), file_size) requested_offset = None current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE retry_number = globals.num_retries is_eof = False # We're doing our own error handling and retrying logic because # we can benefit from Dpbx chunked upload and retry only failed # chunk while not is_eof or not res_metadata: try: if requested_offset is not None: upload_cursor.offset = requested_offset if f.tell() != upload_cursor.offset: f.seek(upload_cursor.offset) buf = f.read(current_chunk_size) is_eof = f.tell() >= file_size if not is_eof and len(buf) == 0: continue # reset temporary status variables requested_offset = None current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE retry_number = globals.num_retries if not is_eof: assert len(buf) != 0 log.Debug('dpbx,files_upload_sesssion_append([%d bytes], offset=%d)' % (len(buf), upload_cursor.offset)) 
self.api_client.files_upload_session_append(buf, upload_cursor.session_id, upload_cursor.offset) else: log.Debug('dpbx,files_upload_sesssion_finish([%d bytes], offset=%d)' % (len(buf), upload_cursor.offset)) res_metadata = self.api_client.files_upload_session_finish(buf, upload_cursor, commit_info) upload_cursor.offset = f.tell() log.Debug('progress: %d of %d' % (upload_cursor.offset, file_size)) progress.report_transfer(upload_cursor.offset, file_size) except ApiError as e: error = e.error if isinstance(error, UploadSessionLookupError) and error.is_incorrect_offset(): # Server reports that we should send another chunk. # Most likely this is caused by network error during # previous upload attempt. In such case we'll get # expected offset from server and it's enough to just # seek() and retry again new_offset = error.get_incorrect_offset().correct_offset log.Debug('dpbx,files_upload_session_append: incorrect offset: %d (expected: %s)' % (upload_cursor.offset, new_offset)) if requested_offset is not None: # chunk failed even after seek attempt. Something # strange and no safe way to recover raise BackendException("dpbx: unable to chunk upload") else: # will seek and retry requested_offset = new_offset continue raise except ConnectionError as e: log.Debug('dpbx,files_upload_session_append: %s' % e) retry_number -= 1 if not self.user_authenticated(): self.login() if retry_number == 0: raise # We don't know for sure, was partial upload successful or # not. So it's better to retry smaller amount to avoid extra # reupload log.Info('dpbx: sleeping a bit before chunk retry') time.sleep(30) current_chunk_size = DPBX_UPLOAD_CHUNK_SIZE / 5 requested_offset = None continue if f.tell() != file_size: raise BackendException('dpbx: something wrong') log.Debug('dpbx,files_upload_sesssion_finish(): %s' % res_metadata) progress.report_transfer(f.tell(), file_size) return res_metadata finally: f.close() @command() def _get(self, remote_filename, local_path): if not self.user_authenticated(): self.login() remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/')) remote_path = '/' + os.path.join(remote_dir, remote_filename).rstrip() log.Debug('dpbx,files_download(%s)' % remote_path) res_metadata, http_fd = self.api_client.files_download(remote_path) log.Debug('dpbx,files_download(%s): %s, %s' % (remote_path, res_metadata, http_fd)) file_size = res_metadata.size to_fd = None progress.report_transfer(0, file_size) try: to_fd = local_path.open('wb') for c in http_fd.iter_content(DPBX_DOWNLOAD_BUF_SIZE): to_fd.write(c) progress.report_transfer(to_fd.tell(), file_size) finally: if to_fd: to_fd.close() http_fd.close() # It's different from _query() check because we're not querying metadata # again. 
Since this check is free, it's better to have it here local_size = os.path.getsize(local_path.name) if local_size != file_size: raise BackendException("dpbx: wrong file size: %d (expected: %d)" % (local_size, file_size)) local_path.setdata() @command() def _list(self): # Do a long listing to avoid connection reset if not self.user_authenticated(): self.login() remote_dir = '/' + urllib.unquote(self.parsed_url.path.lstrip('/')).rstrip() log.Debug('dpbx.files_list_folder(%s)' % remote_dir) res = [] try: resp = self.api_client.files_list_folder(remote_dir) log.Debug('dpbx.list(%s): %s' % (remote_dir, resp)) while True: res.extend([entry.name for entry in resp.entries]) if not resp.has_more: break resp = self.api_client.files_list_folder_continue(resp.cursor) except ApiError as e: if (isinstance(e.error, ListFolderError) and e.error.is_path() and e.error.get_path().is_not_found()): log.Debug('dpbx.list(%s): ignore missing folder (%s)' % (remote_dir, e)) else: raise # Warn users of old version dpbx about automatically renamed files self.check_renamed_files(res) return res @command() def _delete(self, filename): if not self.user_authenticated(): self.login() remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/')) remote_path = '/' + os.path.join(remote_dir, filename).rstrip() log.Debug('dpbx.files_delete(%s)' % remote_path) self.api_client.files_delete(remote_path) # files_permanently_delete seems to be better for backup purpose # but it's only available for Business accounts # self.api_client.files_permanently_delete(remote_path) @command() def _close(self): """close backend session? no! just "flush" the data""" log.Debug('dpbx.close():') @command() def _query(self, filename): if not self.user_authenticated(): self.login() remote_dir = urllib.unquote(self.parsed_url.path.lstrip('/')) remote_path = '/' + os.path.join(remote_dir, filename).rstrip() log.Debug('dpbx.files_get_metadata(%s)' % remote_path) info = self.api_client.files_get_metadata(remote_path) log.Debug('dpbx.files_get_metadata(%s): %s' % (remote_path, info)) return {'size': info.size} def check_renamed_files(self, file_list): if not self.user_authenticated(): self.login() bad_list = [x for x in file_list if DPBX_AUTORENAMED_FILE_RE.search(x) is not None] if len(bad_list) == 0: return log.Warn('-' * 72) log.Warn('Warning! It looks like there are automatically renamed files on backend') log.Warn('They were probably created when using older version of duplicity.') log.Warn('') log.Warn('Please check your backup consistency. Most likely you will need to choose') log.Warn('largest file from duplicity-* (number).gpg and remove brackets from its name.') log.Warn('') log.Warn('These files are not managed by duplicity at all and will not be') log.Warn('removed/rotated automatically.') log.Warn('') log.Warn('Affected files:') for x in bad_list: log.Warn('\t%s' % x) log.Warn('') log.Warn('In any case it\'s better to create full backup.') log.Warn('-' * 72) duplicity.backend.register_backend("dpbx", DPBXBackend)
gpl-2.0
-7,412,439,218,951,572,000
40.442797
107
0.557998
false
4.222102
false
false
false
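put_file_chunked() in the duplicity Dropbox backend above retries only the failed chunk: on an "incorrect offset" error it seeks to the offset the server reports and resumes from there. The following dependency-free sketch illustrates just that resume loop; FlakySession is an entirely hypothetical stand-in for the real Dropbox upload session and is not part of duplicity:

import io

CHUNK = 4  # tiny chunk size, only for the illustration

class FlakySession(object):
    """Hypothetical resumable-upload session that silently loses one chunk."""

    def __init__(self):
        self.received = b""
        self._dropped_once = False

    def append(self, data, offset):
        if not self._dropped_once:
            # Simulate a chunk lost in transit; the next call will then
            # arrive at an offset the "server" does not expect.
            self._dropped_once = True
            return None
        if offset != len(self.received):
            # "Incorrect offset" answer: report where the upload must resume.
            return len(self.received)
        self.received += data
        return None

def upload(data, session):
    f = io.BytesIO(data)
    while f.tell() < len(data):
        buf = f.read(CHUNK)
        corrected = session.append(buf, f.tell() - len(buf))
        if corrected is not None:
            # Seek to the server-reported offset and retry only that part.
            f.seek(corrected)
    return session.received

assert upload(b"0123456789", FlakySession()) == b"0123456789"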
possess1on/botTelegram-zabbix
botTelegram-zabbix.py
1
19925
#!/usr/bin/env python # -*- coding: utf-8 -*- # ######################################################################### # BotTelegram Zabbix # Filename: botTelegram-zabbix.py ########################################################################## from telegram.ext import Updater, CommandHandler import logging import sys import subprocess import urllib import requests ########################################## # Python install module # git clone https://github.com/possess1on/botTelegram-zabbix.git # cd botTelegram-zabbix # pip install python-telegram-bot # sudo apt-get install python-pip # pip install -r requirements.txt # Test # python botTelegram-zabbix.py # # BG # python botTelegram-zabbix.py& # ########################################## ########################################## # Install python & pip # pip install pip python-telegram-bot --upgrade # apt-get install python-urllib3 ########################################## varZabbixmapa1 = "url" varZabbixmapa2 = "url" varZabbixmapa3 = "url" varZabbixmapa4 = "url" varZabbixmapa5 = "url" varZabbixmapa6 = "url" varZabbixmapa7 = "url" varZabbixmapa8 = "url" varZabbixmapa9 = "url" varZabbixmapa10 = "url" varZabbixmapa11 = "url" varZabbixmapa12 = "url" varZabbixmapa13 = "url" varZabbixmapa14 = "url" varZabbixmapa15 = "url" users_liberados = [id] varBotToken = 'token' varUsername = "log" varPassword = "pass" varZabbixServer = "url" varZabbixLanguage = "US" # Enable logging logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO, filename='botTelegram_zabbix.log') logging.info('Started') logger = logging.getLogger(__name__) job_queue = None # Zabbix cookie varcookie = None def start(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return bot.sendMessage(update.message.chat_id, text='Добро пожаловать!!') def mapa1(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa1 file_img = "botTelegram_mapa1.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa2(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa2 file_img = "botTelegram_mapa2.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa3(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa3 file_img = "botTelegram_mapa3.jpg" res = 
requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa4(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa4 file_img = "botTelegram_mapa4.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa5(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa5 file_img = "botTelegram_mapa5.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa6(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa6 file_img = "botTelegram_mapa6.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa7(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa7 file_img = "botTelegram_mapa7.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa8(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa8 file_img = "botTelegram_mapa8.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with 
open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa9(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa9 file_img = "botTelegram_mapa9.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa10(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa10 file_img = "botTelegram_mapa10.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa11(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa11 file_img = "botTelegram_mapa11.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa12(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa12 file_img = "botTelegram_mapa12.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa13(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa13 file_img = "botTelegram_mapa13.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def 
mapa14(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa14 file_img = "botTelegram_mapa14.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def mapa15(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: # urllib.urlretrieve(varZabbixmapa5, "botTelegram_mapa5.jpg") login() zbx_img_url = varZabbixmapa15 file_img = "botTelegram_mapa15.jpg" res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Код 404 проверьте адрес: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except IndexError: return except ValueError: return def help(bot, update): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return bot.sendMessage(update.message.chat_id, text="Help:\n" "/atm - Банкоматы\n" "/postamat - Почтаматы\n" "/140100 - Аксу\n" "/140200 - Актогай\n" "/140300 - Баянаул\n" "/140400 - Железинка\n" "/140500 - Иртышск\n" "/140600 - Качиры\n" "/140700 - Лебяжий\n" "/140800 - Майск\n" "/140900 - ПРУПС\n" "/141000 - Успенка\n" "/141100 - Щербакты\n" "/141200 - Экибастуз\n" "/140000 - ОПСы\n") def error(bot, update, error): logger.warn('Update "%s" error "%s"' % (update, error)) def login(): global varcookie requests.packages.urllib3.disable_warnings() if varZabbixLanguage == "PT": data_api = {"name": varUsername, "password": varPassword, "enter": "Connect-SE"} else: data_api = {"name": varUsername, "password": varPassword, "enter": "Sign in"} req_cookie = requests.post(varZabbixServer + "/", data=data_api, verify=True) varcookie = req_cookie.cookies if len(req_cookie.history) > 1 and req_cookie.history[0].status_code == 302: logger.warn("Проверьте адрес сервера") if not varcookie: logger.warn("Проверьте имя пользователя и пароль") varcookie = None def grafico(bot, update, args): chat_id = update.message.chat_id if not chat_id in users_liberados: logging.info("Не найден - ID {}".format(chat_id)) return try: #print len(args) if len(args) < 2: bot.sendMessage(chat_id, text='Корректность') return False grafico_id = args[0] grafico_seg = args[1] login() zbx_img_url = ("{}/chart.php?itemids={}&period={}&width=600".format(varZabbixServer, grafico_id, grafico_seg)) file_img = "botTelegram_grafico_{}.jpg".format(grafico_id) res = requests.get(zbx_img_url, cookies=varcookie) res_code = res.status_code if res_code == 404: logger.warn("Проверьте адрес Zabbix Grafico: {}".format(zbx_img_url)) return False res_img = res.content with open(file_img, 'wb') as fp: fp.write(res_img) fp.close() bot.sendPhoto(chat_id=update.message.chat_id, photo=open(file_img, 'rb')) except (IndexError, ValueError): update.message.reply_text('Проверьте ID grafico') return def main(): global job_queue updater = Updater(varBotToken) job_queue = updater.job_queue dp = 
updater.dispatcher dp.add_handler(CommandHandler("atm", mapa1)) dp.add_handler(CommandHandler("postamat", mapa2)) dp.add_handler(CommandHandler("140100", mapa3)) dp.add_handler(CommandHandler("140200", mapa4)) dp.add_handler(CommandHandler("140300", mapa5)) dp.add_handler(CommandHandler("140400", mapa6)) dp.add_handler(CommandHandler("140500", mapa7)) dp.add_handler(CommandHandler("140600", mapa8)) dp.add_handler(CommandHandler("140700", mapa9)) dp.add_handler(CommandHandler("140800", mapa10)) dp.add_handler(CommandHandler("140900", mapa11)) dp.add_handler(CommandHandler("141000", mapa12)) dp.add_handler(CommandHandler("141100", mapa13)) dp.add_handler(CommandHandler("141200", mapa14)) dp.add_handler(CommandHandler("140000", mapa15)) dp.add_handler(CommandHandler("grafico", grafico, pass_args=True)) dp.add_handler(CommandHandler("help", help)) # log all errors dp.add_error_handler(error) # Start the Bot updater.start_polling() updater.idle() logging.info('Finished') logging.shutdown() if __name__ == '__main__': main()
gpl-2.0
-7,515,542,066,276,007,000
29.095016
122
0.571192
false
3.22339
false
false
false
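Note: the fifteen mapaN handlers in the record above are copy-pasted variants of one download-and-send routine. A minimal consolidation sketch follows; it is not part of the original file. It assumes the same module-level names used in the record (login, varcookie, users_liberados, logger, varZabbixmapa1) and the (bot, update) handler signature of the python-telegram-bot version used there; make_map_handler is a hypothetical helper name.

import logging
import requests

def make_map_handler(map_url, file_img):
    """Return a /command handler that fetches one Zabbix map image and sends it."""
    def handler(bot, update):
        chat_id = update.message.chat_id
        if chat_id not in users_liberados:      # same whitelist check as in the record
            logging.info("ID not allowed - {}".format(chat_id))
            return
        login()                                 # refresh the Zabbix session cookie
        res = requests.get(map_url, cookies=varcookie)
        if res.status_code == 404:
            logger.warn("Got 404, check the URL: {}".format(map_url))
            return
        with open(file_img, 'wb') as fp:
            fp.write(res.content)
        bot.sendPhoto(chat_id=chat_id, photo=open(file_img, 'rb'))
    return handler

# Hypothetical registration, mirroring main() in the record:
# dp.add_handler(CommandHandler("atm", make_map_handler(varZabbixmapa1, "botTelegram_mapa1.jpg")))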
xow/mdk
mdk/commands/install.py
1
3148
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Moodle Development Kit

Copyright (c) 2013 Frédéric Massart - FMCorz.net

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

http://github.com/FMCorz/mdk
"""

import os
import logging
from .. import db
from ..command import Command
from ..tools import mkdir

DB = db.DB


class InstallCommand(Command):

    _description = 'Install a Moodle instance'

    def __init__(self, *args, **kwargs):
        super(InstallCommand, self).__init__(*args, **kwargs)
        self._arguments = [
            (
                ['-e', '--engine'],
                {
                    'action': 'store',
                    'choices': ['mariadb', 'mysqli', 'pgsql'],
                    'default': self.C.get('defaultEngine'),
                    'help': 'database engine to use',
                    'metavar': 'engine'
                }
            ),
            (
                ['-f', '--fullname'],
                {
                    'action': 'store',
                    'help': 'full name of the instance',
                    'metavar': 'fullname'
                }
            ),
            (
                ['-r', '--run'],
                {
                    'action': 'store',
                    'help': 'scripts to run after installation',
                    'metavar': 'run',
                    'nargs': '*'
                }
            ),
            (
                ['name'],
                {
                    'default': None,
                    'help': 'name of the instance',
                    'metavar': 'name',
                    'nargs': '?'
                })
        ]

    def run(self, args):
        name = args.name
        engine = args.engine
        fullname = args.fullname

        M = self.Wp.resolve(name)
        if not M:
            raise Exception('This is not a Moodle instance')

        name = M.get('identifier')
        dataDir = self.Wp.getPath(name, 'data')
        if not os.path.isdir(dataDir):
            mkdir(dataDir, 0777)

        kwargs = {
            'engine': engine,
            'fullname': fullname,
            'dataDir': dataDir,
            'wwwroot': self.Wp.getUrl(name)
        }
        M.install(**kwargs)

        # Running scripts
        if M.isInstalled() and type(args.run) == list:
            for script in args.run:
                logging.info('Running script \'%s\'' % (script))
                try:
                    M.runScript(script)
                except Exception as e:
                    logging.warning('Error while running the script: %s' % e)
gpl-3.0
-6,347,851,598,104,812,000
28.12963
77
0.493961
false
4.406162
false
false
false
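Note: for orientation only, the argument spec in the record above corresponds to an invocation along these lines; the "mdk" entry-point name, the script name and the instance name are placeholders, not taken from the file:

mdk install -e pgsql -f "My test instance" -r setup_users myinstance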
mick-d/nipype
nipype/algorithms/modelgen.py
5
37627
# -*- coding: utf-8 -*- # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ The modelgen module provides classes for specifying designs for individual subject analysis of task-based fMRI experiments. In particular it also includes algorithms for generating regressors for sparse and sparse-clustered acquisition experiments. These functions include: * SpecifyModel: allows specification of sparse and non-sparse models Change directory to provide relative paths for doctests >>> import os >>> filepath = os.path.dirname( os.path.realpath( __file__ ) ) >>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data')) >>> os.chdir(datadir) """ from __future__ import print_function, division, unicode_literals, absolute_import from builtins import range, str, bytes, int from copy import deepcopy import os from nibabel import load import numpy as np from scipy.special import gammaln from ..utils import NUMPY_MMAP from ..interfaces.base import (BaseInterface, TraitedSpec, InputMultiPath, traits, File, Bunch, BaseInterfaceInputSpec, isdefined) from ..utils.filemanip import filename_to_list from ..utils.misc import normalize_mc_params from .. import config, logging iflogger = logging.getLogger('interface') def gcd(a, b): """Returns the greatest common divisor of two integers uses Euclid's algorithm >>> gcd(4, 5) 1 >>> gcd(4, 8) 4 >>> gcd(22, 55) 11 """ while b > 0: a, b = b, a % b return a def spm_hrf(RT, P=None, fMRI_T=16): """ python implementation of spm_hrf see spm_hrf for implementation details % RT - scan repeat time % p - parameters of the response function (two gamma % functions) % defaults (seconds) % p(0) - delay of response (relative to onset) 6 % p(1) - delay of undershoot (relative to onset) 16 % p(2) - dispersion of response 1 % p(3) - dispersion of undershoot 1 % p(4) - ratio of response to undershoot 6 % p(5) - onset (seconds) 0 % p(6) - length of kernel (seconds) 32 % % hrf - hemodynamic response function % p - parameters of the response function the following code using scipy.stats.distributions.gamma doesn't return the same result as the spm_Gpdf function :: hrf = gamma.pdf(u, p[0]/p[2], scale=dt/p[2]) - gamma.pdf(u, p[1]/p[3], scale=dt/p[3])/p[4] >>> print(spm_hrf(2)) [ 0.00000000e+00 8.65660810e-02 3.74888236e-01 3.84923382e-01 2.16117316e-01 7.68695653e-02 1.62017720e-03 -3.06078117e-02 -3.73060781e-02 -3.08373716e-02 -2.05161334e-02 -1.16441637e-02 -5.82063147e-03 -2.61854250e-03 -1.07732374e-03 -4.10443522e-04 -1.46257507e-04] """ p = np.array([6, 16, 1, 1, 6, 0, 32], dtype=float) if P is not None: p[0:len(P)] = P _spm_Gpdf = lambda x, h, l: np.exp(h * np.log(l) + (h - 1) * np.log(x) - (l * x) - gammaln(h)) # modelled hemodynamic response function - {mixture of Gammas} dt = RT / float(fMRI_T) u = np.arange(0, int(p[6] / dt + 1)) - p[5] / dt with np.errstate(divide='ignore'): # Known division-by-zero hrf = _spm_Gpdf(u, p[0] / p[2], dt / p[2]) - _spm_Gpdf(u, p[1] / p[3], dt / p[3]) / p[4] idx = np.arange(0, int((p[6] / RT) + 1)) * fMRI_T hrf = hrf[idx] hrf = hrf / np.sum(hrf) return hrf def orth(x_in, y_in): """Orthogonalize y_in with respect to x_in. 
>>> orth_expected = np.array([1.7142857142857144, 0.42857142857142883, \ -0.85714285714285676]) >>> err = np.abs(np.array(orth([1, 2, 3],[4, 5, 6]) - orth_expected)) >>> all(err < np.finfo(float).eps) True """ x = np.array(x_in)[:, None] y = np.array(y_in)[:, None] y = y - np.dot(x, np.dot(np.linalg.inv(np.dot(x.T, x)), np.dot(x.T, y))) if np.linalg.norm(y, 1) > np.exp(-32): y = y[:, 0].tolist() else: y = y_in return y def scale_timings(timelist, input_units, output_units, time_repetition): """Scales timings given input and output units (scans/secs) Parameters ---------- timelist: list of times to scale input_units: 'secs' or 'scans' output_units: Ibid. time_repetition: float in seconds """ if input_units == output_units: _scalefactor = 1. if (input_units == 'scans') and (output_units == 'secs'): _scalefactor = time_repetition if (input_units == 'secs') and (output_units == 'scans'): _scalefactor = 1. / time_repetition timelist = [np.max([0., _scalefactor * t]) for t in timelist] return timelist def gen_info(run_event_files): """Generate subject_info structure from a list of event files """ info = [] for i, event_files in enumerate(run_event_files): runinfo = Bunch(conditions=[], onsets=[], durations=[], amplitudes=[]) for event_file in event_files: _, name = os.path.split(event_file) if '.run' in name: name, _ = name.split('.run%03d' % (i + 1)) elif '.txt' in name: name, _ = name.split('.txt') runinfo.conditions.append(name) event_info = np.atleast_2d(np.loadtxt(event_file)) runinfo.onsets.append(event_info[:, 0].tolist()) if event_info.shape[1] > 1: runinfo.durations.append(event_info[:, 1].tolist()) else: runinfo.durations.append([0]) if event_info.shape[1] > 2: runinfo.amplitudes.append(event_info[:, 2].tolist()) else: delattr(runinfo, 'amplitudes') info.append(runinfo) return info class SpecifyModelInputSpec(BaseInterfaceInputSpec): subject_info = InputMultiPath(Bunch, mandatory=True, xor=['subject_info', 'event_files'], desc='Bunch or List(Bunch) subject-specific ' 'condition information. see ' ':ref:`SpecifyModel` or ' 'SpecifyModel.__doc__ for details') event_files = InputMultiPath(traits.List(File(exists=True)), mandatory=True, xor=['subject_info', 'event_files'], desc='List of event description files 1, 2 or 3 ' 'column format corresponding to onsets, ' 'durations and amplitudes') realignment_parameters = InputMultiPath(File(exists=True), desc='Realignment parameters returned ' 'by motion correction algorithm', copyfile=False) parameter_source = traits.Enum("SPM", "FSL", "AFNI", "FSFAST", "NIPY", usedefault=True, desc="Source of motion parameters") outlier_files = InputMultiPath(File(exists=True), desc='Files containing scan outlier indices ' 'that should be tossed', copyfile=False) functional_runs = InputMultiPath(traits.Either(traits.List(File(exists=True)), File(exists=True)), mandatory=True, desc='Data files for model. List of 4D ' 'files or list of list of 3D ' 'files per session', copyfile=False) input_units = traits.Enum('secs', 'scans', mandatory=True, desc='Units of event onsets and durations (secs ' 'or scans). 
Output units are always in secs') high_pass_filter_cutoff = traits.Float(mandatory=True, desc='High-pass filter cutoff in secs') time_repetition = traits.Float(mandatory=True, desc='Time between the start of one volume ' 'to the start of the next image volume.') # Not implemented yet # polynomial_order = traits.Range(0, low=0, # desc ='Number of polynomial functions to model high pass filter.') class SpecifyModelOutputSpec(TraitedSpec): session_info = traits.Any(desc='Session info for level1designs') class SpecifyModel(BaseInterface): """Makes a model specification compatible with spm/fsl designers. The subject_info field should contain paradigm information in the form of a Bunch or a list of Bunch. The Bunch should contain the following information:: [Mandatory] - conditions : list of names - onsets : lists of onsets corresponding to each condition - durations : lists of durations corresponding to each condition. Should be left to a single 0 if all events are being modelled as impulses. [Optional] - regressor_names : list of str list of names corresponding to each column. Should be None if automatically assigned. - regressors : list of lists values for each regressor - must correspond to the number of volumes in the functional run - amplitudes : lists of amplitudes for each event. This will be ignored by SPM's Level1Design. The following two (tmod, pmod) will be ignored by any Level1Design class other than SPM: - tmod : lists of conditions that should be temporally modulated. Should default to None if not being used. - pmod : list of Bunch corresponding to conditions - name : name of parametric modulator - param : values of the modulator - poly : degree of modulation Alternatively, you can provide information through event files. The event files have to be in 1, 2 or 3 column format with the columns corresponding to Onsets, Durations and Amplitudes and they have to have the name event_name.runXXX... e.g.: Words.run001.txt. The event_name part will be used to create the condition names. Examples -------- >>> from nipype.algorithms import modelgen >>> from nipype.interfaces.base import Bunch >>> s = modelgen.SpecifyModel() >>> s.inputs.input_units = 'secs' >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] >>> s.inputs.time_repetition = 6 >>> s.inputs.high_pass_filter_cutoff = 128. 
>>> evs_run2 = Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]) >>> evs_run3 = Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]]) >>> s.inputs.subject_info = [evs_run2, evs_run3] Using pmod: >>> evs_run2 = Bunch(conditions=['cond1', 'cond2'], onsets=[[2, 50], [100, 180]], \ durations=[[0], [0]], pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]), \ None]) >>> evs_run3 = Bunch(conditions=['cond1', 'cond2'], onsets=[[20, 120], [80, 160]], \ durations=[[0], [0]], pmod=[Bunch(name=['amp'], poly=[2], param=[[1, 2]]), \ None]) >>> s.inputs.subject_info = [evs_run2, evs_run3] """ input_spec = SpecifyModelInputSpec output_spec = SpecifyModelOutputSpec def _generate_standard_design(self, infolist, functional_runs=None, realignment_parameters=None, outliers=None): """ Generates a standard design matrix paradigm given information about each run """ sessinfo = [] output_units = 'secs' if 'output_units' in self.inputs.traits(): output_units = self.inputs.output_units for i, info in enumerate(infolist): sessinfo.insert(i, dict(cond=[])) if isdefined(self.inputs.high_pass_filter_cutoff): sessinfo[i]['hpf'] = \ np.float(self.inputs.high_pass_filter_cutoff) if hasattr(info, 'conditions') and info.conditions is not None: for cid, cond in enumerate(info.conditions): sessinfo[i]['cond'].insert(cid, dict()) sessinfo[i]['cond'][cid]['name'] = info.conditions[cid] scaled_onset = scale_timings(info.onsets[cid], self.inputs.input_units, output_units, self.inputs.time_repetition) sessinfo[i]['cond'][cid]['onset'] = scaled_onset scaled_duration = scale_timings(info.durations[cid], self.inputs.input_units, output_units, self.inputs.time_repetition) sessinfo[i]['cond'][cid]['duration'] = scaled_duration if hasattr(info, 'amplitudes') and info.amplitudes: sessinfo[i]['cond'][cid]['amplitudes'] = \ info.amplitudes[cid] if hasattr(info, 'tmod') and info.tmod and \ len(info.tmod) > cid: sessinfo[i]['cond'][cid]['tmod'] = info.tmod[cid] if hasattr(info, 'pmod') and info.pmod and \ len(info.pmod) > cid: if info.pmod[cid]: sessinfo[i]['cond'][cid]['pmod'] = [] for j, name in enumerate(info.pmod[cid].name): sessinfo[i]['cond'][cid]['pmod'].insert(j, {}) sessinfo[i]['cond'][cid]['pmod'][j]['name'] = \ name sessinfo[i]['cond'][cid]['pmod'][j]['poly'] = \ info.pmod[cid].poly[j] sessinfo[i]['cond'][cid]['pmod'][j]['param'] = \ info.pmod[cid].param[j] sessinfo[i]['regress'] = [] if hasattr(info, 'regressors') and info.regressors is not None: for j, r in enumerate(info.regressors): sessinfo[i]['regress'].insert(j, dict(name='', val=[])) if hasattr(info, 'regressor_names') and \ info.regressor_names is not None: sessinfo[i]['regress'][j]['name'] = \ info.regressor_names[j] else: sessinfo[i]['regress'][j]['name'] = 'UR%d' % (j + 1) sessinfo[i]['regress'][j]['val'] = info.regressors[j] sessinfo[i]['scans'] = functional_runs[i] if realignment_parameters is not None: for i, rp in enumerate(realignment_parameters): mc = realignment_parameters[i] for col in range(mc.shape[1]): colidx = len(sessinfo[i]['regress']) sessinfo[i]['regress'].insert(colidx, dict(name='', val=[])) sessinfo[i]['regress'][colidx]['name'] = 'Realign%d' % (col + 1) sessinfo[i]['regress'][colidx]['val'] = mc[:, col].tolist() if outliers is not None: for i, out in enumerate(outliers): numscans = 0 for f in filename_to_list(sessinfo[i]['scans']): shape = load(f, mmap=NUMPY_MMAP).shape if len(shape) == 3 or shape[3] == 1: iflogger.warning(('You are using 3D instead of 4D ' 'files. 
Are you sure this was ' 'intended?')) numscans += 1 else: numscans += shape[3] for j, scanno in enumerate(out): colidx = len(sessinfo[i]['regress']) sessinfo[i]['regress'].insert(colidx, dict(name='', val=[])) sessinfo[i]['regress'][colidx]['name'] = 'Outlier%d' % (j + 1) sessinfo[i]['regress'][colidx]['val'] = \ np.zeros((1, numscans))[0].tolist() sessinfo[i]['regress'][colidx]['val'][int(scanno)] = 1 return sessinfo def _generate_design(self, infolist=None): """Generate design specification for a typical fmri paradigm """ realignment_parameters = [] if isdefined(self.inputs.realignment_parameters): for parfile in self.inputs.realignment_parameters: realignment_parameters.append( np.apply_along_axis(func1d=normalize_mc_params, axis=1, arr=np.loadtxt(parfile), source=self.inputs.parameter_source)) outliers = [] if isdefined(self.inputs.outlier_files): for filename in self.inputs.outlier_files: try: outindices = np.loadtxt(filename, dtype=int) except IOError: outliers.append([]) else: if outindices.size == 1: outliers.append([outindices.tolist()]) else: outliers.append(outindices.tolist()) if infolist is None: if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) self._sessinfo = self._generate_standard_design(infolist, functional_runs=self.inputs.functional_runs, realignment_parameters=realignment_parameters, outliers=outliers) def _run_interface(self, runtime): """ """ self._sessioninfo = None self._generate_design() return runtime def _list_outputs(self): outputs = self._outputs().get() if not hasattr(self, '_sessinfo'): self._generate_design() outputs['session_info'] = self._sessinfo return outputs class SpecifySPMModelInputSpec(SpecifyModelInputSpec): concatenate_runs = traits.Bool(False, usedefault=True, desc='Concatenate all runs to look like a ' 'single session.') output_units = traits.Enum('secs', 'scans', usedefault=True, desc='Units of design event onsets and durations ' '(secs or scans)') class SpecifySPMModel(SpecifyModel): """Adds SPM specific options to SpecifyModel adds: - concatenate_runs - output_units Examples -------- >>> from nipype.algorithms import modelgen >>> from nipype.interfaces.base import Bunch >>> s = modelgen.SpecifySPMModel() >>> s.inputs.input_units = 'secs' >>> s.inputs.output_units = 'scans' >>> s.inputs.high_pass_filter_cutoff = 128. 
>>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] >>> s.inputs.time_repetition = 6 >>> s.inputs.concatenate_runs = True >>> evs_run2 = Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], durations=[[1]]) >>> evs_run3 = Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], durations=[[1]]) >>> s.inputs.subject_info = [evs_run2, evs_run3] """ input_spec = SpecifySPMModelInputSpec def _concatenate_info(self, infolist): nscans = [] for i, f in enumerate(self.inputs.functional_runs): if isinstance(f, list): numscans = len(f) elif isinstance(f, (str, bytes)): img = load(f, mmap=NUMPY_MMAP) numscans = img.shape[3] else: raise Exception('Functional input not specified correctly') nscans.insert(i, numscans) # now combine all fields into 1 # names, onsets, durations, amplitudes, pmod, tmod, regressor_names, # regressors infoout = infolist[0] for j, val in enumerate(infolist[0].durations): if len(infolist[0].onsets[j]) > 1 and len(val) == 1: infoout.durations[j] = (infolist[0].durations[j] * len(infolist[0].onsets[j])) for i, info in enumerate(infolist[1:]): # info.[conditions, tmod] remain the same if info.onsets: for j, val in enumerate(info.onsets): if self.inputs.input_units == 'secs': onsets = np.array(info.onsets[j]) +\ self.inputs.time_repetition * \ sum(nscans[0:(i + 1)]) infoout.onsets[j].extend(onsets.tolist()) else: onsets = np.array(info.onsets[j]) + \ sum(nscans[0:(i + 1)]) infoout.onsets[j].extend(onsets.tolist()) for j, val in enumerate(info.durations): if len(info.onsets[j]) > 1 and len(val) == 1: infoout.durations[j].extend(info.durations[j] * len(info.onsets[j])) elif len(info.onsets[j]) == len(val): infoout.durations[j].extend(info.durations[j]) else: raise ValueError('Mismatch in number of onsets and \ durations for run {0}, condition \ {1}'.format(i + 2, j + 1)) if hasattr(info, 'amplitudes') and info.amplitudes: for j, val in enumerate(info.amplitudes): infoout.amplitudes[j].extend(info.amplitudes[j]) if hasattr(info, 'pmod') and info.pmod: for j, val in enumerate(info.pmod): if val: for key, data in enumerate(val.param): infoout.pmod[j].param[key].extend(data) if hasattr(info, 'regressors') and info.regressors: # assumes same ordering of regressors across different # runs and the same names for the regressors for j, v in enumerate(info.regressors): infoout.regressors[j].extend(info.regressors[j]) # insert session regressors if not hasattr(infoout, 'regressors') or not infoout.regressors: infoout.regressors = [] onelist = np.zeros((1, sum(nscans))) onelist[0, sum(nscans[0:i]):sum(nscans[0:(i + 1)])] = 1 infoout.regressors.insert(len(infoout.regressors), onelist.tolist()[0]) return [infoout], nscans def _generate_design(self, infolist=None): if not isdefined(self.inputs.concatenate_runs) or \ not self.inputs.concatenate_runs: super(SpecifySPMModel, self)._generate_design(infolist=infolist) return if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) concatlist, nscans = self._concatenate_info(infolist) functional_runs = [filename_to_list(self.inputs.functional_runs)] realignment_parameters = [] if isdefined(self.inputs.realignment_parameters): realignment_parameters = [] for parfile in self.inputs.realignment_parameters: mc = np.apply_along_axis(func1d=normalize_mc_params, axis=1, arr=np.loadtxt(parfile), source=self.inputs.parameter_source) if not realignment_parameters: realignment_parameters.insert(0, mc) else: realignment_parameters[0] = \ 
np.concatenate((realignment_parameters[0], mc)) outliers = [] if isdefined(self.inputs.outlier_files): outliers = [[]] for i, filename in enumerate(self.inputs.outlier_files): try: out = np.loadtxt(filename) except IOError: iflogger.warn('Error reading outliers file %s', filename) out = np.array([]) if out.size > 0: iflogger.debug('fname=%s, out=%s, nscans=%d', filename, out, sum(nscans[0:i])) sumscans = out.astype(int) + sum(nscans[0:i]) if out.size == 1: outliers[0]+= [np.array(sumscans, dtype=int).tolist()] else: outliers[0]+= np.array(sumscans, dtype=int).tolist() self._sessinfo = self._generate_standard_design(concatlist, functional_runs=functional_runs, realignment_parameters=realignment_parameters, outliers=outliers) class SpecifySparseModelInputSpec(SpecifyModelInputSpec): time_acquisition = traits.Float(0, mandatory=True, desc='Time in seconds to acquire a single ' 'image volume') volumes_in_cluster = traits.Range(1, usedefault=True, desc='Number of scan volumes in a cluster') model_hrf = traits.Bool(desc='Model sparse events with hrf') stimuli_as_impulses = traits.Bool(True, desc='Treat each stimulus to be impulse-like', usedefault=True) use_temporal_deriv = traits.Bool(requires=['model_hrf'], desc='Create a temporal derivative in ' 'addition to regular regressor') scale_regressors = traits.Bool(True, desc='Scale regressors by the peak', usedefault=True) scan_onset = traits.Float(0.0, desc='Start of scanning relative to onset of run in secs', usedefault=True) save_plot = traits.Bool(desc=('Save plot of sparse design calculation ' '(requires matplotlib)')) class SpecifySparseModelOutputSpec(SpecifyModelOutputSpec): sparse_png_file = File(desc='PNG file showing sparse design') sparse_svg_file = File(desc='SVG file showing sparse design') class SpecifySparseModel(SpecifyModel): """ Specify a sparse model that is compatible with spm/fsl designers References ---------- .. [1] Perrachione TK and Ghosh SS (2013) Optimized design and analysis of sparse-sampling fMRI experiments. Front. Neurosci. 7:55 http://journal.frontiersin.org/Journal/10.3389/fnins.2013.00055/abstract Examples -------- >>> from nipype.algorithms import modelgen >>> from nipype.interfaces.base import Bunch >>> s = modelgen.SpecifySparseModel() >>> s.inputs.input_units = 'secs' >>> s.inputs.functional_runs = ['functional2.nii', 'functional3.nii'] >>> s.inputs.time_repetition = 6 >>> s.inputs.time_acquisition = 2 >>> s.inputs.high_pass_filter_cutoff = 128. 
>>> s.inputs.model_hrf = True >>> evs_run2 = Bunch(conditions=['cond1'], onsets=[[2, 50, 100, 180]], \ durations=[[1]]) >>> evs_run3 = Bunch(conditions=['cond1'], onsets=[[30, 40, 100, 150]], \ durations=[[1]]) >>> s.inputs.subject_info = [evs_run2, evs_run3] """ input_spec = SpecifySparseModelInputSpec output_spec = SpecifySparseModelOutputSpec def _gen_regress(self, i_onsets, i_durations, i_amplitudes, nscans): """Generates a regressor for a sparse/clustered-sparse acquisition """ bplot = False if isdefined(self.inputs.save_plot) and self.inputs.save_plot: bplot = True import matplotlib matplotlib.use(config.get('execution', 'matplotlib_backend')) import matplotlib.pyplot as plt TR = np.round(self.inputs.time_repetition * 1000) # in ms if self.inputs.time_acquisition: TA = np.round(self.inputs.time_acquisition * 1000) # in ms else: TA = TR # in ms nvol = self.inputs.volumes_in_cluster SCANONSET = np.round(self.inputs.scan_onset * 1000) total_time = TR * (nscans - nvol) / nvol + TA * nvol + SCANONSET SILENCE = TR - TA * nvol dt = TA / 10.0 durations = np.round(np.array(i_durations) * 1000) if len(durations) == 1: durations = durations * np.ones((len(i_onsets))) onsets = np.round(np.array(i_onsets) * 1000) dttemp = gcd(TA, gcd(SILENCE, TR)) if dt < dttemp: if dttemp % dt != 0: dt = float(gcd(dttemp, dt)) if dt < 1: raise Exception('Time multiple less than 1 ms') iflogger.info('Setting dt = %d ms\n' % dt) npts = int(np.ceil(total_time / dt)) times = np.arange(0, total_time, dt) * 1e-3 timeline = np.zeros((npts)) timeline2 = np.zeros((npts)) if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: hrf = spm_hrf(dt * 1e-3) reg_scale = 1.0 if self.inputs.scale_regressors: boxcar = np.zeros(int(50.0 * 1e3 / dt)) if self.inputs.stimuli_as_impulses: boxcar[int(1.0 * 1e3 / dt)] = 1.0 reg_scale = float(TA / dt) else: boxcar[int(1.0 * 1e3 / dt):int(2.0 * 1e3 / dt)] = 1.0 if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: response = np.convolve(boxcar, hrf) reg_scale = 1.0 / response.max() iflogger.info('response sum: %.4f max: %.4f' % (response.sum(), response.max())) iflogger.info('reg_scale: %.4f' % reg_scale) for i, t in enumerate(onsets): idx = int(np.round(t / dt)) if i_amplitudes: if len(i_amplitudes) > 1: timeline2[idx] = i_amplitudes[i] else: timeline2[idx] = i_amplitudes[0] else: timeline2[idx] = 1 if bplot: plt.subplot(4, 1, 1) plt.plot(times, timeline2) if not self.inputs.stimuli_as_impulses: if durations[i] == 0: durations[i] = TA * nvol stimdur = np.ones((int(durations[i] / dt))) timeline2 = np.convolve(timeline2, stimdur)[0:len(timeline2)] timeline += timeline2 timeline2[:] = 0 if bplot: plt.subplot(4, 1, 2) plt.plot(times, timeline) if isdefined(self.inputs.model_hrf) and self.inputs.model_hrf: timeline = np.convolve(timeline, hrf)[0:len(timeline)] if isdefined(self.inputs.use_temporal_deriv) and \ self.inputs.use_temporal_deriv: # create temporal deriv timederiv = np.concatenate(([0], np.diff(timeline))) if bplot: plt.subplot(4, 1, 3) plt.plot(times, timeline) if isdefined(self.inputs.use_temporal_deriv) and \ self.inputs.use_temporal_deriv: plt.plot(times, timederiv) # sample timeline timeline2 = np.zeros((npts)) reg = [] regderiv = [] for i, trial in enumerate(np.arange(nscans) / nvol): scanstart = int((SCANONSET + trial * TR + (i % nvol) * TA) / dt) scanidx = scanstart + np.arange(int(TA / dt)) timeline2[scanidx] = np.max(timeline) reg.insert(i, np.mean(timeline[scanidx]) * reg_scale) if isdefined(self.inputs.use_temporal_deriv) and \ 
self.inputs.use_temporal_deriv: regderiv.insert(i, np.mean(timederiv[scanidx]) * reg_scale) if isdefined(self.inputs.use_temporal_deriv) and \ self.inputs.use_temporal_deriv: iflogger.info('orthoganlizing derivative w.r.t. main regressor') regderiv = orth(reg, regderiv) if bplot: plt.subplot(4, 1, 3) plt.plot(times, timeline2) plt.subplot(4, 1, 4) plt.bar(np.arange(len(reg)), reg, width=0.5) plt.savefig('sparse.png') plt.savefig('sparse.svg') if regderiv: return [reg, regderiv] else: return reg def _cond_to_regress(self, info, nscans): """Converts condition information to full regressors """ reg = [] regnames = [] for i, cond in enumerate(info.conditions): if hasattr(info, 'amplitudes') and info.amplitudes: amplitudes = info.amplitudes[i] else: amplitudes = None regnames.insert(len(regnames), cond) scaled_onsets = scale_timings(info.onsets[i], self.inputs.input_units, 'secs', self.inputs.time_repetition) scaled_durations = scale_timings(info.durations[i], self.inputs.input_units, 'secs', self.inputs.time_repetition) regressor = self._gen_regress(scaled_onsets, scaled_durations, amplitudes, nscans) if isdefined(self.inputs.use_temporal_deriv) and \ self.inputs.use_temporal_deriv: reg.insert(len(reg), regressor[0]) regnames.insert(len(regnames), cond + '_D') reg.insert(len(reg), regressor[1]) else: reg.insert(len(reg), regressor) # need to deal with temporal and parametric modulators # for sparse-clustered acquisitions enter T1-effect regressors nvol = self.inputs.volumes_in_cluster if nvol > 1: for i in range(nvol - 1): treg = np.zeros((nscans / nvol, nvol)) treg[:, i] = 1 reg.insert(len(reg), treg.ravel().tolist()) regnames.insert(len(regnames), 'T1effect_%d' % i) return reg, regnames def _generate_clustered_design(self, infolist): """Generates condition information for sparse-clustered designs. """ infoout = deepcopy(infolist) for i, info in enumerate(infolist): infoout[i].conditions = None infoout[i].onsets = None infoout[i].durations = None if info.conditions: img = load(self.inputs.functional_runs[i], mmap=NUMPY_MMAP) nscans = img.shape[3] reg, regnames = self._cond_to_regress(info, nscans) if hasattr(infoout[i], 'regressors') and infoout[i].regressors: if not infoout[i].regressor_names: infoout[i].regressor_names = \ ['R%d' % j for j in range(len(infoout[i].regressors))] else: infoout[i].regressors = [] infoout[i].regressor_names = [] for j, r in enumerate(reg): regidx = len(infoout[i].regressors) infoout[i].regressor_names.insert(regidx, regnames[j]) infoout[i].regressors.insert(regidx, r) return infoout def _generate_design(self, infolist=None): if isdefined(self.inputs.subject_info): infolist = self.inputs.subject_info else: infolist = gen_info(self.inputs.event_files) sparselist = self._generate_clustered_design(infolist) super(SpecifySparseModel, self)._generate_design(infolist=sparselist) def _list_outputs(self): outputs = self._outputs().get() if not hasattr(self, '_sessinfo'): self._generate_design() outputs['session_info'] = self._sessinfo if isdefined(self.inputs.save_plot) and self.inputs.save_plot: outputs['sparse_png_file'] = os.path.join(os.getcwd(), 'sparse.png') outputs['sparse_svg_file'] = os.path.join(os.getcwd(), 'sparse.svg') return outputs
bsd-3-clause
-2,022,518,823,186,561,500
42.349078
102
0.52372
false
3.956988
false
false
false
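Note: the module-level helpers near the top of the record (gcd, spm_hrf, scale_timings) can be exercised independently of the interface classes. A short sketch, assuming nipype is installed so the functions are importable from nipype.algorithms.modelgen as the record's path suggests; expected values follow the docstrings in the record:

import numpy as np
from nipype.algorithms.modelgen import gcd, spm_hrf, scale_timings

print(gcd(22, 55))                    # 11, per the doctest in the record
hrf = spm_hrf(2.0)                    # canonical HRF sampled every 2 s
print(len(hrf), float(np.sum(hrf)))   # 17 samples, normalised to sum to 1
# Convert onsets given in scans to seconds for a TR of 2.5 s
print(scale_timings([0, 4, 8], 'scans', 'secs', 2.5))   # [0.0, 10.0, 20.0]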
gatoravi/svviz
src/svviz/kde.py
1
3322
#-------------------------------------------------------------------------------
#
#  Define classes for (uni/multi)-variate kernel density estimation.
#
#  Currently, only Gaussian kernels are implemented.
#
#  Written by: Robert Kern
#
#  Date: 2004-08-09
#
#  Modified: 2005-02-10 by Robert Kern.
#              Contributed to Scipy
#            2005-10-07 by Robert Kern.
#              Some fixes to match the new scipy_core
#
#  Copyright 2004-2005 by Enthought, Inc.
#
#-------------------------------------------------------------------------------

from __future__ import division

from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \
    power, sum, linalg
import numpy as np


class gaussian_kde(object):
    def __init__(self, dataset):
        self.dataset = atleast_2d(dataset)
        if not self.dataset.size > 1:
            raise ValueError("`dataset` input should have multiple elements.")
        self.d, self.n = self.dataset.shape
        self._compute_covariance()

    def evaluate(self, points):
        points = atleast_2d(points)

        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d, self.d)
                raise ValueError(msg)

        result = zeros((m,), dtype=np.float)

        if m >= self.n:
            # there are more points than data, so loop over data
            for i in range(self.n):
                diff = self.dataset[:, i, newaxis] - points
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0) / 2.0
                result = result + exp(-energy)
        else:
            # loop over points
            for i in range(m):
                diff = self.dataset - points[:, i, newaxis]
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0) / 2.0
                result[i] = sum(exp(-energy), axis=0)

        result = result / self._norm_factor

        return result

    __call__ = evaluate

    def scotts_factor(self):
        return power(self.n, -1. / (self.d + 4))

    def _compute_covariance(self):
        self.factor = self.scotts_factor()
        # Cache covariance and inverse covariance of the data
        if not hasattr(self, '_data_inv_cov'):
            self._data_covariance = atleast_2d(np.cov(self.dataset, rowvar=1,
                                                      bias=False))
            self._data_inv_cov = linalg.inv(self._data_covariance)

        self.covariance = self._data_covariance * self.factor**2
        self.inv_cov = self._data_inv_cov / self.factor**2
        self._norm_factor = sqrt(linalg.det(2 * pi * self.covariance)) * self.n


if __name__ == '__main__':
    from biorpy import r
    from scipy import stats

    values = np.concatenate([np.random.normal(size=20), np.random.normal(loc=6, size=30)])
    kde = stats.gaussian_kde(values)
    x = np.linspace(-5, 10, 50)
    y = kde(x)
    print y
    r.plot(x, y, type="l", col="red")

    kde2 = gaussian_kde(values)
    y2 = kde2(x)
    r.lines(x, y2, col="blue", lty=2)

    raw_input("")
mit
-4,350,426,905,358,045,000
29.477064
93
0.524985
false
3.728395
false
false
false
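Note: a brief usage sketch for the gaussian_kde class defined in the record above; the svviz.kde import path is inferred from the file path (src/svviz/kde.py) and is an assumption, and the class can otherwise be pasted into a local module:

import numpy as np
from svviz.kde import gaussian_kde

# Bimodal 1-D sample, mirroring the record's own __main__ demo
values = np.concatenate([np.random.normal(size=20),
                         np.random.normal(loc=6, size=30)])
kde = gaussian_kde(values)      # bandwidth chosen via Scott's factor
x = np.linspace(-5, 10, 50)
density = kde(x)                # __call__ is an alias for evaluate()
print(density.shape, float(density.max()))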
kevinlee12/oppia
core/controllers/suggestion_test.py
1
56805
# coding: utf-8 # # Copyright 2018 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for suggestion controllers.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules from constants import constants from core.domain import exp_domain from core.domain import exp_fetchers from core.domain import exp_services from core.domain import feedback_services from core.domain import question_domain from core.domain import question_services from core.domain import rights_manager from core.domain import skill_services from core.domain import state_domain from core.domain import story_domain from core.domain import story_services from core.domain import suggestion_services from core.domain import topic_domain from core.domain import topic_services from core.domain import user_services from core.platform import models from core.tests import test_utils import feconf (suggestion_models, feedback_models) = models.Registry.import_models([ models.NAMES.suggestion, models.NAMES.feedback]) class SuggestionUnitTests(test_utils.GenericTestBase): EXP_ID = 'exp1' TRANSLATION_LANGUAGE_CODE = 'en' AUTHOR_EMAIL = 'author@example.com' AUTHOR_EMAIL_2 = 'author2@example.com' REVIEWER_EMAIL = 'reviewer@example.com' TRANSLATOR_EMAIL = 'translator@example.com' NORMAL_USER_EMAIL = 'user@example.com' def setUp(self): super(SuggestionUnitTests, self).setUp() self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.AUTHOR_EMAIL, 'author') self.signup(self.AUTHOR_EMAIL_2, 'author2') self.signup(self.NORMAL_USER_EMAIL, 'normalUser') self.signup(self.REVIEWER_EMAIL, 'reviewer') self.signup(self.TRANSLATOR_EMAIL, 'translator') self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.author_id_2 = self.get_user_id_from_email(self.AUTHOR_EMAIL_2) self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL) self.translator_id = self.get_user_id_from_email(self.TRANSLATOR_EMAIL) self.set_admins([self.ADMIN_USERNAME]) user_services.allow_user_to_review_translation_in_language( self.reviewer_id, 'hi') self.editor = user_services.UserActionsInfo(self.editor_id) # Login and create exploration and suggestions. 
self.login(self.EDITOR_EMAIL) exploration = ( self.save_new_linear_exp_with_state_names_and_interactions( self.EXP_ID, self.editor_id, ['State 1', 'State 2', 'State 3'], ['TextInput'], category='Algebra')) self.old_content = state_domain.SubtitledHtml( 'content', '<p>old content html</p>').to_dict() exploration.states['State 1'].update_content( state_domain.SubtitledHtml.from_dict(self.old_content)) exploration.states['State 2'].update_content( state_domain.SubtitledHtml.from_dict(self.old_content)) exploration.states['State 3'].update_content( state_domain.SubtitledHtml.from_dict(self.old_content)) exp_services._save_exploration(self.editor_id, exploration, '', []) # pylint: disable=protected-access rights_manager.publish_exploration(self.editor, self.EXP_ID) rights_manager.assign_role_for_exploration( self.editor, self.EXP_ID, self.owner_id, rights_manager.ROLE_EDITOR) self.new_content = state_domain.SubtitledHtml( 'content', '<p>new content html</p>').to_dict() self.resubmit_change_content = state_domain.SubtitledHtml( 'content', '<p>resubmit change content html</p>').to_dict() self.logout() self.login(self.AUTHOR_EMAIL) csrf_token = self.get_new_csrf_token() self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models .SUGGESTION_TYPE_EDIT_STATE_CONTENT), 'target_type': ( suggestion_models.TARGET_TYPE_EXPLORATION), 'target_id': 'exp1', 'target_version_at_submission': exploration.version, 'change': { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 1', 'old_value': self.old_content, 'new_value': self.new_content }, 'description': 'change to state 1', }, csrf_token=csrf_token) self.logout() self.login(self.AUTHOR_EMAIL_2) csrf_token = self.get_new_csrf_token() self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models .SUGGESTION_TYPE_EDIT_STATE_CONTENT), 'target_type': ( suggestion_models.TARGET_TYPE_EXPLORATION), 'target_id': 'exp1', 'target_version_at_submission': exploration.version, 'change': { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 2', 'old_value': self.old_content, 'new_value': self.new_content }, 'description': 'change to state 2', }, csrf_token=csrf_token) self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models .SUGGESTION_TYPE_EDIT_STATE_CONTENT), 'target_type': ( suggestion_models.TARGET_TYPE_EXPLORATION), 'target_id': 'exp1', 'target_version_at_submission': exploration.version, 'change': { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 3', 'old_value': self.old_content, 'new_value': self.new_content }, 'description': 'change to state 3', }, csrf_token=csrf_token) self.logout() self.login(self.TRANSLATOR_EMAIL) csrf_token = self.get_new_csrf_token() self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT), 'target_type': suggestion_models.TARGET_TYPE_EXPLORATION, 'target_id': 'exp1', 'target_version_at_submission': exploration.version, 'change': { 'cmd': exp_domain.CMD_ADD_TRANSLATION, 'state_name': 'State 3', 'content_id': 'content', 'language_code': 'hi', 'content_html': '<p>old content html</p>', 'translation_html': '<p>In Hindi</p>' }, 'description': 'change to state 3', }, csrf_token=csrf_token) self.logout() def test_create_suggestion(self): self.login(self.AUTHOR_EMAIL_2) csrf_token 
= self.get_new_csrf_token() exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID) self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models .SUGGESTION_TYPE_EDIT_STATE_CONTENT), 'target_type': ( suggestion_models.TARGET_TYPE_EXPLORATION), 'target_id': 'exp1', 'target_version_at_submission': exploration.version, 'change': { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 3', 'new_value': self.new_content }, 'description': 'change again to state 3', }, csrf_token=csrf_token) suggestions = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2))['suggestions'] self.assertEqual(len(suggestions), 3) self.logout() def test_create_suggestion_invalid_target_version_input(self): self.login(self.AUTHOR_EMAIL_2) csrf_token = self.get_new_csrf_token() response = self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models .SUGGESTION_TYPE_EDIT_STATE_CONTENT), 'target_type': ( suggestion_models.TARGET_TYPE_EXPLORATION), 'target_id': 'exp1', 'target_version_at_submission': 'invalid target version', 'change': { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 3', 'new_value': self.new_content }, 'description': 'change again to state 3', }, csrf_token=csrf_token, expected_status_int=400) suggestions = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2))['suggestions'] self.assertEqual( response['error'], 'Expected target_version_at_submission to be an int, received <type' ' \'unicode\'>') self.assertEqual(len(suggestions), 2) self.logout() def test_suggestion_to_exploration_handler_with_invalid_suggestion_id(self): self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] csrf_token = self.get_new_csrf_token() # Invalid format of suggestion id. response = self.put_json( '%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], 'invalid_suggestion_id'), { 'action': u'reject', 'review_message': u'Rejected!' }, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'Invalid format for suggestion_id. It must contain 3 parts ' 'separated by \'.\'') csrf_token = self.get_new_csrf_token() # Suggestion does not exist. self.put_json( '%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], 'exploration.target_id.id'), { 'action': u'reject', 'review_message': u'Rejected!' 
}, csrf_token=csrf_token, expected_status_int=404) self.logout() def test_suggestion_to_exploration_handler_with_invalid_target_type(self): self.login(self.EDITOR_EMAIL) question_dict = { 'question_state_data': self._create_valid_question_data( 'default_state').to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': ['skill_id'] } exp_id = 'new_exp_id' self.save_new_default_exploration(exp_id, self.editor_id) suggestion_services.create_suggestion( suggestion_models.SUGGESTION_TYPE_ADD_QUESTION, suggestion_models.TARGET_TYPE_TOPIC, exp_id, 1, self.author_id, { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': question_dict, 'skill_id': None, 'skill_difficulty': 0.3 }, None) suggestion_id = suggestion_services.query_suggestions( [('author_id', self.author_id), ( 'target_id', exp_id)])[0].suggestion_id csrf_token = self.get_new_csrf_token() response = self.put_json( '%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, exp_id, suggestion_id), { 'action': u'reject', 'review_message': u'Rejected!' }, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'This handler allows actions only on suggestions to explorations.') self.logout() def test_suggestion_to_exploration_handler_with_invalid_target_id(self): self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] self.save_new_default_exploration('exp_id', self.editor_id) csrf_token = self.get_new_csrf_token() response = self.put_json( '%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, 'exp_id', suggestion_to_accept['suggestion_id']), { 'action': u'reject', 'review_message': u'Rejected!' }, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'The exploration id provided does not match the exploration id ' 'present as part of the suggestion_id') self.logout() def test_owner_of_exploration_cannot_repond_to_own_suggestion(self): self.login(self.EDITOR_EMAIL) exp_id = 'new_exp_id' self.save_new_default_exploration(exp_id, self.editor_id) new_content = state_domain.SubtitledHtml( 'content', '<p>new content html</p>').to_dict() change_cmd = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 1', 'new_value': new_content } suggestion_services.create_suggestion( suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT, suggestion_models.TARGET_TYPE_EXPLORATION, exp_id, 1, self.editor_id, change_cmd, 'sample description') suggestion_id = suggestion_services.query_suggestions( [('author_id', self.editor_id), ( 'target_id', exp_id)])[0].suggestion_id csrf_token = self.get_new_csrf_token() response = self.put_json( '%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, exp_id, suggestion_id), { 'action': u'reject', 'review_message': u'Rejected!' 
}, csrf_token=csrf_token, expected_status_int=401) self.assertEqual( response['error'], 'You cannot accept/reject your own suggestion.') self.logout() def test_suggestion_to_exploration_handler_with_invalid_action(self): self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] csrf_token = self.get_new_csrf_token() response = self.put_json( '%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), {'action': 'invalid_action'}, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'Invalid action.') self.logout() def test_reject_suggestion_to_exploration(self): self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_reject = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] suggestion = suggestion_services.get_suggestion_by_id( suggestion_to_reject['suggestion_id']) self.assertEqual( suggestion.status, suggestion_models.STATUS_IN_REVIEW) csrf_token = self.get_new_csrf_token() self.put_json('%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_reject['target_id'], suggestion_to_reject['suggestion_id']), { 'action': u'reject', 'review_message': u'Rejected!' }, csrf_token=csrf_token) suggestion = suggestion_services.get_suggestion_by_id( suggestion_to_reject['suggestion_id']) self.assertEqual( suggestion.status, suggestion_models.STATUS_REJECTED) self.logout() def test_accept_suggestion(self): exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID) # Test editor can accept successfully. self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] csrf_token = self.get_new_csrf_token() self.put_json('%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), { 'action': u'accept', 'commit_message': u'commit message', 'review_message': u'Accepted' }, csrf_token=csrf_token) suggestion_post_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] self.assertEqual( suggestion_post_accept['status'], suggestion_models.STATUS_ACCEPTED) exploration = exp_fetchers.get_exploration_by_id(self.EXP_ID) self.assertEqual( exploration.states[suggestion_to_accept[ 'change']['state_name']].content.html, suggestion_to_accept['change']['new_value']['html']) self.logout() # Testing user without permissions cannot accept. self.login(self.NORMAL_USER_EMAIL) suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2))['suggestions'][0] csrf_token = self.get_new_csrf_token() self.put_json('%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), { 'action': u'accept', 'commit_message': u'commit message', 'review_message': u'Accepted' }, csrf_token=csrf_token, expected_status_int=401) self.logout() # Testing that author cannot accept own suggestion. 
self.login(self.AUTHOR_EMAIL_2) suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2))['suggestions'][0] csrf_token = self.get_new_csrf_token() self.put_json('%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), { 'action': u'accept', 'commit_message': u'commit message', 'review_message': u'Accepted' }, csrf_token=csrf_token, expected_status_int=401) # Testing users with scores above threshold can accept. self.login(self.AUTHOR_EMAIL) suggestion_services.increment_score_for_user( self.author_id, 'content.Algebra', 15) csrf_token = self.get_new_csrf_token() self.put_json('%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), { 'action': u'accept', 'commit_message': u'commit message', 'review_message': u'Accepted' }, csrf_token=csrf_token) suggestion_post_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2))['suggestions'][0] self.assertEqual( suggestion_post_accept['status'], suggestion_models.STATUS_ACCEPTED) self.logout() # Testing admins can accept suggestions. self.login(self.ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2))['suggestions'][1] self.put_json('%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), { 'action': u'accept', 'commit_message': u'commit message', 'review_message': u'Accepted' }, csrf_token=csrf_token) suggestion_post_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2))['suggestions'][1] self.assertEqual( suggestion_post_accept['status'], suggestion_models.STATUS_ACCEPTED) self.logout() def test_suggestion_list_handler_with_invalid_query_field(self): response = self.get_json( '%s?invalid_query_field=value' % ( feconf.SUGGESTION_LIST_URL_PREFIX), expected_status_int=400) self.assertEqual( response['error'], 'Not allowed to query on field invalid_query_field') def test_suggestion_list_handler(self): suggestions = self.get_json( '%s?author_id=%s&target_type=%s&target_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id_2, suggestion_models.TARGET_TYPE_EXPLORATION, self.EXP_ID) )['suggestions'] self.assertEqual(len(suggestions), 2) def test_cannot_resubmit_suggestion_with_invalid_suggestion_id(self): self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() response = self.put_json( '%s/resubmit/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, 'invalid_suggestion_id'), { 'action': u'reject', 'review_message': u'Rejected!' 
}, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'No suggestion found with given suggestion id') def test_resubmit_rejected_suggestion(self): self.login(self.EDITOR_EMAIL) csrf_token = self.get_new_csrf_token() suggestion = suggestion_services.query_suggestions( [('author_id', self.author_id), ('target_id', self.EXP_ID)])[0] suggestion_services.reject_suggestion( suggestion, self.reviewer_id, 'reject message') self.logout() self.login(self.AUTHOR_EMAIL) csrf_token = self.get_new_csrf_token() self.put_json('%s/resubmit/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion.suggestion_id), { 'summary_message': 'summary message', 'action': u'resubmit', 'change': { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 1', 'new_value': self.resubmit_change_content, 'old_value': self.old_content } }, csrf_token=csrf_token) suggestion = suggestion_services.query_suggestions( [('author_id', self.author_id), ('target_id', self.EXP_ID)])[0] self.assertEqual( suggestion.status, suggestion_models.STATUS_IN_REVIEW) self.assertEqual( suggestion.change.new_value['html'], self.resubmit_change_content['html']) self.assertEqual( suggestion.change.cmd, exp_domain.CMD_EDIT_STATE_PROPERTY) self.assertEqual( suggestion.change.property_name, exp_domain.STATE_PROPERTY_CONTENT) self.assertEqual( suggestion.change.state_name, 'State 1') self.logout() def test_translation_accept_suggestion_by_reviewer(self): # Test reviewer can accept successfully. self.login(self.REVIEWER_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.translator_id))['suggestions'][0] csrf_token = self.get_new_csrf_token() self.put_json('%s/exploration/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), { 'action': u'accept', 'commit_message': u'commit message', 'review_message': u'Accepted' }, csrf_token=csrf_token) suggestion_post_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.translator_id))['suggestions'][0] self.assertEqual( suggestion_post_accept['status'], suggestion_models.STATUS_ACCEPTED) self.logout() class QuestionSuggestionTests(test_utils.GenericTestBase): AUTHOR_EMAIL = 'author@example.com' AUTHOR_EMAIL_2 = 'author2@example.com' # Needs to be 12 characters long. 
SKILL_ID = 'skill1234567' SKILL_DESCRIPTION = 'skill to link question to' def setUp(self): super(QuestionSuggestionTests, self).setUp() self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME) self.signup(self.AUTHOR_EMAIL, 'author') self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL) self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.set_admins([self.ADMIN_USERNAME]) self.save_new_skill( self.SKILL_ID, self.admin_id, description=self.SKILL_DESCRIPTION) self.question_dict = { 'question_state_data': self._create_valid_question_data( 'default_state').to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': [self.SKILL_ID] } self.login(self.AUTHOR_EMAIL) csrf_token = self.get_new_csrf_token() self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models.SUGGESTION_TYPE_ADD_QUESTION), 'target_type': suggestion_models.TARGET_TYPE_SKILL, 'target_id': self.SKILL_ID, 'target_version_at_submission': 1, 'change': { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': self.question_dict, 'skill_id': self.SKILL_ID, 'skill_difficulty': 0.3 }, 'description': 'Add new question to skill' }, csrf_token=csrf_token) self.logout() def test_query_question_suggestions(self): suggestions = self.get_json( '%s?suggestion_type=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, suggestion_models.SUGGESTION_TYPE_ADD_QUESTION) )['suggestions'] self.assertEqual(len(suggestions), 1) suggestion = suggestions[0] self.assertEqual( suggestion['suggestion_type'], suggestion_models.SUGGESTION_TYPE_ADD_QUESTION) self.assertEqual(suggestion['target_id'], self.SKILL_ID) self.assertEqual( suggestion['target_type'], suggestion_models.TARGET_TYPE_SKILL) self.assertEqual( suggestion['change']['cmd'], question_domain.CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION) def test_accept_question_suggestion(self): suggestion_to_accept = self.get_json( '%s?suggestion_type=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, suggestion_models.SUGGESTION_TYPE_ADD_QUESTION) )['suggestions'][0] self.login(self.ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True): self.put_json('%s/skill/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), { 'action': u'accept', 'commit_message': u'commit message', 'review_message': u'This looks good!', 'skill_id': self.SKILL_ID }, csrf_token=csrf_token) suggestion_post_accept = self.get_json( '%s?suggestion_type=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, suggestion_models.SUGGESTION_TYPE_ADD_QUESTION) )['suggestions'][0] self.assertEqual( suggestion_post_accept['status'], suggestion_models.STATUS_ACCEPTED) ( questions, merged_question_skill_links, _) = ( question_services.get_displayable_question_skill_link_details( 1, [self.SKILL_ID], '')) self.assertEqual(len(questions), 1) self.assertEqual( merged_question_skill_links[0].skill_descriptions, [self.SKILL_DESCRIPTION]) self.assertEqual( merged_question_skill_links[0].skill_difficulties, [0.3]) self.assertEqual( questions[0].question_content, self.question_dict['question_state_data']['content']['html'] ) thread_messages = feedback_services.get_messages( suggestion_to_accept['suggestion_id']) last_message = thread_messages[len(thread_messages) - 1] self.assertEqual(last_message.text, 'This looks good!') class SkillSuggestionTests(test_utils.GenericTestBase): AUTHOR_EMAIL = 'author@example.com' 
REVIEWER_EMAIL = 'reviewer@example.com' def setUp(self): super(SkillSuggestionTests, self).setUp() self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME) self.signup(self.AUTHOR_EMAIL, 'author') self.signup(self.REVIEWER_EMAIL, 'reviewer') self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL) self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL) self.set_admins([self.ADMIN_USERNAME]) user_services.allow_user_to_review_question(self.reviewer_id) self.skill_id = skill_services.get_new_skill_id() self.save_new_skill( self.skill_id, self.admin_id, description='Description') self.question_dict = { 'question_state_data': self._create_valid_question_data( 'default_state').to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': [self.skill_id] } self.login(self.AUTHOR_EMAIL) csrf_token = self.get_new_csrf_token() self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models.SUGGESTION_TYPE_ADD_QUESTION), 'target_type': suggestion_models.TARGET_TYPE_SKILL, 'target_id': self.skill_id, 'target_version_at_submission': 1, 'change': { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': self.question_dict, 'skill_id': self.skill_id, 'skill_difficulty': 0.3 }, 'description': 'Add new question to skill' }, csrf_token=csrf_token) self.logout() def test_cannot_access_suggestion_to_skill_handler(self): self.login(self.ADMIN_EMAIL) thread_id = feedback_services.create_thread( suggestion_models.TARGET_TYPE_QUESTION, self.skill_id, self.author_id, 'description', '', has_suggestion=True) csrf_token = self.get_new_csrf_token() self.put_json( '%s/skill/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, self.skill_id, thread_id), { 'action': u'reject', 'review_message': u'Rejected!' }, csrf_token=csrf_token, expected_status_int=400) self.logout() def test_suggestion_to_skill_handler_with_invalid_target_type(self): self.login(self.ADMIN_EMAIL) exp_id = 'new_exp_id' self.save_new_default_exploration(exp_id, self.admin_id) new_content = state_domain.SubtitledHtml( 'content', '<p>new content html</p>').to_dict() change_cmd = { 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'State 1', 'new_value': new_content } suggestion_services.create_suggestion( suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT, suggestion_models.TARGET_TYPE_EXPLORATION, exp_id, 1, self.author_id, change_cmd, 'sample description') suggestion_id = suggestion_services.query_suggestions( [('author_id', self.author_id), ( 'target_id', exp_id)])[0].suggestion_id csrf_token = self.get_new_csrf_token() with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True): response = self.put_json( '%s/skill/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, self.skill_id, suggestion_id), { 'action': u'reject', 'review_message': u'Rejected!' 
}, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'This handler allows actions only on suggestions to skills.') self.logout() def test_suggestion_to_skill_handler_with_invalid_target_id(self): self.login(self.ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] csrf_token = self.get_new_csrf_token() with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True): response = self.put_json( '%s/skill/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, 'skill_id', suggestion_to_accept['suggestion_id']), { 'action': u'reject', 'review_message': u'Rejected!' }, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'The skill id provided does not match the skill id ' 'present as part of the suggestion_id') self.logout() def test_suggestion_to_skill_handler_with_invalid_action(self): self.login(self.ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] csrf_token = self.get_new_csrf_token() with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True): response = self.put_json( '%s/skill/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), {'action': 'invalid_action'}, csrf_token=csrf_token, expected_status_int=400) self.assertEqual( response['error'], 'Invalid action.') self.logout() def test_reject_suggestion_to_skill(self): self.login(self.ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_reject = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] suggestion = suggestion_services.get_suggestion_by_id( suggestion_to_reject['suggestion_id']) self.assertEqual( suggestion.status, suggestion_models.STATUS_IN_REVIEW) csrf_token = self.get_new_csrf_token() with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True): self.put_json('%s/skill/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_reject['target_id'], suggestion_to_reject['suggestion_id']), { 'action': u'reject', 'review_message': u'Rejected!' 
}, csrf_token=csrf_token) suggestion = suggestion_services.get_suggestion_by_id( suggestion_to_reject['suggestion_id']) self.assertEqual( suggestion.status, suggestion_models.STATUS_REJECTED) self.logout() def test_accept_suggestion_to_skill(self): self.login(self.ADMIN_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] suggestion = suggestion_services.get_suggestion_by_id( suggestion_to_accept['suggestion_id']) self.assertEqual( suggestion.status, suggestion_models.STATUS_IN_REVIEW) csrf_token = self.get_new_csrf_token() with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True): self.put_json('%s/skill/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), { 'action': u'accept', 'commit_message': u'commit message', 'review_message': u'Accepted!', 'skill_id': self.skill_id }, csrf_token=csrf_token) suggestion = suggestion_services.get_suggestion_by_id( suggestion_to_accept['suggestion_id']) self.assertEqual( suggestion.status, suggestion_models.STATUS_ACCEPTED) self.logout() def test_reviewer_accept_suggestion_to_skill(self): self.login(self.REVIEWER_EMAIL) csrf_token = self.get_new_csrf_token() suggestion_to_accept = self.get_json( '%s?author_id=%s' % ( feconf.SUGGESTION_LIST_URL_PREFIX, self.author_id))['suggestions'][0] suggestion = suggestion_services.get_suggestion_by_id( suggestion_to_accept['suggestion_id']) self.assertEqual( suggestion.status, suggestion_models.STATUS_IN_REVIEW) csrf_token = self.get_new_csrf_token() with self.swap(constants, 'ENABLE_NEW_STRUCTURE_VIEWER_UPDATES', True): self.put_json('%s/skill/%s/%s' % ( feconf.SUGGESTION_ACTION_URL_PREFIX, suggestion_to_accept['target_id'], suggestion_to_accept['suggestion_id']), { 'action': u'accept', 'commit_message': u'commit message', 'review_message': u'Accepted!', 'skill_id': self.skill_id }, csrf_token=csrf_token) suggestion = suggestion_services.get_suggestion_by_id( suggestion_to_accept['suggestion_id']) self.assertEqual( suggestion.status, suggestion_models.STATUS_ACCEPTED) self.logout() class UserSubmittedSuggestionsHandlerTest(test_utils.GenericTestBase): """Unit test for the UserSubmittedSuggestionsHandler.""" AUTHOR_EMAIL = 'author@example.com' def setUp(self): super(UserSubmittedSuggestionsHandlerTest, self).setUp() self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.AUTHOR_EMAIL, 'author') self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.TOPIC_ID = 'topic' self.STORY_ID = 'story' self.EXP_ID = 'exp1' # Needs to be 12 characters long. 
self.SKILL_ID = 'skill1234567' self.SKILL_DESCRIPTION = 'skill to link question to' exploration = self.save_new_valid_exploration( self.EXP_ID, self.owner_id, title='Exploration title', category='Algebra', end_state_name='End State') self.publish_exploration(self.owner_id, self.EXP_ID) topic = topic_domain.Topic.create_default_topic( self.TOPIC_ID, 'topic', 'abbrev', 'description') topic_services.save_new_topic(self.owner_id, topic) story = story_domain.Story.create_default_story( self.STORY_ID, 'A story', 'Description', self.TOPIC_ID, 'story-a') story_services.save_new_story(self.owner_id, story) topic_services.add_canonical_story( self.owner_id, self.TOPIC_ID, self.STORY_ID) story_services.update_story( self.owner_id, self.STORY_ID, [story_domain.StoryChange({ 'cmd': 'add_story_node', 'node_id': 'node_1', 'title': 'Node1', }), story_domain.StoryChange({ 'cmd': 'update_story_node_property', 'property_name': 'exploration_id', 'node_id': 'node_1', 'old_value': None, 'new_value': self.EXP_ID })], 'Changes.') self.save_new_skill( self.SKILL_ID, self.owner_id, description=self.SKILL_DESCRIPTION) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.reviewer_id = self.editor_id self.set_admins([self.ADMIN_USERNAME]) self.editor = user_services.UserActionsInfo(self.editor_id) # Login and create exploration and suggestions. self.login(self.EDITOR_EMAIL) exp_services.update_exploration( self.owner_id, self.EXP_ID, [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'Introduction', 'new_value': { 'content_id': 'content', 'html': '<p>new content html</p>' } })], 'Add content') self.logout() self.login(self.AUTHOR_EMAIL) csrf_token = self.get_new_csrf_token() self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT), 'target_type': (suggestion_models.TARGET_TYPE_EXPLORATION), 'target_id': self.EXP_ID, 'target_version_at_submission': exploration.version, 'change': { 'cmd': exp_domain.CMD_ADD_TRANSLATION, 'state_name': 'Introduction', 'content_id': 'content', 'language_code': 'hi', 'content_html': '<p>new content html</p>', 'translation_html': '<p>new content html in Hindi</p>' }, 'description': 'Adds translation', }, csrf_token=csrf_token) self.question_dict = { 'question_state_data': self._create_valid_question_data( 'default_state').to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': [self.SKILL_ID] } self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models.SUGGESTION_TYPE_ADD_QUESTION), 'target_type': suggestion_models.TARGET_TYPE_SKILL, 'target_id': self.SKILL_ID, 'target_version_at_submission': 1, 'change': { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': self.question_dict, 'skill_id': None, 'skill_difficulty': 0.3 }, 'description': 'Add new question to skill' }, csrf_token=csrf_token) self.logout() def test_exploration_handler_returns_data(self): self.login(self.AUTHOR_EMAIL) response = self.get_json( '/getsubmittedsuggestions/exploration/translate_content') self.assertEqual(len(response['suggestions']), 1) self.assertEqual(len(response['target_id_to_opportunity_dict']), 1) response = self.get_json( 
'/getsubmittedsuggestions/topic/translate_content') self.assertEqual(response, {}) def test_skill_handler_returns_data(self): self.login(self.AUTHOR_EMAIL) response = self.get_json( '/getsubmittedsuggestions/skill/add_question') self.assertEqual(len(response['suggestions']), 1) self.assertEqual(len(response['target_id_to_opportunity_dict']), 1) response = self.get_json( '/getsubmittedsuggestions/topic/add_question') self.assertEqual(response, {}) def test_handler_with_invalid_suggestion_type_raise_error(self): self.login(self.AUTHOR_EMAIL) response = self.get_json( '/getsubmittedsuggestions/exploration/translate_content') self.assertEqual(len(response['suggestions']), 1) self.get_json( '/getsubmittedsuggestions/exploration/invalid_suggestion_type', expected_status_int=400) def test_handler_with_invalid_target_type_raise_error(self): self.login(self.AUTHOR_EMAIL) response = self.get_json( '/getsubmittedsuggestions/exploration/translate_content') self.assertEqual(len(response['suggestions']), 1) self.get_json( '/getsubmittedsuggestions/invalid_target_type' '/translate_content', expected_status_int=400) class ReviewableSuggestionsHandlerTest(test_utils.GenericTestBase): """Unit test for the ReviewableSuggestionsHandler.""" def setUp(self): super(ReviewableSuggestionsHandlerTest, self).setUp() self.AUTHOR_EMAIL = 'author@example.com' self.REVIEWER_EMAIL = 'reviewer@example.com' self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME) self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME) self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.signup(self.AUTHOR_EMAIL, 'author') self.signup(self.REVIEWER_EMAIL, 'reviewer') self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.TOPIC_ID = 'topic' self.STORY_ID = 'story' self.EXP_ID = 'exp1' # Needs to be 12 characters long. self.SKILL_ID = 'skill1234567' self.SKILL_DESCRIPTION = 'skill to link question to' exploration = self.save_new_valid_exploration( self.EXP_ID, self.owner_id, title='Exploration title', category='Algebra', end_state_name='End State') self.publish_exploration(self.owner_id, self.EXP_ID) topic = topic_domain.Topic.create_default_topic( self.TOPIC_ID, 'topic', 'abbrev', 'description') topic_services.save_new_topic(self.owner_id, topic) story = story_domain.Story.create_default_story( self.STORY_ID, 'A story', 'Description', self.TOPIC_ID, 'story-b') story_services.save_new_story(self.owner_id, story) topic_services.add_canonical_story( self.owner_id, self.TOPIC_ID, self.STORY_ID) story_services.update_story( self.owner_id, self.STORY_ID, [story_domain.StoryChange({ 'cmd': 'add_story_node', 'node_id': 'node_1', 'title': 'Node1', }), story_domain.StoryChange({ 'cmd': 'update_story_node_property', 'property_name': 'exploration_id', 'node_id': 'node_1', 'old_value': None, 'new_value': self.EXP_ID })], 'Changes.') self.save_new_skill( self.SKILL_ID, self.owner_id, description=self.SKILL_DESCRIPTION) self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL) self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL) self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL) self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL) self.set_admins([self.ADMIN_USERNAME]) self.editor = user_services.UserActionsInfo(self.editor_id) user_services.allow_user_to_review_question(self.reviewer_id) user_services.allow_user_to_review_translation_in_language( self.reviewer_id, 'hi') # Login and update exploration and suggestions. 
self.login(self.EDITOR_EMAIL) exp_services.update_exploration( self.owner_id, self.EXP_ID, [ exp_domain.ExplorationChange({ 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY, 'property_name': exp_domain.STATE_PROPERTY_CONTENT, 'state_name': 'Introduction', 'new_value': { 'content_id': 'content', 'html': '<p>new content html</p>' } })], 'Add content') self.logout() self.login(self.AUTHOR_EMAIL) csrf_token = self.get_new_csrf_token() self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models.SUGGESTION_TYPE_TRANSLATE_CONTENT), 'target_type': (suggestion_models.TARGET_TYPE_EXPLORATION), 'target_id': self.EXP_ID, 'target_version_at_submission': exploration.version, 'change': { 'cmd': exp_domain.CMD_ADD_TRANSLATION, 'state_name': 'Introduction', 'content_id': 'content', 'language_code': 'hi', 'content_html': '<p>new content html</p>', 'translation_html': '<p>new content html in Hindi</p>' }, 'description': 'Adds translation', }, csrf_token=csrf_token) self.question_dict = { 'question_state_data': self._create_valid_question_data( 'default_state').to_dict(), 'language_code': 'en', 'question_state_data_schema_version': ( feconf.CURRENT_STATE_SCHEMA_VERSION), 'linked_skill_ids': [self.SKILL_ID] } self.post_json( '%s/' % feconf.SUGGESTION_URL_PREFIX, { 'suggestion_type': ( suggestion_models.SUGGESTION_TYPE_ADD_QUESTION), 'target_type': suggestion_models.TARGET_TYPE_SKILL, 'target_id': self.SKILL_ID, 'target_version_at_submission': 1, 'change': { 'cmd': ( question_domain .CMD_CREATE_NEW_FULLY_SPECIFIED_QUESTION), 'question_dict': self.question_dict, 'skill_id': None, 'skill_difficulty': 0.3 }, 'description': 'Add new question to skill' }, csrf_token=csrf_token) self.logout() def test_exploration_handler_returns_data(self): self.login(self.REVIEWER_EMAIL) response = self.get_json( '/getreviewablesuggestions/exploration/translate_content') self.assertEqual(len(response['suggestions']), 1) self.assertEqual(len(response['target_id_to_opportunity_dict']), 1) response = self.get_json( '/getreviewablesuggestions/topic/translate_content') self.assertEqual(response, {}) def test_skill_handler_returns_data(self): self.login(self.REVIEWER_EMAIL) response = self.get_json( '/getreviewablesuggestions/skill/add_question') self.assertEqual(len(response['suggestions']), 1) self.assertEqual(len(response['target_id_to_opportunity_dict']), 1) response = self.get_json( '/getreviewablesuggestions/topic/add_question') self.assertEqual(response, {}) def test_handler_with_invalid_suggestion_type_raise_error(self): self.login(self.REVIEWER_EMAIL) response = self.get_json( '/getreviewablesuggestions/exploration/translate_content') self.assertEqual(len(response['suggestions']), 1) self.get_json( '/getreviewablesuggestions/exploration/invalid_suggestion_type', expected_status_int=404) def test_handler_with_invalid_target_type_raise_error(self): self.login(self.REVIEWER_EMAIL) response = self.get_json( '/getreviewablesuggestions/exploration/translate_content') self.assertEqual(len(response['suggestions']), 1) self.get_json( '/getreviewablesuggestions/invalid_target_type' '/translate_content', expected_status_int=400)
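Every handler test above drives the same PUT endpoint, built as '%s/<target type>/<target id>/<suggestion id>' under feconf.SUGGESTION_ACTION_URL_PREFIX (or '%s/resubmit/<suggestion id>' for resubmission), and only the payload changes with the action. The sketch below restates the payload shapes used in those tests; the dictionary names are illustrative and not part of the codebase.

# Payload shapes exercised by the tests above. The dict names are
# illustrative only; the URL layout mirrors the put_json calls in the tests.

# PUT <SUGGESTION_ACTION_URL_PREFIX>/exploration/<target_id>/<suggestion_id>
# or  <SUGGESTION_ACTION_URL_PREFIX>/skill/<target_id>/<suggestion_id>
accept_payload = {
    'action': u'accept',
    'commit_message': u'commit message',
    'review_message': u'Accepted',
    # question suggestions to skills additionally send 'skill_id'
}

reject_payload = {
    'action': u'reject',
    'review_message': u'Rejected!',
}

# PUT <SUGGESTION_ACTION_URL_PREFIX>/resubmit/<suggestion_id>
resubmit_payload = {
    'action': u'resubmit',
    'summary_message': 'summary message',
    'change': {},  # the revised change dict, as in test_resubmit_rejected_suggestion
}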
apache-2.0
-9,033,852,202,733,182,000
39.430605
111
0.565056
false
3.840511
true
false
false
grupoirona/django-date-validators
test_project/test_project/settings.py
1
2095
import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

SECRET_KEY = '6+dqad9^b51rix$3hc#rdn9@%6uhat+@$9udx^yh=j-1+8+2n*'

DEBUG = True

ALLOWED_HOSTS = []

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_date_validators'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'test_project.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'test_project.wsgi.application'

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

STATIC_URL = '/static/'
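These settings only register 'django_date_validators' in INSTALLED_APPS for the test project. As a hedged illustration of what such a test project exercises, a minimal sketch of attaching a date validator to a model field follows; the validator name and behavior are assumptions for illustration and are not taken from the package's actual API.

# Hypothetical usage sketch only: the validator below is an assumed stand-in,
# not necessarily what django_date_validators actually exports.
import datetime

from django.core.exceptions import ValidationError
from django.db import models


def validate_future_date(value):
    # Stand-in validator so the sketch is self-contained.
    if value <= datetime.date.today():
        raise ValidationError('%s is not a future date.' % value)


class Booking(models.Model):
    # A real project would import the validator from the package instead.
    event_date = models.DateField(validators=[validate_future_date])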
isc
8,153,460,069,941,095,000
23.360465
91
0.64821
false
3.550847
false
true
false
abhaystoic/barati
barati/vendors/views_cluster/list_product.py
1
1399
from django.shortcuts import render
from django.template import RequestContext
from django.shortcuts import render, render_to_response
from django.views.generic import View
from django.http import HttpResponse
from customers.models import Users as users
from customers.models import Vendors as vendors
from customers.models import Orders as orders
from customers.models import Address, Venue_Types, Card_Types, Beautician_Types
import sys, json


class List_Product(View):

    try:
        template_name = 'vendors/list_product.html'

        def get(self, request):
            context_dict = {}
            orders_list = []
            user = users.objects.get(username=request.user.username)
            #Allow only admin and vendors to see the vendor pages otherwise redirect to the customer index page
            if user.role == 'customer':
                self.template_name = 'customers/index.html'
            venue_subtypes = Venue_Types.objects.all()
            card_subtypes = Card_Types.objects.all()
            beautician_subtypes = Beautician_Types.objects.all()
            context_dict.update({
                'venue_subtypes' : venue_subtypes,
                'card_subtypes' : card_subtypes,
                'beautician_subtypes' : beautician_subtypes
            })
            return render(request, self.template_name, context_dict)

    except Exception as e:
        print e
        print sys.exc_traceback.tb_lineno
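List_Product is a class-based view, so it has to be registered with as_view() in a URLconf before it can serve requests. The pattern below is an illustrative sketch only; the route, regex, and name are assumptions rather than the project's actual urls.py.

# Illustrative wiring for the class-based view above; the route and name are
# assumptions, not taken from the repository's urls.py.
from django.conf.urls import url

from vendors.views_cluster.list_product import List_Product

urlpatterns = [
    url(r'^vendors/list-product/$', List_Product.as_view(), name='list_product'),
]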
apache-2.0
9,219,910,691,958,777,000
38.971429
108
0.68549
false
3.822404
false
false
false
ikargis/horizon_fod
openstack_dashboard/dashboards/project/images_and_snapshots/images/forms.py
1
10129
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Views for managing images. """ from django.conf import settings # noqa from django.forms import ValidationError # noqa from django.forms.widgets import HiddenInput # noqa from django.utils.translation import ugettext_lazy as _ # noqa from horizon import exceptions from horizon import forms from horizon import messages from openstack_dashboard import api IMAGE_BACKEND_SETTINGS = getattr(settings, 'OPENSTACK_IMAGE_BACKEND', {}) IMAGE_FORMAT_CHOICES = IMAGE_BACKEND_SETTINGS.get('image_formats', []) class CreateImageForm(forms.SelfHandlingForm): name = forms.CharField(max_length="255", label=_("Name"), required=True) description = forms.CharField(widget=forms.widgets.Textarea(), label=_("Description"), required=False) source_type = forms.ChoiceField( label=_('Image Source'), choices=[('url', _('Image Location')), ('file', _('Image File'))], widget=forms.Select(attrs={ 'class': 'switchable', 'data-slug': 'source'})) copy_from = forms.CharField(max_length="255", label=_("Image Location"), help_text=_("An external (HTTP) URL to load " "the image from."), widget=forms.TextInput(attrs={ 'class': 'switched', 'data-switch-on': 'source', 'data-source-url': _('Image Location')}), required=False) image_file = forms.FileField(label=_("Image File"), help_text=_("A local image to upload."), widget=forms.FileInput(attrs={ 'class': 'switched', 'data-switch-on': 'source', 'data-source-file': _('Image File')}), required=False) disk_format = forms.ChoiceField(label=_('Format'), required=True, choices=[], widget=forms.Select(attrs={'class': 'switchable'})) architecture = forms.CharField(max_length="255", label=_("Architecture"), required=False) minimum_disk = forms.IntegerField(label=_("Minimum Disk (GB)"), help_text=_('The minimum disk size' ' required to boot the' ' image. If unspecified, this' ' value defaults to 0' ' (no minimum).'), required=False) minimum_ram = forms.IntegerField(label=_("Minimum Ram (MB)"), help_text=_('The minimum memory size' ' required to boot the' ' image. 
If unspecified, this' ' value defaults to 0 (no' ' minimum).'), required=False) is_public = forms.BooleanField(label=_("Public"), required=False) protected = forms.BooleanField(label=_("Protected"), required=False) def __init__(self, *args, **kwargs): super(CreateImageForm, self).__init__(*args, **kwargs) if not settings.HORIZON_IMAGES_ALLOW_UPLOAD: self._hide_file_source_type() self.fields['disk_format'].choices = IMAGE_FORMAT_CHOICES def _hide_file_source_type(self): self.fields['image_file'].widget = HiddenInput() source_type = self.fields['source_type'] source_type.choices = [choice for choice in source_type.choices if choice[0] != 'file'] if len(source_type.choices) == 1: source_type.widget = HiddenInput() def clean(self): data = super(CreateImageForm, self).clean() # The image_file key can be missing based on particular upload # conditions. Code defensively for it here... image_file = data.get('image_file', None) if not data['copy_from'] and not image_file: raise ValidationError( _("A image or external image location must be specified.")) elif data['copy_from'] and image_file: raise ValidationError( _("Can not specify both image and external image location.")) else: return data def handle(self, request, data): # Glance does not really do anything with container_format at the # moment. It requires it is set to the same disk_format for the three # Amazon image types, otherwise it just treats them as 'bare.' As such # we will just set that to be that here instead of bothering the user # with asking them for information we can already determine. if data['disk_format'] in ('ami', 'aki', 'ari',): container_format = data['disk_format'] else: container_format = 'bare' meta = {'is_public': data['is_public'], 'protected': data['protected'], 'disk_format': data['disk_format'], 'container_format': container_format, 'min_disk': (data['minimum_disk'] or 0), 'min_ram': (data['minimum_ram'] or 0), 'name': data['name'], 'properties': {}} if data['description']: meta['properties']['description'] = data['description'] if data['architecture']: meta['properties']['architecture'] = data['architecture'] if (settings.HORIZON_IMAGES_ALLOW_UPLOAD and data.get('image_file', None)): meta['data'] = self.files['image_file'] else: meta['copy_from'] = data['copy_from'] try: image = api.glance.image_create(request, **meta) messages.success(request, _('Your image %s has been queued for creation.') % data['name']) return image except Exception: exceptions.handle(request, _('Unable to create new image.')) class UpdateImageForm(forms.SelfHandlingForm): image_id = forms.CharField(widget=forms.HiddenInput()) name = forms.CharField(max_length="255", label=_("Name")) description = forms.CharField(widget=forms.widgets.Textarea(), label=_("Description"), required=False) kernel = forms.CharField(max_length="36", label=_("Kernel ID"), required=False, widget=forms.TextInput( attrs={'readonly': 'readonly'} )) ramdisk = forms.CharField(max_length="36", label=_("Ramdisk ID"), required=False, widget=forms.TextInput( attrs={'readonly': 'readonly'} )) architecture = forms.CharField(label=_("Architecture"), required=False, widget=forms.TextInput( attrs={'readonly': 'readonly'} )) disk_format = forms.CharField(label=_("Format"), widget=forms.TextInput( attrs={'readonly': 'readonly'} )) public = forms.BooleanField(label=_("Public"), required=False) protected = forms.BooleanField(label=_("Protected"), required=False) def handle(self, request, data): image_id = data['image_id'] error_updating = _('Unable to update image "%s".') if 
data['disk_format'] in ['aki', 'ari', 'ami']: container_format = data['disk_format'] else: container_format = 'bare' meta = {'is_public': data['public'], 'protected': data['protected'], 'disk_format': data['disk_format'], 'container_format': container_format, 'name': data['name'], 'properties': {'description': data['description']}} if data['kernel']: meta['properties']['kernel_id'] = data['kernel'] if data['ramdisk']: meta['properties']['ramdisk_id'] = data['ramdisk'] if data['architecture']: meta['properties']['architecture'] = data['architecture'] # Ensure we do not delete properties that have already been # set on an image. meta['purge_props'] = False try: image = api.glance.image_update(request, image_id, **meta) messages.success(request, _('Image was successfully updated.')) return image except Exception: exceptions.handle(request, error_updating % image_id)
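Both handle() methods above apply the same rule spelled out in the inline comment: the three Amazon image types keep their disk format as the container format, and everything else becomes 'bare'. A small sketch of that rule as a standalone helper follows; the helper name is illustrative, since Horizon keeps this logic inline.

# The disk_format -> container_format rule used by both handle() methods
# above, factored into a helper for clarity. The name is illustrative.
AMAZON_IMAGE_FORMATS = ('ami', 'aki', 'ari')


def container_format_for(disk_format):
    # Amazon image types must reuse the disk format; everything else is 'bare'.
    if disk_format in AMAZON_IMAGE_FORMATS:
        return disk_format
    return 'bare'

# e.g. container_format_for('ami') == 'ami'; container_format_for('qcow2') == 'bare'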
apache-2.0
7,224,893,702,934,788,000
44.832579
78
0.51782
false
5.021815
false
false
false
eljrax/autoscale_setup
load_balancing/add_self_to_lb.py
1
5035
#!/usr/bin/env python ################################################################################### # # # This script should be executed as the last thing that happens during # # the configuration phase of a server. It will perform the health check # # defined in the load balanceri(s) configured below, and add itself as a # # node if successful. # # For example: if the load balancer has a HTTP health check expecting a # # 200 response from a request to /, it will make this call and verify the # # status code before adding itself as an ENABLED/ONLINE node. # # # # Please modify the variables in the CONFIGURATION section below before executing # # Author: Erik Ljungstrom # # License: Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 # ################################################################################### from __future__ import print_function import os import pyrax import netifaces as ni import urllib2 import socket import re import random from time import sleep ####################### CONFIGURATION ####################### # Network interface to grab the IP address from. This is the IP address # that will be used in the health check and ultimately added to the # load balancer. (REQUIRED) # e.g. iface = "eth1" iface = "" # LOAD BALANCER(S) (REQUIRED) # # e.g. # Single Load Balancer # lbs = [1234] # Multiple Load balancers # lbs = [1234, 5678] lbs = [] # Path to file containing credentials (REQUIRED) # e.g. credentials = '/opt/autoscale/.cloud_credentials' # File format: # # [rackspace_cloud] # username = # api_key = # credentials = '' # Name to send as Host: header with health check request (optional) host_header = None # Protocol to utilise in url check (override LB health check) (optional) protocol = None ###################################################################### def get_addr(iface): ni.ifaddresses(iface) ip = ni.ifaddresses(iface)[2][0]['addr'] return ip def health_check(health_check, port=80): addr = get_addr(iface) if not health_check.has_key('type'): print ("No health check present on load balancer") return if health_check.get('type') == 'CONNECT': check_port(addr, port, health_check.get('timeout')) elif health_check.get('type') in ['HTTP', 'HTTPS']: check_url(health_check, addr) else: raise Exception("Unsupported health check, please implement your own") def check_port(addr, port, timeout): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(timeout) result = sock.connect_ex((addr, port)) if result != 0: raise Exception("Error connecting to port %s: error: %s" % (port, result)) return result def check_url(health_check, addr): global host_header expected_resp = re.compile(health_check.get('bodyRegex', '.*')) expected_code = re.compile(health_check.get('statusRegex', '.*')) proto = protocol if protocol else health_check.get('type').lower() url = ("%s://%s/%s" % (proto, addr, health_check.get('path', '/'))) if not host_header: host_header = addr headers = { 'Host': host_header } req = urllib2.Request(url, headers=headers) response = urllib2.urlopen(req) contents_result = expected_resp.search(response.read()) status_result = expected_code.match(str(response.getcode())) if not contents_result or not status_result: raise Exception("check_url(): Response content does not match expected result") return True def main(): pyrax.set_setting("identity_type", "rackspace") pyrax.set_credential_file(credentials) clb = pyrax.cloud_loadbalancers my_ip = get_addr(iface) for lb_id in lbs: retry = 5 lb=clb.get(lb_id) try: 
health_check(lb.get_health_monitor(), lb.port) except Exception as e: print("Health check for LB %s failed with error: %s Not adding..." % (lb_id, str(e))) continue while retry > 0: try: pyrax.utils.wait_until(lb, "status", "ACTIVE", interval=1, attempts=30, verbose=False) node = clb.Node(address = my_ip, port = lb.port, condition = "ENABLED") res = lb.add_nodes([node]) print ("Node added to LB %s" % lb_id) break except pyrax.exceptions.ClientException as e: if "PENDING" in e.message: print ("Race condition hit, another server is adding itself. Retrying...") sleep(random.random()) if "Duplicate nodes" in e.message: print ("Node %s:%s already in LB %s.." % (my_ip, lb.port, lb_id)) break else: print ("Exception: %s" % e.message) break retry -= 1 if __name__ == "__main__": main()
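The CONFIGURATION section at the top of the script ships with empty values; the sketch below fills them in using the same example values the header comments already suggest. Everything here is a placeholder to be replaced with a real interface name, load balancer IDs, and credentials.

# Example CONFIGURATION values for the script above; all placeholders.
iface = "eth1"                                     # interface whose IP is health-checked and added to the LB
lbs = [1234, 5678]                                 # Cloud Load Balancer IDs to join
credentials = '/opt/autoscale/.cloud_credentials'  # pyrax credential file (format shown below)
host_header = "www.example.com"                    # optional Host: header for HTTP(S) health checks
protocol = None                                    # None = use the protocol from the LB health monitor

# Contents of /opt/autoscale/.cloud_credentials (dummy values):
# [rackspace_cloud]
# username = my_cloud_user
# api_key = 0123456789abcdef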
apache-2.0
-7,555,560,228,802,524,000
32.125
102
0.57994
false
3.831811
false
false
false
coin-or/GiMPy
src/gimpy/graph.py
1
139676
''' A Graph class implementation. The aim for this implementation is 1. To reflect implementation methods in literature as much as possible 3. To have something close to a "classic" object-oriented design (compared to previous versions) This implementation can be considered as a compromise between a graph class designed for visualization and an efficient graph data structure. One deviation from standard Graph implementations is to keep in neighbors in an other adjacency list. We do this for efficiency reasons considering traversing residual graphs. We have a class for Graph and a class for Node. Edges are not represented as objects. They are kept in a dictionary which also keeps their attributes. Graph display related methods are inspired from Pydot. They are re-written considering GIMPy needs. We also borrow two methods from Pydot, see global_constants.py for details. Default graph type is an undirected graph. No custom exception will raise when the user tries to get in_neighbors of an undirected graph. She should be aware of this. Python will raise an exception since user is trying to read an attribute that does not exits. Methods that implement algorithms has display argument in their API. If this argument is not specified global display setting will be used for display purposes of the algorithm method implements. You can use display argument to get visualization of algorithm without changing global display behavior of your Graph/Tree object. Method documentation strings are orginized as follows. API: method_name(arguments) Description: Description of the method. Input: Arguments and their explanation. Pre: Necessary class attributes that should exists, methods to be called before this method. Post: Class attributes changed within the method. Return: Return value of the method. TODO(aykut): -> svg display mode -> label_strong_components() API change. Check backward compatibilty. -> dfs should use search()? -> display mode svg is not supported. future: -> The solution we find is not strongly feasible. Fix this. ''' from __future__ import division from __future__ import print_function from __future__ import absolute_import from future import standard_library standard_library.install_aliases() from builtins import str from builtins import range from past.utils import old_div from builtins import object from .global_constants import * try: from src.blimpy import Stack, Queue, PriorityQueue except ImportError: from coinor.blimpy import Stack, Queue, PriorityQueue import subprocess # for call() import io # for StringIO() import copy # for deepcopy() import sys # for exit() import random # for seed, random, randint import tempfile # for mkstemp() import os # for close() import operator # for itemgetter() try: import pygtk import gtk import xdot except ImportError: XDOT_INSTALLED = False else: XDOT_INSTALLED = True try: import dot2tex # for dot2tex method except ImportError: DOT2TEX_INSTALLED = False else: DOT2TEX_INSTALLED = True try: from PIL import Image as PIL_Image except ImportError: PIL_INSTALLED = False else: PIL_INSTALLED = True try: import matplotlib except ImportError: MATPLOTLIB_INSTALLED = False else: MATPLOTLIB_INSTALLED = True # matplotlib.use('TkAgg') import matplotlib.pyplot as plt plt.rcParams['figure.dpi'] = 300 def handle_close(evt): print('Figure closed. Exiting!') exit() class Node(object): ''' Node class. A node object keeps node attributes. Has a method to write node in Dot language grammer. 
''' def __init__(self, name, **attr): ''' API: __init__(self, name, **attrs) Description: Node class constructor. Sets name and attributes using arguments. Input: name: Name of node. **attrs: Node attributes. Post: Sets self.name and self.attr. ''' self.name = name self.attr = copy.deepcopy(DEFAULT_NODE_ATTRIBUTES) for a in attr: self.attr[a] = attr[a] def get_attr(self, attr): ''' API: get_attr(self, attr) Description: Returns node attribute attr. Input: attr: Node attribute to get. Return: Returns Node attribute attr if exists returns None, otherwise. ''' if attr in self.attr: return self.attr[attr] else: return None def set_attr(self, attr, value): ''' API: set_attr(self, attr, value) Description: Sets node attribute attr to value. Input: attr: Node attribute to set. value: New value of the attribute. Post: Updates self.attr[attr]. ''' self.attr[attr] = value def to_string(self): ''' API: to_string(self) Description: Returns string representation of node in dot language. Return: String representation of node. ''' node = list() node.append(quote_if_necessary(str(self.name))) node.append(' [') flag = False for a in self.attr: flag = True node.append(a) node.append('=') node.append(quote_if_necessary(str(self.attr[a]))) node.append(', ') if flag is True: node = node[:-1] node.append(']') return ''.join(node) def __repr__(self): ''' API: __repr__(self) Description: Returns string representation of node in dot language. Return: String representation of node. ''' return self.to_string() class Graph(object): ''' Graph class, implemented using adjacency list. See GIMPy README for more information. ''' def __init__(self, **attr): ''' API: __init__(self, **attrs) Description: Graph class constructor. Sets attributes using argument. Input: **attrs: Graph attributes. Post: Sets following attributes using **attrs; self.attr, self.graph_type. Creates following initial attributes; self.neighbors, self.in_neighbors, self.nodes, self.out_neighbors, self.cluster ''' # graph attributes self.attr = copy.deepcopy(DEFAULT_GRAPH_ATTRIBUTES) # set attributes using constructor for a in attr: self.attr[a] = attr[a] # set name if 'name' in self.attr: self.name = self.attr['name'] else: self.name = 'G' # edge attributes self.edge_attr = dict() # we treat type attribute and keep it in a separate class attribute if 'type' in self.attr: self.graph_type = self.attr['type'] else: self.graph_type = UNDIRECTED_GRAPH # adjacency list of nodes, it is a dictionary of lists self.neighbors = {} # if the graph is undirected we do not need in_neighbor if self.graph_type is DIRECTED_GRAPH: self.in_neighbors = {} self.nodes = {} self.edge_connect_symbol = EDGE_CONNECT_SYMBOL[self.graph_type] self.out_neighbors = self.neighbors if 'display' not in self.attr: self.attr['display']='off' if 'layout' not in self.attr: self.attr['layout'] = 'fdp' self.attr['cluster_count'] = 0 self.cluster = {} def __repr__(self): ''' API: __repr__(self) Description: Returns string representation of the graph. Return: String representation of the graph. ''' data = str() for n in self.nodes: data += str(n) data += ' -> ' data += self.neighbors[n].__repr__() data += '\n' data = data[:-1] return data def __contains__(self, item): ''' API: __contains__(self, item) Description: Return true if item is in graph. item can be a node name or a tuple that represents an edge. Return: True if item is in graph. 
''' if isinstance(item, tuple): name1 = item[0] name2 = item[1] if self.graph_type is DIRECTED_GRAPH: return (name1, name2) in self.edge_attr else: return ((name1, name2) in self.edge_attr or (name2, name1) in self.edge_attr) else: return item in self.nodes def add_node(self, name, **attr): ''' API: add_node(self, name, **attr) Description: Adds node to the graph. Pre: Graph should not contain a node with this name. We do not allow multiple nodes with the same name. Input: name: Name of the node. attr: Node attributes. Post: self.neighbors, self.nodes and self.in_neighbors are updated. Return: Node (a Node class instance) added to the graph. ''' if name in self.neighbors: raise MultipleNodeException self.neighbors[name] = list() if self.graph_type is DIRECTED_GRAPH: self.in_neighbors[name] = list() self.nodes[name] = Node(name, **attr) return self.nodes[name] def del_node(self, name): ''' API: del_node(self, name) Description: Removes node from Graph. Input: name: Name of the node. Pre: Graph should contain a node with this name. Post: self.neighbors, self.nodes and self.in_neighbors are updated. ''' if name not in self.neighbors: raise Exception('Node %s does not exist!' %str(name)) for n in self.neighbors[name]: del self.edge_attr[(name, n)] if self.graph_type == UNDIRECTED_GRAPH: self.neighbors[n].remove(name) else: self.in_neighbors[n].remove(name) if self.graph_type is DIRECTED_GRAPH: for n in self.in_neighbors[name]: del self.edge_attr[(n, name)] self.neighbors[n].remove(name) del self.neighbors[name] del self.in_neighbors[name] del self.nodes[name] def add_edge(self, name1, name2, **attr): ''' API: add_edge(self, name1, name2, **attr) Description: Adds edge to the graph. Sets edge attributes using attr argument. Input: name1: Name of the source node (if directed). name2: Name of the sink node (if directed). attr: Edge attributes. Pre: Graph should not already contain this edge. We do not allow multiple edges with same source and sink nodes. Post: self.edge_attr is updated. self.neighbors, self.nodes and self.in_neighbors are updated if graph was missing at least one of the nodes. ''' if (name1, name2) in self.edge_attr: raise MultipleEdgeException if self.graph_type is UNDIRECTED_GRAPH and (name2,name1) in self.edge_attr: raise MultipleEdgeException self.edge_attr[(name1,name2)] = copy.deepcopy(DEFAULT_EDGE_ATTRIBUTES) for a in attr: self.edge_attr[(name1,name2)][a] = attr[a] if name1 not in self.nodes: self.add_node(name1) if name2 not in self.nodes: self.add_node(name2) self.neighbors[name1].append(name2) if self.graph_type is UNDIRECTED_GRAPH: self.neighbors[name2].append(name1) else: self.in_neighbors[name2].append(name1) def del_edge(self, e): ''' API: del_edge(self, e) Description: Removes edge from graph. Input: e: Tuple that represents edge, in (source,sink) form. Pre: Graph should contain this edge. Post: self.edge_attr, self.neighbors and self.in_neighbors are updated. ''' if self.graph_type is DIRECTED_GRAPH: try: del self.edge_attr[e] except KeyError: raise Exception('Edge %s does not exists!' %str(e)) self.neighbors[e[0]].remove(e[1]) self.in_neighbors[e[1]].remove(e[0]) else: try: del self.edge_attr[e] except KeyError: try: del self.edge_attr[(e[1],e[0])] except KeyError: raise Exception('Edge %s does not exists!' %str(e)) self.neighbors[e[0]].remove(e[1]) self.neighbors[e[1]].remove(e[0]) def get_node(self, name): ''' API: get_node(self, name) Description: Returns node object with the provided name. Input: name: Name of the node. 
Return: Returns node object if node exists, returns None otherwise. ''' if name in self.nodes: return self.nodes[name] else: return None def get_edge_cost(self, edge): ''' API: get_edge_cost(self, edge) Description: Returns cost attr of edge, required for minimum_spanning_tree_kruskal(). Input: edge: Tuple that represents edge, in (source,sink) form. Return: Returns cost attribute value of the edge. ''' return self.get_edge_attr(edge[0], edge[1], 'cost') def check_edge(self, name1, name2): ''' API: check_edge(self, name1, name2) Description: Return True if edge exists, False otherwise. Input: name1: name of the source node. name2: name of the sink node. Return: Returns True if edge exists, False otherwise. ''' if self.graph_type is DIRECTED_GRAPH: return (name1, name2) in self.edge_attr else: return ((name1, name2) in self.edge_attr or (name2, name1) in self.edge_attr) def get_node_list(self): ''' API: get_node_list(self) Description: Returns node list. Return: List of nodes. ''' return list(self.neighbors.keys()) def get_edge_list(self): ''' API: get_edge_list(self) Description: Returns edge list. Return: List of edges, edges are tuples and in (source,sink) format. ''' return list(self.edge_attr.keys()) def get_node_num(self): ''' API: get_node_num(self) Description: Returns number of nodes. Return: Number of nodes. ''' return len(self.neighbors) def get_edge_num(self): ''' API: get_edge_num(self) Description: Returns number of edges. Return: Number of edges. ''' return len(self.edge_attr) def get_node_attr(self, name, attr): ''' API: get_node_attr(self, name, attr) Description: Returns attribute attr of given node. Input: name: Name of node. attr: Attribute of node. Pre: Graph should have this node. Return: Value of node attribute attr. ''' return self.get_node(name).get_attr(attr) def get_edge_attr(self, n, m, attr): ''' API: get_edge_attr(self, n, m, attr) Description: Returns attribute attr of edge (n,m). Input: n: Source node name. m: Sink node name. attr: Attribute of edge. Pre: Graph should have this edge. Return: Value of edge attribute attr. ''' if self.graph_type is DIRECTED_GRAPH: return self.edge_attr[(n,m)][attr] else: try: return self.edge_attr[(n,m)][attr] except KeyError: return self.edge_attr[(m,n)][attr] def set_node_attr(self, name, attr, value): ''' API: set_node_attr(self, name, attr) Description: Sets attr attribute of node named name to value. Input: name: Name of node. attr: Attribute of node to set. Pre: Graph should have this node. Post: Node attribute will be updated. ''' self.get_node(name).set_attr(attr, value) def set_edge_attr(self, n, m, attr, value): ''' API: set_edge_attr(self, n, m, attr, value) Description: Sets attr attribute of edge (n,m) to value. Input: n: Source node name. m: Sink node name. attr: Attribute of edge to set. value: New value of attribute. Pre: Graph should have this edge. Post: Edge attribute will be updated. ''' if self.graph_type is DIRECTED_GRAPH: self.edge_attr[(n,m)][attr] = value else: try: self.edge_attr[(n,m)][attr] = value except KeyError: self.edge_attr[(m,n)][attr] = value def get_neighbors(self, name): ''' API: get_neighbors(self, name) Description: Returns list of neighbors of given node. Input: name: Node name. Pre: Graph should have this node. Return: List of neighbor node names. ''' return self.neighbors[name] def get_in_neighbors(self, name): ''' API: get_in_neighbors(self, name) Description: Returns list of in neighbors of given node. Input: name: Node name. Pre: Graph should have this node. 
Return: List of in-neighbor node names. ''' return self.in_neighbors[name] def get_out_neighbors(self, name): ''' API: get_out_neighbors(self, name) Description: Returns list of out-neighbors of given node. Input: name: Node name. Pre: Graph should have this node. Return: List of out-neighbor node names. ''' return self.neighbors[name] def edge_to_string(self, e): ''' API: edge_to_string(self, e) Description: Return string that represents edge e in dot language. Input: e: Edge tuple in (source,sink) format. Pre: Graph should have this edge. Return: String that represents given edge. ''' edge = list() edge.append(quote_if_necessary(str(e[0]))) edge.append(self.edge_connect_symbol) edge.append(quote_if_necessary(str(e[1]))) # return if there is nothing in self.edge_attr[e] if len(self.edge_attr[e]) == 0: return ''.join(edge) edge.append(' [') for a in self.edge_attr[e]: edge.append(a) edge.append('=') edge.append(quote_if_necessary(str(self.edge_attr[e][a]))) edge.append(', ') edge = edge[:-1] edge.append(']') return ''.join(edge) def to_string(self): ''' API: to_string(self) Description: This method is based on pydot Graph class with the same name. Returns a string representation of the graph in dot language. It will return the graph and all its subelements in string form. Return: String that represents graph in dot language. ''' graph = list() processed_edges = {} graph.append('%s %s {\n' %(self.graph_type, self.name)) for a in self.attr: if a not in GRAPH_ATTRIBUTES: continue val = self.attr[a] if val is not None: graph.append( '%s=%s' % (a, quote_if_necessary(val)) ) else: graph.append(a) graph.append( ';\n' ) # clusters for c in self.cluster: graph.append('subgraph cluster_%s {\n' %c) for a in self.cluster[c]['attrs']: if a=='label': graph.append(a+'='+quote_if_necessary(self.cluster[c]['attrs'][a])+';\n') continue graph.append(a+'='+self.cluster[c]['attrs'][a]+';\n') if len(self.cluster[c]['node_attrs'])!=0: graph.append('node [') for a in self.cluster[c]['node_attrs']: graph.append(a+'='+self.cluster[c]['node_attrs'][a]) graph.append(',') if len(self.cluster[c]['node_attrs'])!=0: graph.pop() graph.append('];\n') # process cluster nodes for n in self.cluster[c]['node_list']: data = self.get_node(n).to_string() graph.append(data + ';\n') # process cluster edges for n in self.cluster[c]['node_list']: for m in self.cluster[c]['node_list']: if self.check_edge(n,m): data = self.edge_to_string((n,m)) graph.append(data + ';\n') processed_edges[(n,m)]=None graph.append('}\n') # process remaining (non-cluster) nodes for n in self.neighbors: for c in self.cluster: if n in self.cluster[c]['node_list']: break else: data = self.get_node(n).to_string() graph.append(data + ';\n') # process edges for e in self.edge_attr: if e in processed_edges: continue data = self.edge_to_string(e) graph.append(data + ';\n') graph.append( '}\n' ) return ''.join(graph) def label_components(self, display = None): ''' API: label_components(self, display=None) Description: This method labels the nodes of an undirected graph with component numbers so that each node has the same label as all nodes in the same component. It will display the algortihm if display argument is provided. Input: display: display method. Pre: self.graph_type should be UNDIRECTED_GRAPH. Post: Nodes will have 'component' attribute that will have component number as value. 
''' if self.graph_type == DIRECTED_GRAPH: raise Exception("label_components only works for ", "undirected graphs") self.num_components = 0 for n in self.get_node_list(): self.get_node(n).set_attr('component', None) for n in self.neighbors: self.get_node(n).set_attr('label', '-') for n in self.get_node_list(): if self.get_node(n).get_attr('component') == None: self.search(n, display=display, component=self.num_components, algo='DFS') self.num_components += 1 def tarjan(self): ''' API: tarjan(self) Description: Implements Tarjan's algorithm for determining strongly connected set of nodes. Pre: self.graph_type should be DIRECTED_GRAPH. Post: Nodes will have 'component' attribute that will have component number as value. Changes 'index' attribute of nodes. ''' index = 0 component = 0 q = [] for n in self.get_node_list(): if self.get_node_attr(n, 'index') is None: index, component = self.strong_connect(q, n, index, component) def strong_connect(self, q, node, index, component): ''' API: strong_connect (self, q, node, index, component) Description: Used by tarjan method. This method should not be called directly by user. Input: q: Node list. node: Node that is being connected to nodes in q. index: Index used by tarjan method. component: Current component number. Pre: Should be called by tarjan and itself (recursive) only. Post: Nodes will have 'component' attribute that will have component number as value. Changes 'index' attribute of nodes. Return: Returns new index and component numbers. ''' self.set_node_attr(node, 'index', index) self.set_node_attr(node, 'lowlink', index) index += 1 q.append(node) for m in self.get_neighbors(node): if self.get_node_attr(m, 'index') is None: index, component = self.strong_connect(q, m, index, component) self.set_node_attr(node, 'lowlink', min([self.get_node_attr(node, 'lowlink'), self.get_node_attr(m, 'lowlink')])) elif m in q: self.set_node_attr(node, 'lowlink', min([self.get_node_attr(node, 'lowlink'), self.get_node_attr(m, 'index')])) if self.get_node_attr(node, 'lowlink') == self.get_node_attr(node, 'index'): m = q.pop() self.set_node_attr(m, 'component', component) while (node!=m): m = q.pop() self.set_node_attr(m, 'component', component) component += 1 self.num_components = component return (index, component) def label_strong_component(self): ''' API: label_strong_component(self) Description: This method labels the nodes of a directed graph with component numbers so that each node has the same label as all nodes in the same component. Pre: self.graph_type should be DIRECTED_GRAPH. Post: Nodes will have 'component' attribute that will have component number as value. Changes 'index' attribute of nodes. ''' self.num_components = 0 self.tarjan() def dfs(self, root, disc_count = 0, finish_count = 1, component = None, transpose = False, display = None, pred = None): ''' API: dfs(self, root, disc_count = 0, finish_count = 1, component=None, transpose=False) Description: Make a depth-first search starting from node with name root. Input: root: Starting node name. disc_count: Discovery time. finish_count: Finishing time. component: component number. transpose: Goes in the reverse direction along edges if transpose is True. Post: Nodes will have 'component' attribute that will have component number as value. Updates 'disc_time' and 'finish_time' attributes of nodes which represents discovery time and finishing time. Return: Returns a tuple that has discovery time and finish time of the last node in the following form (disc_time,finish_time). 
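Example:
    An illustrative usage sketch (node names are invented for
    demonstration):

        g = Graph(type=DIRECTED_GRAPH, display='off')
        g.add_edge('a', 'b')
        g.add_edge('b', 'c')
        g.dfs('a')
        # 'a', 'b' and 'c' now carry 'disc_time' and 'finish_time'
        # attributes recording when the search reached and left them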
''' if pred == None: pred = {} if display == None: display = self.attr['display'] else: self.set_display_mode(display) neighbors = self.neighbors if self.graph_type == DIRECTED_GRAPH and transpose: neighbors = self.in_neighbors self.get_node(root).set_attr('component', component) disc_count += 1 self.get_node(root).set_attr('disc_time', disc_count) self.get_node(root).set_attr('label', str(disc_count)+',-') self.get_node(root).set_attr('color', 'blue') if root in pred: self.set_edge_attr(pred[root], root, 'color', 'green') self.display() if transpose: fTime = [] for n in neighbors[root]: fTime.append((n,self.get_node(n).get_attr('finish_time'))) neighbor_list = sorted(fTime, key=operator.itemgetter(1)) neighbor_list = list(t[0] for t in neighbor_list) neighbor_list.reverse() else: neighbor_list = neighbors[root] for i in neighbor_list: if not transpose: if self.get_node(i).get_attr('disc_time') is None: pred[i] = root disc_count, finish_count = self.dfs(i, disc_count, finish_count, component, transpose, pred = pred) else: if self.get_node(i).get_attr('component') is None: disc_count, finish_count = self.dfs(i, disc_count, finish_count, component, transpose, pred = pred) self.get_node(root).set_attr('finish_time', finish_count) d_time = self.get_node(root).get_attr('disc_time') label = '"' + str(d_time) + ',' + str(finish_count) + '"' self.get_node(root).set_attr('label', label) self.get_node(root).set_attr('color', 'green') self.display() finish_count += 1 return disc_count, finish_count def bfs(self, root, display = None, component = None): ''' API: bfs(self, root, display = None, component=None) Description: Make a breadth-first search starting from node with name root. Input: root: Starting node name. display: display method. component: component number. Post: Nodes will have 'component' attribute that will have component number as value. ''' self.search(root, display = display, component = component, q = Queue()) def search(self, source, destination = None, display = None, component = None, q = None, algo = 'DFS', reverse = False, **kargs): ''' API: search(self, source, destination = None, display = None, component = None, q = Stack(), algo = 'DFS', reverse = False, **kargs) Description: Generic search method. Changes behavior (dfs,bfs,dijkstra,prim) according to algo argument. if destination is not specified: This method determines all nodes reachable from "source" ie. creates precedence tree and returns it (dictionary). if destionation is given: If there exists a path from "source" to "destination" it will return list of the nodes is this path. If there is no such path, it will return the precedence tree constructed from source (dictionary). Optionally, it marks all nodes reachable from "source" with a component number. The variable "q" determines the order in which the nodes are searched. Input: source: Search starts from node with this name. destination: Destination node name. display: Display method. algo: Algortihm that specifies search. Available algortihms are 'DFS', 'BFS', 'Dijkstra' and 'Prim'. reverse: Search goes in reverse arc directions if True. kargs: Additional keyword arguments. Post: Nodes will have 'component' attribute that will have component number as value (if component argument provided). Color attribute of nodes and edges may change. Return: Returns predecessor tree in dictionary form if destination is not specified, returns list of node names in the path from source to destionation if destionation is specified and there is a path. 
If there is no path returns predecessor tree in dictionary form. See description section. ''' if display == None: display = self.attr['display'] else: self.set_display_mode(display) if algo == 'DFS': if q is None: q = Stack() self.get_node(source).set_attr('component', component) elif algo == 'BFS' or algo == 'UnweightedSPT': if q is None: q = Queue() self.get_node(source).set_attr('component', component) elif algo == 'Dijkstra' or algo == 'Prim': if q is None: q = PriorityQueue() else: print("Unknown search algorithm...exiting") return neighbors = self.neighbors if self.graph_type == DIRECTED_GRAPH and reverse: neighbors = self.in_neighbors for i in self.get_node_list(): self.get_node(i).set_attr('label', '-') self.get_node(i).attr.pop('priority', None) self.get_node(i).set_attr('distance', None) self.get_node(i).set_attr('color', 'black') for j in neighbors[i]: if reverse: self.set_edge_attr(j, i, 'color', 'black') else: self.set_edge_attr(i, j, 'color', 'black') self.display() pred = {} self.process_edge_search(None, source, pred, q, component, algo, **kargs) found = True if source != destination: found = False while not q.isEmpty() and not found: current = q.peek() if self.get_node(current).get_attr('color') == 'green': q.remove(current) continue self.process_node_search(current, q, **kargs) self.get_node(current).set_attr('color', 'blue') if current != source: if reverse: self.set_edge_attr(current, pred[current], 'color', 'green') else: self.set_edge_attr(pred[current], current, 'color', 'green') if current == destination: found = True break self.display() for n in neighbors[current]: if self.get_node(n).get_attr('color') != 'green': if reverse: self.set_edge_attr(n, current, 'color', 'yellow') else: self.set_edge_attr(current, n, 'color', 'yellow') self.display() self.process_edge_search(current, n, pred, q, component, algo, **kargs) if reverse: self.set_edge_attr(n, current, 'color', 'black') else: self.set_edge_attr(current, n, 'color', 'black') q.remove(current) self.get_node(current).set_attr('color', 'green') self.display() if found: path = [destination] current = destination while current != source: path.insert(0, pred[current]) current = pred[current] return path if destination == None: return pred else: return None def process_node_search(self, node, q, **kwargs): ''' API: process_node_search(self, node, q, **kwargs) Description: Used by search() method. Process nodes along the search. Should not be called by user directly. Input: node: Name of the node being processed. q: Queue data structure. kwargs: Keyword arguments. Post: 'priority' attribute of the node may get updated. ''' if isinstance(q, PriorityQueue): self.get_node(node).set_attr('priority', q.get_priority(node)) def process_edge_dijkstra(self, current, neighbor, pred, q, component): ''' API: process_edge_dijkstra(self, current, neighbor, pred, q, component) Description: Used by search() method if the algo argument is 'Dijkstra'. Processes edges along Dijkstra's algorithm. User does not need to call this method directly. Input: current: Name of the current node. neighbor: Name of the neighbor node. pred: Predecessor tree. q: Data structure that holds nodes to be processed in a queue. component: component number. Post: 'color' attribute of nodes and edges may change. 
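Example:
    This helper is normally reached through search(); an illustrative
    sketch (the arc costs below are invented for demonstration):

        g = Graph(type=DIRECTED_GRAPH, display='off')
        g.add_edge('s', 'a', cost=2)
        g.add_edge('a', 't', cost=3)
        pred = g.search('s', algo='Dijkstra')
        # pred == {'a': 's', 't': 'a'}; each node's label holds its
        # shortest-path distance from 's' (0, 2 and 5 here)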
''' if current is None: self.get_node(neighbor).set_attr('color', 'red') self.get_node(neighbor).set_attr('label', 0) q.push(neighbor, 0) self.display() self.get_node(neighbor).set_attr('color', 'black') return new_estimate = (q.get_priority(current) + self.get_edge_attr(current, neighbor, 'cost')) if neighbor not in pred or new_estimate < q.get_priority(neighbor): pred[neighbor] = current self.get_node(neighbor).set_attr('color', 'red') self.get_node(neighbor).set_attr('label', new_estimate) q.push(neighbor, new_estimate) self.display() self.get_node(neighbor).set_attr('color', 'black') def process_edge_prim(self, current, neighbor, pred, q, component): ''' API: process_edge_prim(self, current, neighbor, pred, q, component) Description: Used by search() method if the algo argument is 'Prim'. Processes edges along Prim's algorithm. User does not need to call this method directly. Input: current: Name of the current node. neighbor: Name of the neighbor node. pred: Predecessor tree. q: Data structure that holds nodes to be processed in a queue. component: component number. Post: 'color' attribute of nodes and edges may change. ''' if current is None: self.get_node(neighbor).set_attr('color', 'red') self.get_node(neighbor).set_attr('label', 0) q.push(neighbor, 0) self.display() self.get_node(neighbor).set_attr('color', 'black') return new_estimate = self.get_edge_attr(current, neighbor, 'cost') if not neighbor in pred or new_estimate < q.get_priority(neighbor): pred[neighbor] = current self.get_node(neighbor).set_attr('color', 'red') self.get_node(neighbor).set_attr('label', new_estimate) q.push(neighbor, new_estimate) self.display() self.get_node(neighbor).set_attr('color', 'black') def process_edge_search(self, current, neighbor, pred, q, component, algo, **kargs): ''' API: process_edge_search(self, current, neighbor, pred, q, component, algo, **kargs) Description: Used by search() method. Processes edges according to the underlying algortihm. User does not need to call this method directly. Input: current: Name of the current node. neighbor: Name of the neighbor node. pred: Predecessor tree. q: Data structure that holds nodes to be processed in a queue. component: component number. algo: Search algorithm. See search() documentation. kwargs: Keyword arguments. Post: 'color', 'distance', 'component' attribute of nodes and edges may change. 
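Example:
    This helper is normally reached through search(); an illustrative
    sketch (node names are invented for demonstration):

        g = Graph(type=UNDIRECTED_GRAPH, display='off')
        g.add_edge('a', 'b')
        g.add_edge('b', 'c')
        g.search('a', algo='BFS')
        # 'a', 'b' and 'c' end up with 'distance' attributes 0, 1 and 2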
''' if algo == 'Dijkstra': return self.process_edge_dijkstra(current, neighbor, pred, q, component) if algo == 'Prim': return self.process_edge_prim(current, neighbor, pred, q, component) neighbor_node = self.get_node(neighbor) if current == None: neighbor_node.set_attr('distance', 0) if isinstance(q, PriorityQueue): q.push(neighbor, 0) else: q.push(neighbor) if component != None: neighbor_node.set_attr('component', component) neighbor_node.set_attr('label', component) else: neighbor_node.set_attr('label', 0) return if isinstance(q, PriorityQueue): current_priority = q.get_priority(neighbor) if algo == 'UnweightedSPT' or algo == 'BFS': priority = self.get_node(current).get_attr('distance') + 1 if algo == 'DFS': priority = -self.get_node(current).get_attr('distance') - 1 if current_priority is not None and priority >= current_priority: return q.push(neighbor, priority) if algo == 'UnweightedSPT' or algo == 'BFS': neighbor_node.set_attr('distance', priority) if algo == 'DFS': neighbor_node.set_attr('depth', -priority) else: distance = self.get_node(current).get_attr('distance') + 1 if ((algo == 'UnweightedSPT' or algo == 'BFS') and neighbor_node.get_attr('distance') is not None): return neighbor_node.set_attr('distance', distance) neighbor_node.set_attr('label', str(distance)) q.push(neighbor) pred[neighbor] = current neighbor_node.set_attr('color', 'red') if component != None: neighbor_node.set_attr('component', component) neighbor_node.set_attr('label', component) self.display() def minimum_spanning_tree_prim(self, source, display = None, q = PriorityQueue()): ''' API: minimum_spanning_tree_prim(self, source, display = None, q = PriorityQueue()) Description: Determines a minimum spanning tree of all nodes reachable from source using Prim's Algorithm. Input: source: Name of source node. display: Display method. q: Data structure that holds nodes to be processed in a queue. Post: 'color', 'distance', 'component' attribute of nodes and edges may change. Return: Returns predecessor tree in dictionary format. ''' if display == None: display = self.attr['display'] else: self.set_display_mode(display) if isinstance(q, PriorityQueue): addToQ = q.push removeFromQ = q.pop peek = q.peek isEmpty = q.isEmpty neighbors = self.get_neighbors pred = {} addToQ(source) done = False while not isEmpty() and not done: current = removeFromQ() self.set_node_attr(current, 'color', 'blue') if current != source: self.set_edge_attr(pred[current], current, 'color', 'green') self.display() for n in neighbors(current): if self.get_node_attr(n, 'color') != 'green': self.set_edge_attr(current, n, 'color', 'yellow') self.display() new_estimate = self.get_edge_attr(current, n, 'cost') if not n in pred or new_estimate < peek(n)[0]: pred[n] = current self.set_node_attr(n, 'color', 'red') self.set_node_attr(n, 'label', new_estimate) addToQ(n, new_estimate) self.display() self.set_node_attr(n, 'color', 'black') self.set_edge_attr(current, n, 'color', 'black') self.set_node_attr(current, 'color', 'green') self.display() return pred def minimum_spanning_tree_kruskal(self, display = None, components = None): ''' API: minimum_spanning_tree_kruskal(self, display = None, components = None) Description: Determines a minimum spanning tree using Kruskal's Algorithm. Input: display: Display method. component: component number. Post: 'color' attribute of nodes and edges may change. Return: Returns list of edges where edges are tuples in (source,sink) format. 
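Example:
    An illustrative usage sketch (the edge costs below are invented for
    demonstration):

        g = Graph(type=UNDIRECTED_GRAPH, display='off')
        g.add_edge('a', 'b', cost=1)
        g.add_edge('b', 'c', cost=2)
        g.add_edge('a', 'c', cost=5)
        tree = g.minimum_spanning_tree_kruskal()
        # tree == [('a', 'b'), ('b', 'c')], the two cheapest edges that
        # connect all three nodes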
''' if display == None: display = self.attr['display'] else: self.set_display_mode(display) if components is None: components = DisjointSet(display = display, layout = 'dot', optimize = False) sorted_edge_list = sorted(self.get_edge_list(), key=self.get_edge_cost) edges = [] for n in self.get_node_list(): components.add([n]) components.display() for e in sorted_edge_list: if len(edges) == len(self.get_node_list()) - 1: break self.set_edge_attr(e[0], e[1], 'color', 'yellow') self.display() if components.union(e[0], e[1]): self.set_edge_attr(e[0], e[1], 'color', 'green') self.display() edges.append(e) else: self.set_edge_attr(e[0], e[1], 'color', 'black') self.display() components.display() return edges def max_flow_preflowpush(self, source, sink, algo = 'FIFO', display = None): ''' API: max_flow_preflowpush(self, source, sink, algo = 'FIFO', display = None) Description: Finds maximum flow from source to sink by a depth-first search based augmenting path algorithm. Pre: Assumes a directed graph in which each arc has a 'capacity' attribute and for which there does does not exist both arcs (i,j) and (j,i) for any pair of nodes i and j. Input: source: Source node name. sink: Sink node name. algo: Algorithm choice, 'FIFO', 'SAP' or 'HighestLabel'. display: display method. Post: The 'flow' attribute of each arc gives a maximum flow. ''' if display == None: display = self.attr['display'] else: self.set_display_mode(display) nl = self.get_node_list() # set excess of all nodes to 0 for n in nl: self.set_node_attr(n, 'excess', 0) # set flow of all edges to 0 for e in self.edge_attr: self.edge_attr[e]['flow'] = 0 if 'capacity' in self.edge_attr[e]: capacity = self.edge_attr[e]['capacity'] self.edge_attr[e]['label'] = str(capacity)+'/0' else: self.edge_attr[e]['capacity'] = INF self.edge_attr[e]['label'] = 'INF/0' self.display() self.set_display_mode('off') self.search(sink, algo = 'UnweightedSPT', reverse = True) self.set_display_mode(display) disconnect = False for n in nl: if self.get_node_attr(n, 'distance') is None: disconnect = True self.set_node_attr(n, 'distance', 2*len(nl) + 1) if disconnect: print('Warning: graph contains nodes not connected to the sink...') if algo == 'FIFO': q = Queue() elif algo == 'SAP': q = Stack() elif algo == 'HighestLabel': q = PriorityQueue() for n in self.get_neighbors(source): capacity = self.get_edge_attr(source, n, 'capacity') self.set_edge_attr(source, n, 'flow', capacity) self.set_node_attr(n, 'excess', capacity) excess = self.get_node_attr(source, 'excess') self.set_node_attr(source, 'excess', excess - capacity) if algo == 'FIFO' or algo == 'SAP': q.push(n) elif algo == 'HighestLabel': q.push(n, -1) self.set_node_attr(source, 'distance', len(nl)) self.show_flow() while not q.isEmpty(): relabel = True current = q.peek() neighbors = (self.get_neighbors(current) + self.get_in_neighbors(current)) for n in neighbors: pushed = self.process_edge_flow(source, sink, current, n, algo, q) if pushed: self.show_flow() if algo == 'FIFO': '''With FIFO, we need to add the neighbors to the queue before the current is added back in or the nodes will be out of order ''' if q.peek(n) is None and n != source and n != sink: q.push(n) '''Keep pushing while there is excess''' if self.get_node_attr(current, 'excess') > 0: continue '''If we were able to push, then there we should not relabel ''' relabel = False break q.remove(current) if current != sink: if relabel: self.relabel(current) self.show_flow() if self.get_node_attr(current, 'excess') > 0: if algo == 'FIFO' or algo == 'SAP': 
q.push(current) elif algo == 'HighestLabel': q.push(current, -self.get_node_attr(current, 'distance')) if pushed and q.peek(n) is None and n != source: if algo == 'SAP': q.push(n) elif algo == 'HighestLabel': q.push(n, -self.get_node_attr(n, 'distance')) def process_edge_flow(self, source, sink, i, j, algo, q): ''' API: process_edge_flow(self, source, sink, i, j, algo, q) Description: Used by by max_flow_preflowpush() method. Processes edges along prefolow push. Input: source: Source node name of flow graph. sink: Sink node name of flow graph. i: Source node in the processed edge (tail of arc). j: Sink node in the processed edge (head of arc). Post: The 'flow' and 'excess' attributes of nodes may get updated. Return: Returns False if residual capacity is 0, True otherwise. ''' if (self.get_node_attr(i, 'distance') != self.get_node_attr(j, 'distance') + 1): return False if (i, j) in self.edge_attr: edge = (i, j) capacity = self.get_edge_attr(i, j, 'capacity') mult = 1 else: edge = (j, i) capacity = 0 mult = -1 flow = mult*self.edge_attr[edge]['flow'] residual_capacity = capacity - flow if residual_capacity == 0: return False excess_i = self.get_node_attr(i, 'excess') excess_j = self.get_node_attr(j, 'excess') push_amount = min(excess_i, residual_capacity) self.edge_attr[edge]['flow'] = mult*(flow + push_amount) self.set_node_attr(i, 'excess', excess_i - push_amount) self.set_node_attr(j, 'excess', excess_j + push_amount) return True def relabel(self, i): ''' API: relabel(self, i) Description: Used by max_flow_preflowpush() method for relabelling node i. Input: i: Node that is being relabelled. Post: 'distance' attribute of node i is updated. ''' min_distance = 2*len(self.get_node_list()) + 1 for j in self.get_neighbors(i): if (self.get_node_attr(j, 'distance') < min_distance and (self.get_edge_attr(i, j, 'flow') < self.get_edge_attr(i, j, 'capacity'))): min_distance = self.get_node_attr(j, 'distance') for j in self.get_in_neighbors(i): if (self.get_node_attr(j, 'distance') < min_distance and self.get_edge_attr(j, i, 'flow') > 0): min_distance = self.get_node_attr(j, 'distance') self.set_node_attr(i, 'distance', min_distance + 1) def show_flow(self): ''' API: relabel(self, i) Description: Used by max_flow_preflowpush() method for display purposed. Post: 'color' and 'label' attribute of edges/nodes are updated. ''' for n in self.get_node_list(): excess = self.get_node_attr(n, 'excess') distance = self.get_node_attr(n, 'distance') self.set_node_attr(n, 'label', str(excess)+'/'+str(distance)) for neighbor in self.get_neighbors(n): capacity = self.get_edge_attr(n, neighbor, 'capacity') flow = self.get_edge_attr(n, neighbor, 'flow') if capacity == INF: self.set_edge_attr(n, neighbor, 'label', 'INF'+'/'+str(flow)) else: self.set_edge_attr(n, neighbor, 'label', str(capacity)+'/'+str(flow)) if capacity == flow: self.set_edge_attr(n, neighbor, 'color', 'red') elif flow > 0: self.set_edge_attr(n, neighbor, 'color', 'green') else: self.set_edge_attr(n, neighbor, 'color', 'black') self.display() def create_residual_graph(self): ''' API: create_residual_graph(self) Description: Creates and returns residual graph, which is a Graph instance itself. Pre: (1) Arcs should have 'flow', 'capacity' and 'cost' attribute (2) Graph should be a directed graph Return: Returns residual graph, which is a Graph instance. 
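Example:
    An illustrative usage sketch (capacity, cost and flow values are
    invented for demonstration):

        g = Graph(type=DIRECTED_GRAPH, display='off')
        g.add_edge('u', 'v', capacity=5, cost=1)
        g.set_edge_attr('u', 'v', 'flow', 2)
        r = g.create_residual_graph()
        # r has arc ('u','v') with capacity 3 and cost 1 (remaining
        # capacity) and arc ('v','u') with capacity 2 and cost -1
        # (flow that can be pushed back)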
''' if self.graph_type is UNDIRECTED_GRAPH: raise Exception('residual graph is defined for directed graphs.') residual_g = Graph(type = DIRECTED_GRAPH) for e in self.get_edge_list(): capacity_e = self.get_edge_attr(e[0], e[1], 'capacity') flow_e = self.get_edge_attr(e[0], e[1], 'flow') cost_e = self.get_edge_attr(e[0], e[1], 'cost') if flow_e > 0: residual_g.add_edge(e[1], e[0], cost=-1*cost_e, capacity=flow_e) if capacity_e - flow_e > 0: residual_g.add_edge(e[0], e[1], cost=cost_e, capacity=capacity_e-flow_e) return residual_g def cycle_canceling(self, display): ''' API: cycle_canceling(self, display) Description: Solves minimum cost feasible flow problem using cycle canceling algorithm. Returns True when an optimal solution is found, returns False otherwise. 'flow' attribute values of arcs should be considered as junk when returned False. Input: display: Display method. Pre: (1) Arcs should have 'capacity' and 'cost' attribute. (2) Nodes should have 'demand' attribute, this value should be positive if the node is a supply node, negative if it is demand node and 0 if it is transhipment node. (3) graph should not have node 's' and 't'. Post: Changes 'flow' attributes of arcs. Return: Returns True when an optimal solution is found, returns False otherwise. ''' # find a feasible solution to flow problem if not self.find_feasible_flow(): return False # create residual graph residual_g = self.create_residual_graph() # identify a negative cycle in residual graph ncycle = residual_g.get_negative_cycle() # loop while residual graph has a negative cycle while ncycle is not None: # find capacity of cycle cap = residual_g.find_cycle_capacity(ncycle) # augment capacity amount along the cycle self.augment_cycle(cap, ncycle) # create residual graph residual_g = self.create_residual_graph() # identify next negative cycle ncycle = residual_g.get_negative_cycle() return True def find_feasible_flow(self): ''' API: find_feasible_flow(self) Description: Solves feasible flow problem, stores solution in 'flow' attribute or arcs. This method is used to get an initial feasible flow for simplex and cycle canceling algorithms. Uses max_flow() method. Other max flow methods can also be used. Returns True if a feasible flow is found, returns False, if the problem is infeasible. When the problem is infeasible 'flow' attributes of arcs should be considered as junk. Pre: (1) 'capacity' attribute of arcs (2) 'demand' attribute of nodes Post: Keeps solution in 'flow' attribute of arcs. Return: Returns True if a feasible flow is found, returns False, if the problem is infeasible ''' # establish a feasible flow in the network, to do this add nodes s and # t and solve a max flow problem. nl = self.get_node_list() for i in nl: b_i = self.get_node(i).get_attr('demand') if b_i > 0: # i is a supply node, add (s,i) arc self.add_edge('s', i, capacity=b_i) elif b_i < 0: # i is a demand node, add (i,t) arc self.add_edge(i, 't', capacity=-1*b_i) # solve max flow on this modified graph self.max_flow('s', 't', 'off') # check if all demand is satisfied, i.e. the min cost problem is # feasible or not for i in self.neighbors['s']: flow = self.get_edge_attr('s', i, 'flow') capacity = self.get_edge_attr('s', i, 'capacity') if flow != capacity: self.del_node('s') self.del_node('t') return False # remove node 's' and node 't' self.del_node('s') self.del_node('t') return True def get_layout(self): ''' API: get_layout(self) Description: Returns layout attribute of the graph. Return: Returns layout attribute of the graph. 
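Example:
    An illustrative usage sketch:

        g = Graph(type=DIRECTED_GRAPH, layout='dot', display='off')
        g.get_layout()        # returns 'dot'
        g.set_layout('fdp')
        g.get_layout()        # returns 'fdp'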
''' return self.attr['layout'] def set_layout(self, value): ''' API: set_layout(self, value) Description: Sets layout attribute of the graph to value. Input: value: New value of the layout. ''' self.attr['layout']=value if value == 'dot2tex': self.attr['d2tgraphstyle'] = 'every text node part/.style={align=center}' def write(self, file_obj, layout = None, format='png'): ''' API: write(self, file_obj, layout = None, format='png') Description: Writes graph to dist using layout and format. Input: file_obj: a file-like object that will be written to. layout: Dot layout for generating graph image. format: Image format, all format supported by Dot are wellcome. Post: File will be written to disk. ''' if layout == None: layout = self.get_layout() if format == 'dot': file_obj.write(bytearray(self.to_string(), 'utf8')) else: out = self.create(layout, format) if (out != None): file_obj.write(out) def create(self, layout, format, **args): ''' API: create(self, layout, format, **args) Description: Returns postscript representation of graph. Input: layout: Dot layout for generating graph image. format: Image format, all format supported by Dot are wellcome. Return: Returns postscript representation of graph. ''' tmp_fd, tmp_name = tempfile.mkstemp() tmp_file = os.fdopen(tmp_fd, 'w') tmp_file.write(self.to_string()) tmp_file.close() try: p = subprocess.run([layout, '-T'+format, tmp_name], capture_output = True) except OSError: print('''Graphviz executable not found. Graphviz must be installed and in your search path. Please visit http://www.graphviz.org/ for information on installation. After installation, ensure that the PATH variable is properly set.''') return None p.check_returncode() os.remove(tmp_name) if p.stderr: print(p.stderr) return p.stdout def display(self, highlight = None, basename = 'graph', format = 'png', pause = False, wait_for_click = True): ''' API: display(self, highlight = None, basename = 'graph', format = 'png', pause = True) Description: Displays graph according to the arguments provided. Current display modes: 'off', 'file', 'PIL', 'matplotlib', 'xdot', 'svg' Current layout modes: Layouts provided by graphviz ('dot', 'fdp', 'circo', etc.) and 'dot2tex'. Current formats: Formats provided by graphviz ('ps', 'pdf', 'png', etc.) Input: highlight: List of nodes to be highlighted. basename: File name. It will be used if display mode is 'file'. format: Image format, all format supported by Dot are wellcome. pause: If display is 'matplotlib', window will remain open until closed. wait_for_click: If display is 'matplotlib', setting to True will wait for a button click before proceeding. This is useful when animating an algorithm. Post: A display window will pop up or a file will be written depending on display mode. 
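Example:
    An illustrative usage sketch; 'example' is an invented file name and
    a working Graphviz installation is assumed (see create()):

        g = Graph(type=DIRECTED_GRAPH, layout='dot', display='file')
        g.add_edge('a', 'b')
        g.display(basename='example', format='png')
        # writes example.png to the current directory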
''' if self.attr['display'] == 'off': return if highlight != None: for n in highlight: if not isinstance(n, Node): n = self.get_node(n) n.set_attr('color', 'red') if self.get_layout() == 'dot2tex': if self.attr['display'] != 'file': self.attr['display'] = 'file' print("Warning: Dot2tex layout can only be used with display mode 'file'") print(" Automatically changing setting") if self.attr['display'] == 'file': if self.get_layout() == 'dot2tex': try: if DOT2TEX_INSTALLED: if format != 'pdf' or format != 'ps': print("Dot2tex only supports pdf and ps formats, falling back to pdf") format = 'pdf' self.set_layout('dot') tex = dot2tex.dot2tex(self.to_string(), autosize=True, texmode = 'math', template = DOT2TEX_TEMPLATE) else: print("Error: Dot2tex not installed.") except: try: self.set_layout('dot') with open(basename+'.dot', "w+b") as f: self.write(f, self.get_layout(), 'dot') p = subprocess.call(['dot2tex', '-t math', basename + '.dot']) except: print("There was an error running dot2tex.") with open(basename+'.tex', 'w') as f: f.write(tex) try: subprocess.call(['latex', basename]) if format == 'ps': subprocess.call(['dvips', basename]) elif format == 'pdf': subprocess.call(['pdflatex', basename]) self.set_layout('dot2tex') except: print("There was an error runing latex. Is it installed?") else: with open(basename+'.'+format, "w+b") as f: self.write(f, self.get_layout(), format) return elif self.attr['display'] == 'PIL': if PIL_INSTALLED: tmp_fd, tmp_name = tempfile.mkstemp() tmp_file = os.fdopen(tmp_fd, 'w+b') self.write(tmp_file, self.get_layout(), format) tmp_file.close() im = PIL_Image.open(tmp_name) im.show() os.remove(tmp_name) else: print('Error: PIL not installed. Display disabled.') self.attr['display'] = 'off' elif self.attr['display'] == 'matplotlib': if MATPLOTLIB_INSTALLED and PIL_INSTALLED: tmp_fd, tmp_name = tempfile.mkstemp() tmp_file = os.fdopen(tmp_fd, 'w+b') self.write(tmp_file, self.get_layout(), format) tmp_file.close() im = PIL_Image.open(tmp_name) fig = plt.figure(1) fig.canvas.mpl_connect('close_event', handle_close) plt.clf() plt.axis('off') plt.imshow(im, interpolation='bilinear' #resample=True #extent = (0, 100, 0, 100) ) if wait_for_click == True: plt.draw() try: if plt.waitforbuttonpress(timeout = 10000): plt.close() exit() except: exit() else: plt.show(block=pause) im.close() os.remove(tmp_name) else: print('Warning: Either matplotlib or Pillow is not installed. Display disabled.') self.attr['display'] = 'off' elif self.attr['display'] == 'xdot': if XDOT_INSTALLED: window = xdot.DotWindow() window.set_dotcode(self.to_string()) window.connect('destroy', gtk.main_quit) gtk.main() else: print('Error: xdot not installed. Display disabled.') self.attr['display'] = 'off' else: print("Unknown display mode: ", end=' ') print(self.attr['display']) if highlight != None: for n in highlight: if not isinstance(n, Node): n = self.get_node(n) n.set_attr('color', 'black') def set_display_mode(self, value): ''' API: set_display_mode(self, value) Description: Sets display mode to value. Input: value: New display mode. Post: Display mode attribute of graph is updated. ''' self.attr['display'] = value def max_flow(self, source, sink, display = None, algo = 'DFS'): ''' API: max_flow(self, source, sink, display=None) Description: Finds maximum flow from source to sink by a depth-first search based augmenting path algorithm. 
Pre: Assumes a directed graph in which each arc has a 'capacity' attribute and for which there does does not exist both arcs (i,j) and (j, i) for any pair of nodes i and j. Input: source: Source node name. sink: Sink node name. display: Display mode. Post: The 'flow" attribute of each arc gives a maximum flow. ''' if display is not None: old_display = self.attr['display'] self.attr['display'] = display nl = self.get_node_list() # set flow of all edges to 0 for e in self.edge_attr: self.edge_attr[e]['flow'] = 0 if 'capacity' in self.edge_attr[e]: capacity = self.edge_attr[e]['capacity'] self.edge_attr[e]['label'] = str(capacity)+'/0' else: self.edge_attr[e]['capacity'] = INF self.edge_attr[e]['label'] = 'INF/0' while True: # find an augmenting path from source to sink using DFS if algo == 'DFS': q = Stack() elif algo == 'BFS': q = Queue() q.push(source) pred = {source:None} explored = [source] for n in nl: self.get_node(n).set_attr('color', 'black') for e in self.edge_attr: if self.edge_attr[e]['flow'] == 0: self.edge_attr[e]['color'] = 'black' elif self.edge_attr[e]['flow']==self.edge_attr[e]['capacity']: self.edge_attr[e]['color'] = 'red' else: self.edge_attr[e]['color'] = 'green' self.display() while not q.isEmpty(): current = q.peek() q.remove(current) if current == sink: break out_neighbor = self.neighbors[current] in_neighbor = self.in_neighbors[current] neighbor = out_neighbor+in_neighbor for m in neighbor: if m in explored: continue self.get_node(m).set_attr('color', 'yellow') if m in out_neighbor: self.set_edge_attr(current, m, 'color', 'yellow') available_capacity = ( self.get_edge_attr(current, m, 'capacity')- self.get_edge_attr(current, m, 'flow')) else: self.set_edge_attr(m, current, 'color', 'yellow') available_capacity=self.get_edge_attr(m, current, 'flow') self.display() if available_capacity > 0: self.get_node(m).set_attr('color', 'blue') if m in out_neighbor: self.set_edge_attr(current, m, 'color', 'blue') else: self.set_edge_attr(m, current, 'color', 'blue') explored.append(m) pred[m] = current q.push(m) else: self.get_node(m).set_attr('color', 'black') if m in out_neighbor: if (self.get_edge_attr(current, m, 'flow') == self.get_edge_attr(current, m, 'capacity')): self.set_edge_attr(current, m, 'color', 'red') elif self.get_edge_attr(current, m, 'flow') == 0: self.set_edge_attr(current, m, 'color', 'black') #else: # self.set_edge_attr(current, m, 'color', 'green') else: if (self.get_edge_attr(m, current, 'flow') == self.get_edge_attr(m, current, 'capacity')): self.set_edge_attr(m, current, 'color', 'red') elif self.get_edge_attr(m, current, 'flow') == 0: self.set_edge_attr(m, current, 'color', 'black') #else: # self.set_edge_attr(m, current, 'color', 'green') self.display() # if no path with positive capacity from source sink exists, stop if sink not in pred: break # find capacity of the path current = sink min_capacity = 'infinite' while True: m = pred[current] if (m,current) in self.edge_attr: arc_capacity = self.edge_attr[(m, current)]['capacity'] flow = self.edge_attr[(m, current)]['flow'] potential = arc_capacity-flow if min_capacity == 'infinite': min_capacity = potential elif min_capacity > potential: min_capacity = potential else: potential = self.edge_attr[(current, m)]['flow'] if min_capacity == 'infinite': min_capacity = potential elif min_capacity > potential: min_capacity = potential if m == source: break current = m # update flows on the path current = sink while True: m = pred[current] if (m, current) in self.edge_attr: flow = self.edge_attr[(m, 
current)]['flow'] capacity = self.edge_attr[(m, current)]['capacity'] new_flow = flow+min_capacity self.edge_attr[(m, current)]['flow'] = new_flow if capacity == INF: self.edge_attr[(m, current)]['label'] = \ 'INF' + '/'+str(new_flow) else: self.edge_attr[(m, current)]['label'] = \ str(capacity)+'/'+str(new_flow) if new_flow==capacity: self.edge_attr[(m, current)]['color'] = 'red' else: self.edge_attr[(m, current)]['color'] = 'green' self.display() else: flow = self.edge_attr[(current, m)]['flow'] capacity = self.edge_attr[(current, m)]['capacity'] new_flow = flow-min_capacity self.edge_attr[(current, m)]['flow'] = new_flow if capacity == INF: self.edge_attr[(current, m)]['label'] = \ 'INF' + '/'+str(new_flow) else: self.edge_attr[(current, m)]['label'] = \ str(capacity)+'/'+str(new_flow) if new_flow==0: self.edge_attr[(current, m)]['color'] = 'red' else: self.edge_attr[(current, m)]['color'] = 'green' self.display() if m == source: break current = m if display is not None: self.attr['display'] = old_display def get_negative_cycle(self): ''' API: get_negative_cycle(self) Description: Finds and returns negative cost cycle using 'cost' attribute of arcs. Return value is a list of nodes representing cycle it is in the following form; n_1-n_2-...-n_k, when the cycle has k nodes. Pre: Arcs should have 'cost' attribute. Return: Returns a list of nodes in the cycle if a negative cycle exists, returns None otherwise. ''' nl = self.get_node_list() i = nl[0] (valid, distance, nextn) = self.floyd_warshall() if not valid: cycle = self.floyd_warshall_get_cycle(distance, nextn) return cycle else: return None def floyd_warshall(self): ''' API: floyd_warshall(self) Description: Finds all pair shortest paths and stores it in a list of lists. This is possible if the graph does not have negative cycles. It will return a tuple with 3 elements. The first element indicates whether the graph has a negative cycle. It is true if the graph does not have a negative cycle, ie. distances found are valid shortest distances. The second element is a dictionary of shortest distances between nodes. Keys are tuple of node pairs ie. (i,j). The third element is a dictionary that helps to retrieve the shortest path between nodes. Then return value can be represented as (validity, distance, nextn) where nextn is the dictionary to retrieve paths. distance and nextn can be used as inputs to other methods to get shortest path between nodes. Pre: Arcs should have 'cost' attribute. Return: Returns (validity, distance, nextn). The distances are valid if validity is True. 
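Example:
    An illustrative usage sketch (the arc costs below are invented for
    demonstration):

        g = Graph(type=DIRECTED_GRAPH, display='off')
        g.add_edge('a', 'b', cost=1)
        g.add_edge('b', 'c', cost=2)
        valid, dist, nextn = g.floyd_warshall()
        # valid is True (no negative cycle) and dist[('a', 'c')] == 3;
        # dist and nextn can be passed to floyd_warshall_get_path()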
''' nl = self.get_node_list() el = self.get_edge_list() # initialize distance distance = {} for i in nl: for j in nl: distance[(i,j)] = 'infinity' for i in nl: distance[(i,i)] = 0 for e in el: distance[(e[0],e[1])] = self.get_edge_cost(e) # == end of distance initialization # initialize next nextn = {} for i in nl: for j in nl: if i==j or distance[(i,j)]=='infinity': nextn[(i,j)] = None else: nextn[(i,j)] = i # == end of next initialization # compute shortest distance for k in nl: for i in nl: for j in nl: if distance[(i,k)]=='infinity' or distance[(k,j)]=='infinity': continue elif distance[(i,j)]=='infinity': distance[(i,j)] = distance[(i,k)] + distance[(k,j)] nextn[(i,j)] = nextn[(k,j)] elif distance[(i,j)] > distance[(i,k)] + distance[(k,j)]: distance[(i,j)] = distance[(i,k)] + distance[(k,j)] nextn[(i,j)] = nextn[(k,j)] # == end of compute shortest distance # check if graph has negative cycles for i in nl: if distance[(i,i)] < 0: # shortest distances are not valid # graph has negative cycle return (False, distance, nextn) return (True, distance, nextn) def floyd_warshall_get_path(self, distance, nextn, i, j): ''' API: floyd_warshall_get_path(self, distance, nextn, i, j): Description: Finds shortest path between i and j using distance and nextn dictionaries. Pre: (1) distance and nextn are outputs of floyd_warshall method. (2) The graph does not have a negative cycle, , ie. distance[(i,i)] >=0 for all node i. Return: Returns the list of nodes on the path from i to j, ie. [i,...,j] ''' if distance[(i,j)]=='infinity': return None k = nextn[(i,j)] path = self.floyd_warshall_get_path if i==k: return [i, j] else: return path(distance, nextn, i,k) + [k] + path(distance, nextn, k,j) def floyd_warshall_get_cycle(self, distance, nextn, element = None): ''' API: floyd_warshall_get_cycle(self, distance, nextn, element = None) Description: Finds a negative cycle in the graph. Pre: (1) distance and nextn are outputs of floyd_warshall method. (2) The graph should have a negative cycle, , ie. distance[(i,i)] < 0 for some node i. Return: Returns the list of nodes on the cycle. Ex: [i,j,k,...,r], where (i,j), (j,k) and (r,i) are some edges in the cycle. ''' nl = self.get_node_list() if element is None: for i in nl: if distance[(i,i)] < 0: # graph has a cycle on the path from i to i. element = i break else: raise Exception('Graph does not have a negative cycle!') elif distance[(element,element)] >= 0: raise Exception('Graph does not have a negative cycle that contains node '+str(element)+'!') # find the cycle on the path from i to i. cycle = [element] k = nextn[(element,element)] while k not in cycle: cycle.insert(1,k) k = nextn[(element,k)] if k==element: return cycle else: return self.floyd_warshall_get_cycle(distance, nextn, k) def find_cycle_capacity(self, cycle): ''' API: find_cycle_capacity(self, cycle): Description: Finds capacity of the cycle input. Pre: (1) Arcs should have 'capacity' attribute. Input: cycle: a list representing a cycle Return: Returns an integer number representing capacity of cycle. ''' index = 0 k = len(cycle) capacity = self.get_edge_attr(cycle[k-1], cycle[0], 'capacity') while index<(k-1): i = cycle[index] j = cycle[index+1] capacity_ij = self.get_edge_attr(i, j, 'capacity') if capacity > capacity_ij: capacity = capacity_ij index += 1 return capacity def fifo_label_correcting(self, source): ''' API: fifo_label_correcting(self, source) Description: finds shortest path from source to every other node. Returns predecessor dictionary. 
If graph has a negative cycle, detects it and returns to it. Pre: (1) 'cost' attribute of arcs. It will be used to compute shortest path. Input: source: source node Post: Modifies 'distance' attribute of nodes. Return: If there is no negative cycle returns to (True, pred), otherwise returns to (False, cycle) where pred is the predecessor dictionary and cycle is a list of nodes that represents cycle. It is in [n_1, n_2, ..., n_k] form where the cycle has k nodes. ''' pred = {} self.get_node(source).set_attr('distance', 0) pred[source] = None for n in self.neighbors: if n!=source: self.get_node(n).set_attr('distance', 'inf') q = [source] while q: i = q[0] q = q[1:] for j in self.neighbors[i]: distance_j = self.get_node(j).get_attr('distance') distance_i = self.get_node(i).get_attr('distance') c_ij = self.get_edge_attr(i, j, 'cost') if distance_j > distance_i + c_ij: self.get_node(j).set_attr('distance', distance_i+c_ij) if j in pred: pred[j] = i cycle = self.label_correcting_check_cycle(j, pred) if cycle is not None: return (False, cycle) else: pred[j] = i if j not in q: q.append(j) return (True, pred) def label_correcting_check_cycle(self, j, pred): ''' API: label_correcting_check_cycle(self, j, pred) Description: Checks if predecessor dictionary has a cycle, j represents the node that predecessor is recently updated. Pre: (1) predecessor of source node should be None. Input: j: node that predecessor is recently updated. pred: predecessor dictionary Return: If there exists a cycle, returns the list that represents the cycle, otherwise it returns to None. ''' labelled = {} for n in self.neighbors: labelled[n] = None current = j while current != None: if labelled[current]==j: cycle = self.label_correcting_get_cycle(j, pred) return cycle labelled[current] = j current = pred[current] return None def label_correcting_get_cycle(self, j, pred): ''' API: label_correcting_get_cycle(self, labelled, pred) Description: In label correcting check cycle it is decided pred has a cycle and nodes in the cycle are labelled. We will create a list of nodes in the cycle using labelled and pred inputs. Pre: This method should be called from label_correcting_check_cycle(), unless you are sure about what you are doing. Input: j: Node that predecessor is recently updated. We know that it is in the cycle pred: Predecessor dictionary that contains a cycle Post: Returns a list of nodes that represents cycle. It is in [n_1, n_2, ..., n_k] form where the cycle has k nodes. ''' cycle = [] cycle.append(j) current = pred[j] while current!=j: cycle.append(current) current = pred[current] cycle.reverse() return cycle def augment_cycle(self, amount, cycle): ''' API: augment_cycle(self, amount, cycle): Description: Augments 'amount' unit of flow along cycle. Pre: Arcs should have 'flow' attribute. Inputs: amount: An integer representing the amount to augment cycle: A list representing a cycle Post: Changes 'flow' attributes of arcs. 
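Example:
    An illustrative usage sketch (capacities and flows are invented for
    demonstration):

        g = Graph(type=DIRECTED_GRAPH, display='off')
        for (u, v) in [('a', 'b'), ('b', 'c'), ('a', 'c')]:
            g.add_edge(u, v, capacity=5)
            g.set_edge_attr(u, v, 'flow', 1)
        g.augment_cycle(1, ['a', 'b', 'c'])
        # flow on ('a','b') and ('b','c') rises to 2; flow on ('a','c')
        # drops to 0, since the cycle traverses that arc backwards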
''' index = 0 k = len(cycle) while index<(k-1): i = cycle[index] j = cycle[index+1] if (i,j) in self.edge_attr: flow_ij = self.edge_attr[(i,j)]['flow'] self.edge_attr[(i,j)]['flow'] = flow_ij+amount else: flow_ji = self.edge_attr[(j,i)]['flow'] self.edge_attr[(j,i)]['flow'] = flow_ji-amount index += 1 i = cycle[k-1] j = cycle[0] if (i,j) in self.edge_attr: flow_ij = self.edge_attr[(i,j)]['flow'] self.edge_attr[(i,j)]['flow'] = flow_ij+amount else: flow_ji = self.edge_attr[(j,i)]['flow'] self.edge_attr[(j,i)]['flow'] = flow_ji-amount def network_simplex(self, display, pivot, root): ''' API: network_simplex(self, display, pivot, root) Description: Solves minimum cost feasible flow problem using network simplex algorithm. It is recommended to use min_cost_flow(algo='simplex') instead of using network_simplex() directly. Returns True when an optimal solution is found, returns False otherwise. 'flow' attribute values of arcs should be considered as junk when returned False. Pre: (1) check Pre section of min_cost_flow() Input: pivot: specifies pivot rule. Check min_cost_flow() display: 'off' for no display, 'matplotlib' for live update of spanning tree. root: Root node for the underlying spanning trees that will be generated by network simplex algorthm. Post: (1) Changes 'flow' attribute of edges. Return: Returns True when an optimal solution is found, returns False otherwise. ''' # ==== determine an initial tree structure (T,L,U) # find a feasible flow if not self.find_feasible_flow(): return False t = self.simplex_find_tree() self.set_display_mode(display) # mark spanning tree arcs self.simplex_mark_st_arcs(t) # display initial spanning tree t.simplex_redraw(display, root) t.set_display_mode(display) #t.display() self.display() # set predecessor, depth and thread indexes t.simplex_search(root, 1) # compute potentials self.simplex_compute_potentials(t, root) # while some nontree arc violates optimality conditions while not self.simplex_optimal(t): self.display() # select an entering arc (k,l) (k,l) = self.simplex_select_entering_arc(t, pivot) self.simplex_mark_entering_arc(k, l) self.display() # determine leaving arc ((p,q), capacity, cycle)=self.simplex_determine_leaving_arc(t,k,l) # mark leaving arc self.simplex_mark_leaving_arc(p, q) self.display() self.simplex_remove_arc(t, p, q, capacity, cycle) # display after arc removed self.display() self.simplex_mark_st_arcs(t) self.display() # set predecessor, depth and thread indexes t.simplex_redraw(display, root) #t.display() t.simplex_search(root, 1) # compute potentials self.simplex_compute_potentials(t, root) return True def simplex_mark_leaving_arc(self, p, q): ''' API: simplex_mark_leving_arc(self, p, q) Description: Marks leaving arc. Input: p: tail of the leaving arc q: head of the leaving arc Post: Changes color attribute of leaving arc. ''' self.set_edge_attr(p, q, 'color', 'red') def simplex_determine_leaving_arc(self, t, k, l): ''' API: simplex_determine_leaving_arc(self, t, k, l) Description: Determines and returns the leaving arc. Input: t: current spanning tree solution. k: tail of the entering arc. l: head of the entering arc. Return: Returns the tuple that represents leaving arc, capacity of the cycle and cycle. 
''' # k,l are the first two elements of the cycle cycle = self.simplex_identify_cycle(t, k, l) flow_kl = self.get_edge_attr(k, l, 'flow') capacity_kl = self.get_edge_attr(k, l, 'capacity') min_capacity = capacity_kl # check if k,l is in U or L if flow_kl==capacity_kl: # l,k will be the last two elements cycle.reverse() n = len(cycle) index = 0 # determine last blocking arc t.add_edge(k, l) tel = t.get_edge_list() while index < (n-1): if (cycle[index], cycle[index+1]) in tel: flow = self.edge_attr[(cycle[index], cycle[index+1])]['flow'] capacity = \ self.edge_attr[(cycle[index],cycle[index+1])]['capacity'] if min_capacity >= (capacity-flow): candidate = (cycle[index], cycle[index+1]) min_capacity = capacity-flow else: flow = self.edge_attr[(cycle[index+1], cycle[index])]['flow'] if min_capacity >= flow: candidate = (cycle[index+1], cycle[index]) min_capacity = flow index += 1 # check arc (cycle[n-1], cycle[0]) if (cycle[n-1], cycle[0]) in tel: flow = self.edge_attr[(cycle[n-1], cycle[0])]['flow'] capacity = self.edge_attr[(cycle[n-1], cycle[0])]['capacity'] if min_capacity >= (capacity-flow): candidate = (cycle[n-1], cycle[0]) min_capacity = capacity-flow else: flow = self.edge_attr[(cycle[0], cycle[n-1])]['flow'] if min_capacity >= flow: candidate = (cycle[0], cycle[n-1]) min_capacity = flow return (candidate, min_capacity, cycle) def simplex_mark_entering_arc(self, k, l): ''' API: simplex_mark_entering_arc(self, k, l) Description: Marks entering arc (k,l) Input: k: tail of the entering arc l: head of the entering arc Post: (1) color attribute of the arc (k,l) ''' self.set_edge_attr(k, l, 'color', 'green') def simplex_mark_st_arcs(self, t): ''' API: simplex_mark_st_arcs(self, t) Description: Marks spanning tree arcs. Case 1, Blue: Arcs that are at lower bound and in tree. Case 2, Red: Arcs that are at upper bound and in tree. Case 3, Green: Arcs that are between bounds are green. Case 4, Brown: Non-tree arcs at lower bound. Case 5, Violet: Non-tree arcs at upper bound. Input: t: t is the current spanning tree Post: (1) color attribute of edges. ''' tel = list(t.edge_attr.keys()) for e in self.get_edge_list(): flow_e = self.edge_attr[e]['flow'] capacity_e = self.edge_attr[e]['capacity'] if e in tel: if flow_e == 0: self.edge_attr[e]['color'] = 'blue' elif flow_e == capacity_e: self.edge_attr[e]['color'] = 'blue' else: self.edge_attr[e]['color'] = 'blue' else: if flow_e == 0: self.edge_attr[e]['color'] = 'black' elif flow_e == capacity_e: self.edge_attr[e]['color'] = 'black' else: msg = "Arc is not in ST but has flow between bounds." raise Exception(msg) def print_flow(self): ''' API: print_flow(self) Description: Prints all positive flows to stdout. This method can be used for debugging purposes. ''' print('printing current edge, flow, capacity') for e in self.edge_attr: if self.edge_attr[e]['flow']!=0: print(e, str(self.edge_attr[e]['flow']).ljust(4), end=' ') print(str(self.edge_attr[e]['capacity']).ljust(4)) def simplex_redraw(self, display, root): ''' API: simplex_redraw(self, display, root) Description: Returns a new graph instance that is same as self but adds nodes and arcs in a way that the resulting tree will be displayed properly. Input: display: display mode root: root node in tree. Return: Returns a graph same as self. 
''' nl = self.get_node_list() el = self.get_edge_list() new = Graph(type=DIRECTED_GRAPH, layout='dot', display=display) pred_i = self.get_node(root).get_attr('pred') thread_i = self.get_node(root).get_attr('thread') depth_i = self.get_node(root).get_attr('depth') new.add_node(root, pred=pred_i, thread=thread_i, depth=depth_i) q = [root] visited = [root] while q: name = q.pop() visited.append(name) neighbors = self.neighbors[name] + self.in_neighbors[name] for n in neighbors: if n not in new.get_node_list(): pred_i = self.get_node(n).get_attr('pred') thread_i = self.get_node(n).get_attr('thread') depth_i = self.get_node(n).get_attr('depth') new.add_node(n, pred=pred_i, thread=thread_i, depth=depth_i) if (name,n) in el: if (name,n) not in new.edge_attr: new.add_edge(name,n) else: if (n,name) not in new.edge_attr: new.add_edge(n,name) if n not in visited: q.append(n) for e in el: flow = self.edge_attr[e]['flow'] capacity = self.edge_attr[e]['capacity'] cost = self.edge_attr[e]['cost'] new.edge_attr[e]['flow'] = flow new.edge_attr[e]['capacity'] = capacity new.edge_attr[e]['cost'] = cost new.edge_attr[e]['label'] = "%d/%d/%d" %(flow,capacity,cost) return new def simplex_remove_arc(self, t, p, q, min_capacity, cycle): ''' API: simplex_remove_arc(self, p, q, min_capacity, cycle) Description: Removes arc (p,q), updates t, updates flows, where (k,l) is the entering arc. Input: t: tree solution to be updated. p: tail of the leaving arc. q: head of the leaving arc. min_capacity: capacity of the cycle. cycle: cycle obtained when entering arc considered. Post: (1) updates t. (2) updates 'flow' attributes. ''' # augment min_capacity along cycle n = len(cycle) tel = list(t.edge_attr.keys()) index = 0 while index < (n-1): if (cycle[index], cycle[index+1]) in tel: flow_e = self.edge_attr[(cycle[index], cycle[index+1])]['flow'] self.edge_attr[(cycle[index], cycle[index+1])]['flow'] =\ flow_e+min_capacity else: flow_e = self.edge_attr[(cycle[index+1], cycle[index])]['flow'] self.edge_attr[(cycle[index+1], cycle[index])]['flow'] =\ flow_e-min_capacity index += 1 # augment arc cycle[n-1], cycle[0] if (cycle[n-1], cycle[0]) in tel: flow_e = self.edge_attr[(cycle[n-1], cycle[0])]['flow'] self.edge_attr[(cycle[n-1], cycle[0])]['flow'] =\ flow_e+min_capacity else: flow_e = self.edge_attr[(cycle[0], cycle[n-1])]['flow'] self.edge_attr[(cycle[0], cycle[n-1])]['flow'] =\ flow_e-min_capacity # remove leaving arc t.del_edge((p, q)) # set label of removed arc flow_pq = self.get_edge_attr(p, q, 'flow') capacity_pq = self.get_edge_attr(p, q, 'capacity') cost_pq = self.get_edge_attr(p, q, 'cost') self.set_edge_attr(p, q, 'label', "%d/%d/%d" %(flow_pq,capacity_pq,cost_pq)) for e in t.edge_attr: flow = self.edge_attr[e]['flow'] capacity = self.edge_attr[e]['capacity'] cost = self.edge_attr[e]['cost'] t.edge_attr[e]['flow'] = flow t.edge_attr[e]['capacity'] = capacity t.edge_attr[e]['cost'] = cost t.edge_attr[e]['label'] = "%d/%d/%d" %(flow,capacity,cost) self.edge_attr[e]['label'] = "%d/%d/%d" %(flow,capacity,cost) def simplex_select_entering_arc(self, t, pivot): ''' API: simplex_select_entering_arc(self, t, pivot) Description: Decides and returns entering arc using pivot rule. Input: t: current spanning tree solution pivot: May be one of the following; 'first_eligible' or 'dantzig'. 'dantzig' is the default value. 
Return: Returns entering arc tuple (k,l) ''' if pivot=='dantzig': # pick the maximum violation candidate = {} for e in self.edge_attr: if e in t.edge_attr: continue flow_ij = self.edge_attr[e]['flow'] potential_i = self.get_node(e[0]).get_attr('potential') potential_j = self.get_node(e[1]).get_attr('potential') capacity_ij = self.edge_attr[e]['capacity'] c_ij = self.edge_attr[e]['cost'] cpi_ij = c_ij - potential_i + potential_j if flow_ij==0: if cpi_ij < 0: candidate[e] = cpi_ij elif flow_ij==capacity_ij: if cpi_ij > 0: candidate[e] = cpi_ij for e in candidate: max_c = e max_v = abs(candidate[e]) break for e in candidate: if max_v < abs(candidate[e]): max_c = e max_v = abs(candidate[e]) elif pivot=='first_eligible': # pick the first eligible for e in self.edge_attr: if e in t.edge_attr: continue flow_ij = self.edge_attr[e]['flow'] potential_i = self.get_node(e[0]).get_attr('potential') potential_j = self.get_node(e[1]).get_attr('potential') capacity_ij = self.edge_attr[e]['capacity'] c_ij = self.edge_attr[e]['cost'] cpi_ij = c_ij - potential_i + potential_j if flow_ij==0: if cpi_ij < 0: max_c = e max_v = abs(cpi_ij) elif flow_ij==capacity_ij: if cpi_ij > 0: max_c = e max_v = cpi_ij else: raise Exception("Unknown pivot rule.") return max_c def simplex_optimal(self, t): ''' API: simplex_optimal(self, t) Description: Checks if the current solution is optimal, if yes returns True, False otherwise. Pre: 'flow' attributes represents a solution. Input: t: Graph instance tat reperesents spanning tree solution. Return: Returns True if the current solution is optimal (optimality conditions are satisfied), else returns False ''' for e in self.edge_attr: if e in t.edge_attr: continue flow_ij = self.edge_attr[e]['flow'] potential_i = self.get_node(e[0]).get_attr('potential') potential_j = self.get_node(e[1]).get_attr('potential') capacity_ij = self.edge_attr[e]['capacity'] c_ij = self.edge_attr[e]['cost'] cpi_ij = c_ij - potential_i + potential_j if flow_ij==0: if cpi_ij < 0: return False elif flow_ij==capacity_ij: if cpi_ij > 0: return False return True def simplex_find_tree(self): ''' API: simplex_find_tree(self) Description: Assumes a feasible flow solution stored in 'flow' attribute's of arcs and converts this solution to a feasible spanning tree solution. Pre: (1) 'flow' attributes represents a feasible flow solution. Post: (1) 'flow' attributes may change when eliminating cycles. Return: Return a Graph instance that is a spanning tree solution. ''' # find a cycle solution_g = self.get_simplex_solution_graph() cycle = solution_g.simplex_find_cycle() while cycle is not None: # find amount to augment and direction amount = self.simplex_augment_cycle(cycle) # augment along the cycle self.augment_cycle(amount, cycle) # find a new cycle solution_g = self.get_simplex_solution_graph() cycle = solution_g.simplex_find_cycle() # check if the solution is connected while self.simplex_connect(solution_g): pass # add attributes for e in self.edge_attr: flow = self.edge_attr[e]['flow'] capacity = self.edge_attr[e]['capacity'] cost = self.edge_attr[e]['cost'] self.edge_attr[e]['label'] = "%d/%d/%d" %(flow,capacity,cost) if e in solution_g.edge_attr: solution_g.edge_attr[e]['flow'] = flow solution_g.edge_attr[e]['capacity'] = capacity solution_g.edge_attr[e]['cost'] = cost solution_g.edge_attr[e]['label'] = "%d/%d/%d" %(flow,capacity,cost) return solution_g def simplex_connect(self, solution_g): ''' API: simplex_connect(self, solution_g) Description: At this point we assume that the solution does not have a cycle. 
We check if all the nodes are connected, if not we add an arc to solution_g that does not create a cycle and return True. Otherwise we do nothing and return False. Pre: (1) We assume there is no cycle in the solution. Input: solution_g: current spanning tree solution instance. Post: (1) solution_g is updated. An arc that does not create a cycle is added. (2) 'component' attribute of nodes are changed. Return: Returns True if an arc is added, returns False otherwise. ''' nl = solution_g.get_node_list() current = nl[0] pred = solution_g.simplex_search(current, current) separated = list(pred.keys()) for n in nl: if solution_g.get_node(n).get_attr('component') != current: # find an arc from n to seperated for m in separated: if (n,m) in self.edge_attr: solution_g.add_edge(n,m) return True elif (m,n) in self.edge_attr: solution_g.add_edge(m,n) return True return False def simplex_search(self, source, component_nr): ''' API: simplex_search(self, source, component_nr) Description: Searches graph starting from source. Its difference from usual search is we can also go backwards along an arc. When the graph is a spanning tree it computes predecessor, thread and depth indexes and stores them as node attributes. These values should be considered as junk when the graph is not a spanning tree. Input: source: source node component_nr: component number Post: (1) Sets the component number of all reachable nodes to component. Changes 'component' attribute of nodes. (2) Sets 'pred', 'thread' and 'depth' attributes of nodes. These values are junk if the graph is not a tree. Return: Returns predecessor dictionary. ''' q = [source] pred = {source:None} depth = {source:0} sequence = [] for n in self.neighbors: self.get_node(n).set_attr('component', None) while q: current = q.pop() self.get_node(current).set_attr('component', component_nr) sequence.append(current) neighbors = self.in_neighbors[current] + self.neighbors[current] for n in neighbors: if n in pred: continue self.get_node(n).set_attr('component', component_nr) pred[n] = current depth[n] = depth[current]+1 q.append(n) for i in range(len(sequence)-1): self.get_node(sequence[i]).set_attr('thread', int(sequence[i+1])) self.get_node(sequence[-1]).set_attr('thread', int(sequence[0])) for n in pred: self.get_node(n).set_attr('pred', pred[n]) self.get_node(n).set_attr('depth', depth[n]) return pred def simplex_augment_cycle(self, cycle): ''' API: simplex_augment_cycle(self, cycle) Description: Augments along the cycle to break it. Pre: 'flow', 'capacity' attributes on arcs. Input: cycle: list representing a cycle in the solution Post: 'flow' attribute will be modified. ''' # find amount to augment index = 0 k = len(cycle) el = list(self.edge_attr.keys()) # check arc (cycle[k-1], cycle[0]) if (cycle[k-1], cycle[0]) in el: min_capacity = self.edge_attr[(cycle[k-1], cycle[0])]['capacity']-\ self.edge_attr[(cycle[k-1], cycle[0])]['flow'] else: min_capacity = self.edge_attr[(cycle[0], cycle[k-1])]['flow'] # check rest of the arcs in the cycle while index<(k-1): i = cycle[index] j = cycle[index+1] if (i,j) in el: capacity_ij = self.edge_attr[(i,j)]['capacity'] -\ self.edge_attr[(i,j)]['flow'] else: capacity_ij = self.edge_attr[(j,i)]['flow'] if min_capacity > capacity_ij: min_capacity = capacity_ij index += 1 return min_capacity def simplex_find_cycle(self): ''' API: simplex_find_cycle(self) Description: Returns a cycle (list of nodes) if the graph has one, returns None otherwise. Uses DFS. During DFS checks existence of arcs to lower depth regions. 
Note that direction of the arcs are not important. Return: Returns list of nodes that represents cycle. Returns None if the graph does not have any cycle. ''' # make a dfs, if you identify an arc to a lower depth node we have a # cycle nl = self.get_node_list() q = [nl[0]] visited = [] depth = {nl[0]:0} pred = {nl[0]:None} for n in nl: self.get_node(n).set_attr('component', None) component_nr = int(nl[0]) self.get_node(nl[0]).set_attr('component', component_nr) while True: while q: current = q.pop() visited.append(current) neighbors = self.in_neighbors[current] +\ self.neighbors[current] for n in neighbors: if n==pred[current]: continue self.get_node(n).set_attr('component', component_nr) if n in depth: # we have a cycle cycle1 = [] cycle2 = [] temp = n while temp is not None: cycle1.append(temp) temp = pred[temp] temp = current while temp is not None: cycle2.append(temp) temp = pred[temp] cycle1.pop() cycle1.reverse() cycle2.extend(cycle1) return cycle2 else: pred[n] = current depth[n] = depth[current] + 1 if n not in visited: q.append(n) flag = False for n in nl: if self.get_node(n).get_attr('component') is None: q.append(n) depth = {n:0} pred = {n:None} visited = [] component_nr = int(n) self.get_node(n).set_attr('component', component_nr) flag = True break if not flag: break return None def get_simplex_solution_graph(self): ''' API: get_simplex_solution_graph(self): Description: Assumes a feasible flow solution stored in 'flow' attribute's of arcs. Returns the graph with arcs that have flow between 0 and capacity. Pre: (1) 'flow' attribute represents a feasible flow solution. See Pre section of min_cost_flow() for details. Return: Graph instance that only has the arcs that have flow strictly between 0 and capacity. ''' simplex_g = Graph(type=DIRECTED_GRAPH) for i in self.neighbors: simplex_g.add_node(i) for e in self.edge_attr: flow_e = self.edge_attr[e]['flow'] capacity_e = self.edge_attr[e]['capacity'] if flow_e>0 and flow_e<capacity_e: simplex_g.add_edge(e[0], e[1]) return simplex_g def simplex_compute_potentials(self, t, root): ''' API: simplex_compute_potentials(self, t, root) Description: Computes node potentials for a minimum cost flow problem and stores them as node attribute 'potential'. Based on pseudocode given in Network Flows by Ahuja et al. Pre: (1) Assumes a directed graph in which each arc has a 'cost' attribute. (2) Uses 'thread' and 'pred' attributes of nodes. Input: t: Current spanning tree solution, its type is Graph. root: root node of the tree. Post: Keeps the node potentials as 'potential' attribute. ''' self.get_node(root).set_attr('potential', 0) j = t.get_node(root).get_attr('thread') while j is not root: i = t.get_node(j).get_attr('pred') potential_i = self.get_node(i).get_attr('potential') if (i,j) in self.edge_attr: c_ij = self.edge_attr[(i,j)]['cost'] self.get_node(j).set_attr('potential', potential_i-c_ij) if (j,i) in self.edge_attr: c_ji = self.edge_attr[(j,i)]['cost'] self.get_node(j).set_attr('potential', potential_i+c_ji) j = t.get_node(j).get_attr('thread') def simplex_identify_cycle(self, t, k, l): ''' API: identify_cycle(self, t, k, l) Description: Identifies and returns to the pivot cycle, which is a list of nodes. Pre: (1) t is spanning tree solution, (k,l) is the entering arc. Input: t: current spanning tree solution k: tail of the entering arc l: head of the entering arc Returns: List of nodes in the cycle. 
''' i = k j = l cycle = [] li = [k] lj = [j] while i is not j: depth_i = t.get_node(i).get_attr('depth') depth_j = t.get_node(j).get_attr('depth') if depth_i > depth_j: i = t.get_node(i).get_attr('pred') li.append(i) elif depth_i < depth_j: j = t.get_node(j).get_attr('pred') lj.append(j) else: i = t.get_node(i).get_attr('pred') li.append(i) j = t.get_node(j).get_attr('pred') lj.append(j) cycle.extend(lj) li.pop() li.reverse() cycle.extend(li) # l is beginning k is end return cycle def min_cost_flow(self, display = None, **args): ''' API: min_cost_flow(self, display='off', **args) Description: Solves minimum cost flow problem using node/edge attributes with the algorithm specified. Pre: (1) Assumes a directed graph in which each arc has 'capacity' and 'cost' attributes. (2) Nodes should have 'demand' attribute. This value should be positive for supply and negative for demand, and 0 for transhipment nodes. (3) The graph should be connected. (4) Assumes (i,j) and (j,i) does not exist together. Needed when solving max flow. (max flow problem is solved to get a feasible flow). Input: display: 'off' for no display, 'matplotlib' for live update of tree args: may have the following display: display method, if not given current mode (the one specified by __init__ or set_display) will be used. algo: determines algorithm to use, can be one of the following 'simplex': network simplex algorithm 'cycle_canceling': cycle canceling algorithm 'simplex' is used if not given. see Network Flows by Ahuja et al. for details of algorithms. pivot: valid if algo is 'simlex', determines pivoting rule for simplex, may be one of the following; 'first_eligible', 'dantzig' or 'scaled'. 'dantzig' is used if not given. see Network Flows by Ahuja et al. for pivot rules. root: valid if algo is 'simlex', specifies the root node for simplex algorithm. It is name of the one of the nodes. It will be chosen randomly if not provided. Post: The 'flow' attribute of each arc gives the optimal flows. 'distance' attribute of the nodes are also changed during max flow solution process. Examples: g.min_cost_flow(): solves minimum cost feasible flow problem using simplex algorithm with dantzig pivoting rule. See pre section for details. g.min_cost_flow(algo='cycle_canceling'): solves minimum cost feasible flow problem using cycle canceling agorithm. g.min_cost_flow(algo='simplex', pivot='scaled'): solves minimum cost feasible flow problem using network simplex agorithm with scaled pivot rule. ''' if display is None: display = self.attr['display'] if 'algo' in args: algorithm = args['algo'] else: algorithm = 'simplex' if algorithm == 'simplex': if 'root' in args: root = args['root'] else: for k in self.neighbors: root = k break if 'pivot' in args: if not self.network_simplex(display, args['pivot'], root): print('problem is infeasible') else: if not self.network_simplex(display, 'dantzig', root): print('problem is infeasible') elif algorithm == 'cycle_canceling': if not self.cycle_canceling(display): print('problem is infeasible') else: print(args['algo'], 'is not a defined algorithm. 
Exiting.') return def random(self, numnodes = 10, degree_range = (2, 4), length_range = (1, 10), density = None, edge_format = None, node_format = None, Euclidean = False, seedInput = 0, add_labels = True, parallel_allowed = False, node_selection = 'closest', scale = 10, scale_cost = 5): ''' API: random(self, numnodes = 10, degree_range = None, length_range = None, density = None, edge_format = None, node_format = None, Euclidean = False, seedInput = 0) Description: Populates graph with random edges and nodes. Input: numnodes: Number of nodes to add. degree_range: A tuple that has lower and upper bounds of degree for a node. length_range: A tuple that has lower and upper bounds for 'cost' attribute of edges. density: Density of edges, ie. 0.5 indicates a node will approximately have edge to half of the other nodes. edge_format: Dictionary that specifies attribute values for edges. node_format: Dictionary that specifies attribute values for nodes. Euclidean: Creates an Euclidean graph (Euclidean distance between nodes) if True. seedInput: Seed that will be used for random number generation. Pre: It is recommended to call this method on empty Graph objects. Post: Graph will be populated by nodes and edges. ''' random.seed(seedInput) if edge_format == None: edge_format = {'fontsize':10, 'fontcolor':'blue'} if node_format == None: node_format = {'height':0.5, 'width':0.5, 'fixedsize':'true', 'fontsize':10, 'fontcolor':'red', 'shape':'circle', } if Euclidean == False: for m in range(numnodes): self.add_node(m, **node_format) if degree_range is not None and density is None: for m in range(numnodes): degree = random.randint(degree_range[0], degree_range[1]) i = 0 while i < degree: n = random.randint(1, numnodes-1) if (((m,n) not in self.edge_attr and m != n) and (parallel_allowed or (n, m) not in self.edge_attr)): if length_range is not None: length = random.randint(length_range[0], length_range[1]) self.add_edge(m, n, cost = length, **edge_format) if add_labels: self.set_edge_attr(m, n, 'label', str(length)) else: self.add_edge(m, n, **edge_format) i += 1 elif density != None: for m in range(numnodes): if self.graph_type == DIRECTED_GRAPH: numnodes2 = numnodes else: numnodes2 = m for n in range(numnodes2): if ((parallel_allowed or (n, m) not in self.edge_attr) and m != n): if random.random() < density: if length_range is not None: length = random.randint(length_range[0], length_range[1]) self.add_edge(m, n, cost = length, **edge_format) if add_labels: self.set_edge_attr(m, n, 'label', str(length)) else: self.add_edge(m, n, **edge_format) else: print("Must set either degree range or density") else: for m in range(numnodes): ''' Assigns random coordinates (between 1 and 20) to the nodes ''' x = random.random()*scale y = random.random()*scale self.add_node(m, locationx = x, locationy = y, pos = '"'+str(x) + "," + str(y)+'!"', **node_format) if degree_range is not None and density is None: for m in range(numnodes): degree = random.randint(degree_range[0], degree_range[1]) i = 0 neighbors = [] if node_selection == 'random': while i < degree: length = round((((self.get_node(n).get_attr('locationx') - self.get_node(m).get_attr('locationx')) ** 2 + (self.get_node(n).get_attr('locationy') - self.get_node(m).get_attr('locationy')) ** 2) ** 0.5)*scale_cost, 0) if (((m,n) not in self.edge_attr and m != n) and (parallel_allowed or (n, m) not in self.edge_attr)): neighbors.append(random.randint(0, numnodes-1)) self.add_edge(m, n, cost = int(length), **edge_format) if add_labels: self.set_edge_attr(m, n, 
'label', str(int(length))) i += 1 elif node_selection == 'closest': lengths = [] for n in range(numnodes): lengths.append((n, round((((self.get_node(n).get_attr('locationx') - self.get_node(m).get_attr('locationx')) ** 2 + (self.get_node(n).get_attr('locationy') - self.get_node(m).get_attr('locationy')) ** 2) ** 0.5)*scale_cost, 0))) lengths.sort(key = lambda l : l[1]) for i in range(degree+1): if not (lengths[i][0] == m or self.check_edge(m, lengths[i][0])): self.add_edge(m, lengths[i][0], cost = int(lengths[i][1]), **edge_format) if add_labels: self.set_edge_attr(m, lengths[i][0], 'label', str(int(lengths[i][1]))) else: print("Unknown node selection rule...exiting") return elif density != None: for m in range(numnodes): if self.graph_type == DIRECTED_GRAPH: numnodes2 = numnodes else: numnodes2 = m for n in range(numnodes2): if ((parallel_allowed or (n, m) not in self.edge_attr) and m != n): if random.random() < density: if length_range is None: ''' calculates the euclidean norm and round it to an integer ''' length = round((((self.get_node(n).get_attr('locationx') - self.get_node(m).get_attr('locationx')) ** 2 + (self.get_node(n).get_attr('locationy') - self.get_node(m).get_attr('locationy')) ** 2) ** 0.5), 0) self.add_edge(m, n, cost = int(length), **edge_format) if add_labels: self.set_edge_attr(m, n, 'label', str(int(length))) else: self.add_edge(m, n, **edge_format) else: print("Must set either degree range or density") def page_rank(self, damping_factor=0.85, max_iterations=100, min_delta=0.00001): ''' API: page_rank(self, damping_factor=0.85, max_iterations=100, min_delta=0.00001) Description: Compute and return the page-rank of a directed graph. This function was originally taken from here and modified for this graph class: http://code.google.com/p/python-graph/source/browse/ trunk/core/pygraph/algorithms/pagerank.py Input: damping_factor: Damping factor. max_iterations: Maximum number of iterations. min_delta: Smallest variation required to have a new iteration. Pre: Graph should be a directed graph. Return: Returns dictionary of page-ranks. Keys are node names, values are corresponding page-ranks. ''' nodes = self.get_node_list() graph_size = len(nodes) if graph_size == 0: return {} #value for nodes without inbound links min_value = old_div((1.0-damping_factor),graph_size) # itialize the page rank dict with 1/N for all nodes pagerank = dict.fromkeys(nodes, old_div(1.0,graph_size)) for _ in range(max_iterations): diff = 0 #total difference compared to last iteraction # computes each node PageRank based on inbound links for node in nodes: rank = min_value for referring_page in self.get_in_neighbors(node): rank += (damping_factor * pagerank[referring_page] / len(self.get_neighbors(referring_page))) diff += abs(pagerank[node] - rank) pagerank[node] = rank #stop if PageRank has converged if diff < min_delta: break return pagerank def get_degrees(self): ''' API: get_degree(self) Description: Returns degrees of nodes in dictionary format. Return: Returns a dictionary of node degrees. Keys are node names, values are corresponding degrees. ''' degree = {} if self.attr['type'] is not DIRECTED_GRAPH: for n in self.get_node_list(): degree[n] = len(self.get_neighbors(n)) return degree else: for n in self.get_node_list(): degree[n] = (len(self.get_in_neighbors(n)) + len(self.get_out_neighbors(n))) def get_in_degrees(self): ''' API: get_degree(self) Description: Returns degrees of nodes in dictionary format. Return: Returns a dictionary of node degrees. 
Keys are node names, values are corresponding degrees. ''' degree = {} if self.attr['type'] is not DIRECTED_GRAPH: print('This function only works for directed graphs') return for n in self.get_node_list(): degree[n] = len(self.get_in_neighbors(n)) return degree def get_out_degrees(self): ''' API: get_degree(self) Description: Returns degrees of nodes in dictionary format. Return: Returns a dictionary of node degrees. Keys are node names, values are corresponding degrees. ''' degree = {} if self.attr['type'] is not DIRECTED_GRAPH: print('This function only works for directed graphs') return for n in self.get_node_list(): degree[n] = len(self.get_out_neighbors(n)) return degree def get_diameter(self): ''' API: get_diameter(self) Description: Returns diameter of the graph. Diameter is defined as follows. distance(n,m): shortest unweighted path from n to m eccentricity(n) = $\max _m distance(n,m)$ diameter = $\min _n eccentricity(n) = \min _n \max _m distance(n,m)$ Return: Returns diameter of the graph. ''' if self.attr['type'] is not UNDIRECTED_GRAPH: print('This function only works for undirected graphs') return diameter = 'infinity' eccentricity_n = 0 for n in self.get_node_list(): for m in self.get_node_list(): path_n_m = self.search(n, destination = m, algo = 'BFS') if path_n_m is None: # this indicates there is no path from n to m, no diameter # is defined, since the graph is not connected, return # 'infinity' return 'infinity' distance_n_m = len(path_n_m)-1 if distance_n_m > eccentricity_n: eccentricity_n = distance_n_m if diameter == 'infinity' or eccentricity_n > diameter: diameter = eccentricity_n return diameter def create_cluster(self, node_list, cluster_attrs={}, node_attrs={}): ''' API: create_cluster(self, node_list, cluster_attrs, node_attrs) Description: Creates a cluster from the node given in the node list. Input: node_list: List of nodes in the cluster. cluster_attrs: Dictionary of cluster attributes, see Dot language grammer documentation for details. node_attrs: Dictionary of node attributes. It will overwrite previous attributes of the nodes in the cluster. Post: A cluster will be created. Attributes of the nodes in the cluster may change. ''' if 'name' in cluster_attrs: if 'name' in self.cluster: raise Exception('A cluster with name %s already exists!' %cluster_attrs['name']) else: name = cluster_attrs['name'] else: name = 'c%d' %self.attr['cluster_count'] self.attr['cluster_count'] += 1 cluster_attrs['name'] = name #cluster_attrs['name'] = self.cluster[name] = {'node_list':node_list, 'attrs':copy.deepcopy(cluster_attrs), 'node_attrs':copy.deepcopy(node_attrs)} class DisjointSet(Graph): ''' Disjoint set data structure. Inherits Graph class. ''' def __init__(self, optimize = True, **attrs): ''' API: __init__(self, optimize = True, **attrs): Description: Class constructor. Input: optimize: Optimizes find() if True. attrs: Graph attributes. Post: self.optimize will be updated. ''' attrs['type'] = DIRECTED_GRAPH Graph.__init__(self, **attrs) self.sizes = {} self.optimize = optimize def add(self, aList): ''' API: add(self, aList) Description: Adds items in the list to the set. Input: aList: List of items. Post: self.sizes will be updated. ''' self.add_node(aList[0]) for i in range(1, len(aList)): self.add_edge(aList[i], aList[0]) self.sizes[aList[0]] = len(aList) def union(self, i, j): ''' API: union(self, i, j): Description: Finds sets of i and j and unites them. Input: i: Item. j: Item. Post: self.sizes will be updated. 
''' roots = (self.find(i), self.find(j)) if roots[0] == roots[1]: return False if self.sizes[roots[0]] <= self.sizes[roots[1]] or not self.optimize: self.add_edge(roots[0], roots[1]) self.sizes[roots[1]] += self.sizes[roots[0]] return True else: self.add_edge(roots[1], roots[0]) self.sizes[roots[0]] += self.sizes[roots[1]] return True def find(self, i): ''' API: find(self, i) Description: Returns root of set that has i. Input: i: Item. Return: Returns root of set that has i. ''' current = i edge_list = [] while len(self.get_neighbors(current)) != 0: successor = self.get_neighbors(current)[0] edge_list.append((current, successor)) current = successor if self.optimize: for e in edge_list: if e[1] != current: self.del_edge((e[0], e[1])) self.add_edge(e[0], current) return current if __name__ == '__main__': G = Graph(type = UNDIRECTED_GRAPH, splines = 'true', K = 1.5) #G.random(numnodes = 20, Euclidean = True, seedInput = 11, # add_labels = False, # scale = 10, # scale_cost = 10, # #degree_range = (2, 4), # #length_range = (1, 10) # ) #page_ranks = sorted(G.page_rank().iteritems(), key=operator.itemgetter(1)) #page_ranks.reverse() #for i in page_ranks: # print i #G = Graph(type = UNDIRECTED_GRAPH, splines = 'true', K = 1.5) G.random(numnodes = 10, Euclidean = True, seedInput = 13, add_labels = True, scale = 10, scale_cost = 10, #degree_range = (2, 4), #length_range = (1, 10) ) G.set_display_mode('matplotlib') G.display() #G.dfs(0) G.search(0, display = 'matplotlib', algo = 'Prim') #G.minimum_spanning_tree_kruskal()
epl-1.0
-4,642,684,740,455,221,000
38.827773
125
0.50736
false
4.284408
false
false
false
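The record above ends with a network-simplex / min-cost-flow implementation whose min_cost_flow() docstring spells out the required node and arc attributes. A minimal usage sketch follows, assuming the Graph class and DIRECTED_GRAPH constant defined above can be imported; the import path below is a placeholder, not part of the record.

from gimpy import Graph, DIRECTED_GRAPH  # placeholder import path for the module above

g = Graph(type=DIRECTED_GRAPH)
# 'demand' is positive for supply nodes, negative for demand nodes, 0 for transshipment.
g.add_node('s', demand=4)
g.add_node('a', demand=0)
g.add_node('t', demand=-4)
# Every arc carries 'cost' and 'capacity' attributes, per the Pre section of min_cost_flow().
g.add_edge('s', 'a', cost=1, capacity=3)
g.add_edge('s', 't', cost=4, capacity=2)
g.add_edge('a', 't', cost=1, capacity=3)

# Network simplex with the Dantzig pivoting rule, rooted at 's'.
g.min_cost_flow(algo='simplex', pivot='dantzig', root='s')

# Optimal flows are written back into the 'flow' attribute of each arc.
for e in g.edge_attr:
    print(e, g.edge_attr[e]['flow'])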
aaltay/beam
sdks/python/apache_beam/metrics/execution.py
1
11781
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # cython: language_level=3 """ This module is for internal use only; no backwards-compatibility guarantees. The classes in this file keep shared state, and organize metrics information. Available classes: - MetricKey - Internal key for a metric. - MetricResult - Current status of a metric's updates/commits. - _MetricsEnvironment - Keeps track of MetricsContainer and other metrics information for every single execution working thread. - MetricsContainer - Holds the metrics of a single step and a single unit-of-commit (bundle). """ # pytype: skip-file from __future__ import absolute_import import threading from builtins import object from typing import TYPE_CHECKING from typing import Any from typing import Dict from typing import FrozenSet from typing import Optional from typing import Type from typing import Union from typing import cast from apache_beam.metrics import monitoring_infos from apache_beam.metrics.cells import CounterCell from apache_beam.metrics.cells import DistributionCell from apache_beam.metrics.cells import GaugeCell from apache_beam.runners.worker import statesampler from apache_beam.runners.worker.statesampler import get_current_tracker if TYPE_CHECKING: from apache_beam.metrics.cells import GaugeData from apache_beam.metrics.cells import DistributionData from apache_beam.metrics.cells import MetricCell from apache_beam.metrics.cells import MetricCellFactory from apache_beam.metrics.metricbase import MetricName from apache_beam.portability.api import metrics_pb2 class MetricKey(object): """Key used to identify instance of metric cell. Metrics are internally keyed by the name of the step they're associated with, the name and namespace (if it is a user defined metric) of the metric, and any extra label metadata added by the runner specific metric collection service. """ def __init__(self, step, metric, labels=None): """Initializes ``MetricKey``. Args: step: A string with the step this metric cell is part of. metric: A ``MetricName`` namespace+name that identifies a metric. labels: An arbitrary set of labels that also identifies the metric. """ self.step = step self.metric = metric self.labels = labels if labels else dict() def __eq__(self, other): return ( self.step == other.step and self.metric == other.metric and self.labels == other.labels) def __hash__(self): return hash((self.step, self.metric, frozenset(self.labels))) def __repr__(self): return 'MetricKey(step={}, metric={}, labels={})'.format( self.step, self.metric, self.labels) class MetricResult(object): """Keeps track of the status of a metric within a single bundle. It contains the physical and logical updates to the metric. Physical updates are updates that have not necessarily been committed, but that have been made during pipeline execution. 
Logical updates are updates that have been committed. Attributes: key: A ``MetricKey`` that identifies the metric and bundle of this result. committed: The committed updates of the metric. This attribute's type is of metric type result (e.g. int, DistributionResult, GaugeResult). attempted: The logical updates of the metric. This attribute's type is that of metric type result (e.g. int, DistributionResult, GaugeResult). """ def __init__(self, key, committed, attempted): """Initializes ``MetricResult``. Args: key: A ``MetricKey`` object. committed: Metric data that has been committed (e.g. logical updates) attempted: Metric data that has been attempted (e.g. physical updates) """ self.key = key self.committed = committed self.attempted = attempted def __eq__(self, other): return ( self.key == other.key and self.committed == other.committed and self.attempted == other.attempted) def __hash__(self): return hash((self.key, self.committed, self.attempted)) def __repr__(self): return 'MetricResult(key={}, committed={}, attempted={})'.format( self.key, str(self.committed), str(self.attempted)) def __str__(self): return repr(self) @property def result(self): """Short-hand for falling back to attempted metrics if it seems that committed was not populated (e.g. due to not being supported on a given runner""" return self.committed if self.committed else self.attempted class _MetricsEnvironment(object): """Holds the MetricsContainer for every thread and other metric information. This class is not meant to be instantiated, instead being used to keep track of global state. """ def current_container(self): """Returns the current MetricsContainer.""" sampler = statesampler.get_current_tracker() if sampler is None: return None return sampler.current_state().metrics_container def process_wide_container(self): """Returns the MetricsContainer for process wide metrics, e.g. memory.""" return PROCESS_WIDE_METRICS_CONTAINER MetricsEnvironment = _MetricsEnvironment() class _TypedMetricName(object): """Like MetricName, but also stores the cell type of the metric.""" def __init__( self, cell_type, # type: Union[Type[MetricCell], MetricCellFactory] metric_name # type: Union[str, MetricName] ): # type: (...) -> None self.cell_type = cell_type self.metric_name = metric_name if isinstance(metric_name, str): self.fast_name = metric_name else: self.fast_name = metric_name.fast_name() # Cached for speed, as this is used as a key for every counter update. 
self._hash = hash((cell_type, self.fast_name)) def __eq__(self, other): return self is other or ( self.cell_type == other.cell_type and self.fast_name == other.fast_name) def __hash__(self): return self._hash def __str__(self): return '%s %s' % (self.cell_type, self.metric_name) def __reduce__(self): return _TypedMetricName, (self.cell_type, self.metric_name) _DEFAULT = None # type: Any class MetricUpdater(object): """A callable that updates the metric as quickly as possible.""" def __init__( self, cell_type, # type: Union[Type[MetricCell], MetricCellFactory] metric_name, # type: Union[str, MetricName] default_value=None, process_wide=False): self.process_wide = process_wide self.typed_metric_name = _TypedMetricName(cell_type, metric_name) self.default_value = default_value def __call__(self, value=_DEFAULT): # type: (Any) -> None if value is _DEFAULT: if self.default_value is _DEFAULT: raise ValueError( 'Missing value for update of %s' % self.typed_metric_name.fast_name) value = self.default_value if self.process_wide: MetricsEnvironment.process_wide_container().get_metric_cell( self.typed_metric_name).update(value) else: tracker = get_current_tracker() if tracker is not None: tracker.update_metric(self.typed_metric_name, value) def __reduce__(self): return MetricUpdater, ( self.typed_metric_name.cell_type, self.typed_metric_name.metric_name, self.default_value) class MetricsContainer(object): """Holds the metrics of a single step and a single bundle. Or the metrics associated with the process/SDK harness. I.e. memory usage. """ def __init__(self, step_name): self.step_name = step_name self.lock = threading.Lock() self.metrics = dict() # type: Dict[_TypedMetricName, MetricCell] def get_counter(self, metric_name): # type: (MetricName) -> CounterCell return cast( CounterCell, self.get_metric_cell(_TypedMetricName(CounterCell, metric_name))) def get_distribution(self, metric_name): # type: (MetricName) -> DistributionCell return cast( DistributionCell, self.get_metric_cell(_TypedMetricName(DistributionCell, metric_name))) def get_gauge(self, metric_name): # type: (MetricName) -> GaugeCell return cast( GaugeCell, self.get_metric_cell(_TypedMetricName(GaugeCell, metric_name))) def get_metric_cell(self, typed_metric_name): # type: (_TypedMetricName) -> MetricCell cell = self.metrics.get(typed_metric_name, None) if cell is None: with self.lock: cell = self.metrics[typed_metric_name] = typed_metric_name.cell_type() return cell def get_cumulative(self): # type: () -> MetricUpdates """Return MetricUpdates with cumulative values of all metrics in container. This returns all the cumulative values for all metrics. 
""" counters = { MetricKey(self.step_name, k.metric_name): v.get_cumulative() for k, v in self.metrics.items() if k.cell_type == CounterCell } distributions = { MetricKey(self.step_name, k.metric_name): v.get_cumulative() for k, v in self.metrics.items() if k.cell_type == DistributionCell } gauges = { MetricKey(self.step_name, k.metric_name): v.get_cumulative() for k, v in self.metrics.items() if k.cell_type == GaugeCell } return MetricUpdates(counters, distributions, gauges) def to_runner_api(self): return [ cell.to_runner_api_user_metric(key.metric_name) for key, cell in self.metrics.items() ] def to_runner_api_monitoring_infos(self, transform_id): # type: (str) -> Dict[FrozenSet, metrics_pb2.MonitoringInfo] """Returns a list of MonitoringInfos for the metrics in this container.""" with self.lock: items = list(self.metrics.items()) all_metrics = [ cell.to_runner_api_monitoring_info(key.metric_name, transform_id) for key, cell in items ] return { monitoring_infos.to_key(mi): mi for mi in all_metrics if mi is not None } def reset(self): # type: () -> None for metric in self.metrics.values(): metric.reset() def __reduce__(self): raise NotImplementedError PROCESS_WIDE_METRICS_CONTAINER = MetricsContainer(None) class MetricUpdates(object): """Contains updates for several metrics. A metric update is an object containing information to update a metric. For Distribution metrics, it is DistributionData, and for Counter metrics, it's an int. """ def __init__( self, counters=None, # type: Optional[Dict[MetricKey, int]] distributions=None, # type: Optional[Dict[MetricKey, DistributionData]] gauges=None # type: Optional[Dict[MetricKey, GaugeData]] ): # type: (...) -> None """Create a MetricUpdates object. Args: counters: Dictionary of MetricKey:MetricUpdate updates. distributions: Dictionary of MetricKey:MetricUpdate objects. gauges: Dictionary of MetricKey:MetricUpdate objects. """ self.counters = counters or {} self.distributions = distributions or {} self.gauges = gauges or {}
apache-2.0
1,169,649,252,800,144,600
31.907821
80
0.691962
false
3.94938
false
false
false
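The apache_beam record above defines MetricsContainer and MetricUpdates. A short sketch of how a container is typically driven, assuming apache_beam is installed; MetricName comes from apache_beam.metrics.metricbase and CounterCell.inc() from apache_beam.metrics.cells, neither of which is shown in this excerpt itself.

from apache_beam.metrics.execution import MetricsContainer
from apache_beam.metrics.metricbase import MetricName

container = MetricsContainer('my-step')
counter = container.get_counter(MetricName('my.namespace', 'elements'))
counter.inc()    # one physical update
counter.inc(10)  # another update, adding 10

# get_cumulative() snapshots every cell into a MetricUpdates keyed by MetricKey.
updates = container.get_cumulative()
for key, value in updates.counters.items():
    print(key, value)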
AlexanderPease/viv
app/ui_methods.py
1
2070
# Just for ordinalizing the number of district def ordinal(numb): if type(numb) is str: numb = int(float(numb)) if numb < 20: #determining suffix for < 20 if numb == 1: suffix = 'st' elif numb == 2: suffix = 'nd' elif numb == 3: suffix = 'rd' else: suffix = 'th' else: #determining suffix for > 20 tens = str(numb) tens = tens[-2] unit = str(numb) unit = unit[-1] if tens == "1": suffix = "th" else: if unit == "1": suffix = 'st' elif unit == "2": suffix = 'nd' elif unit == "3": suffix = 'rd' else: suffix = 'th' return str(numb)+ suffix def list_to_comma_delimited_string(list_arg): """ Takes a list and turns into comma-delimited string. Used for turning Group.invited_emails into correct form for template display. Args: list: A list, ex: ["alex@usv.com", "bob@usv.com"] or Group.users Returns A string , ex: "alex@usv.com, bob@usv.com" """ long_string = "" for item in list_arg: long_string += str(item) + ", " long_string = long_string[0:-2] # Remove last ", " return long_string def get_domain(email): """ Returns just the domain name of an email address Ex: reply.craigslist.com from foo@reply.craigslist.com """ return email.split('@')[1] def email_obscure(email): """ Obscures an email address Args: email: A string, ex: testcase@alexanderpease.com Returns A string , ex: t*******@alexanderpease.com """ first_letter = email[0] string_split = email.split('@') obscured = "" while len(obscured) < len(string_split[0])-1: obscured = obscured + "*" return first_letter + obscured + "@" + string_split[1] def encode(text): """ For printing unicode characters """ return text.encode('utf-8')
gpl-3.0
-6,869,473,597,955,327,000
24.256098
81
0.523671
false
3.526405
false
false
false
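The ui_methods record above is a handful of pure helper functions, so their behaviour can be checked directly. A usage sketch, assuming the module is importable as app.ui_methods per the record's path:

from app.ui_methods import (
    ordinal, list_to_comma_delimited_string, get_domain, email_obscure)

print(ordinal(1))    # '1st'
print(ordinal(11))   # '11th' -- numbers below 20 take 'th' except 1, 2 and 3
print(ordinal(23))   # '23rd'
print(list_to_comma_delimited_string(['alex@usv.com', 'bob@usv.com']))
# 'alex@usv.com, bob@usv.com'
print(get_domain('foo@reply.craigslist.com'))        # 'reply.craigslist.com'
print(email_obscure('testcase@alexanderpease.com'))  # 't*******@alexanderpease.com'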
geceo/django-gallery
general/models.py
1
1025
from django.db import models from django import forms from django.forms import ModelForm # Create your models here. class Settings(models.Model): # General gallery informations general_title = models.CharField(max_length=255) intro = models.TextField(blank=True) url = models.CharField(max_length=255) # Facebook connector facebook_appid = models.CharField(blank=True,max_length=255) facebook_appsecret = models.CharField(blank=True,max_length=255) facebook_profile_id = models.CharField(blank=True,max_length=255) facebook_canvas_url = models.CharField(blank=True,max_length=255) # Twitter connector twitter_account = models.CharField(max_length=255) twitter_consumer_key = models.CharField(max_length=255) twitter_consumer_secret = models.CharField(max_length=255) twitter_access_token = models.CharField(max_length=255) twitter_access_token_secret = models.CharField(max_length=255) class SettingsForm(ModelForm): class Meta: model = Settings
bsd-3-clause
966,780,564,379,091,100
36.962963
70
0.743415
false
3.68705
false
false
false
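The django-gallery record above is a plain Django model plus a ModelForm. A sketch of how it might be used from a Django shell or view, assuming the app is installed as 'general' (per the record's path) and its migrations have been applied:

from general.models import Settings, SettingsForm

# Persist one gallery configuration row; blank=True fields may be omitted.
settings = Settings.objects.create(
    general_title='My Gallery',
    url='https://example.org/gallery',
    twitter_account='@example',
    twitter_consumer_key='key',
    twitter_consumer_secret='secret',
    twitter_access_token='token',
    twitter_access_token_secret='token-secret',
)

# The ModelForm renders and validates the same fields.
form = SettingsForm(instance=settings)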
pida42/Zabbix-Addons
App-Servers/Memcached/getMemcachedInfo.py
1
3120
#!/usr/bin/env python # -*- coding: utf-8 -*- import getopt, sys from telnetlib import Telnet # default memcached server to check memcachedServer = '127.0.0.1' memcachedPort = '11211' ITEMS = ( 'bytes', 'cmd_get', 'cmd_set', 'curr_items', 'curr_connections', 'evictions', 'limit_maxbytes', 'uptime', 'get_hits', 'get_misses', 'version', 'bytes_read', 'bytes_written', ) ################################################################################ ### This is based in Enrico Tröger sources from: ### http://www.pending.io/yet-another-zabbix-template-to-monitor-memcache/ ### but I chose to make it with dictionaries instead of objects. ################################################################################ class MemcachedStatsReader(object): #---------------------------------------------------------------------- def __init__(self, server, port): self._server = server self._port = port self._stats_raw = None self._stats = None #---------------------------------------------------------------------- def read(self): self._read_stats() self._parse_stats() return self._stats #---------------------------------------------------------------------- def _read_stats(self): connection = Telnet(self._server, self._port, timeout=30) connection.write('stats\n') connection.write('quit\n') self._stats_raw = connection.read_all() #---------------------------------------------------------------------- def _parse_stats(self): self._stats = {} for line in self._stats_raw.splitlines(): if not line.startswith('STAT'): continue parts = line.split() if not parts[1] in ITEMS: continue index = parts[1] self._stats[index] = parts[2] try: ratio = float (self._stats["get_hits"]) * 100 / float (self._stats["cmd_get"]) except ZeroDivisionError: ratio = 0.0 self._stats["ratio"] = round (ratio, 2) try: usage = float (self._stats["bytes"]) * 100 / float (self._stats["limit_maxbytes"]) except ZeroDivisionError: usage = 0.0 self._stats["usage"] = round (usage, 2) #---------------------------------------------------------------------- def Usage (): print "Usage: getMemcachedInfo.py -h 127.0.0.1 -p 11211 -a <item>" sys.exit(2) def main(host, port): getInfo = "ratio" argv = sys.argv[1:] try: opts, args = getopt.getopt(argv, "h:p:a:") for opt,arg in opts: if opt == '-h': host = arg if opt == '-p': port = arg if opt == '-a': getInfo = arg except: Usage() data = MemcachedStatsReader(host, port) items = data.read() try: print items[getInfo] except: print "Not valid item." if __name__ == '__main__': main(memcachedServer, memcachedPort)
mit
4,151,833,745,684,230,000
28.149533
94
0.445976
false
4.302069
false
false
false
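The Zabbix memcached record above wraps a telnet 'stats' call in MemcachedStatsReader. Besides the command-line entry point (getMemcachedInfo.py -h 127.0.0.1 -p 11211 -a ratio), the class can be driven directly; this sketch assumes the script is importable as getMemcachedInfo and that a memcached instance is listening locally (the script itself is Python 2 era):

from getMemcachedInfo import MemcachedStatsReader

reader = MemcachedStatsReader('127.0.0.1', '11211')
stats = reader.read()       # telnets in, sends 'stats', parses the STAT lines
print(stats['ratio'])       # get_hits / cmd_get as a percentage
print(stats['usage'])       # bytes / limit_maxbytes as a percentage
print(stats['curr_items'])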
sergiusens/snapcraft
tests/integration/__init__.py
1
27766
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # # Copyright (C) 2015-2018 Canonical Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import distutils.util import fileinput import glob import os import re import shutil import subprocess import sys import time import uuid from distutils import dir_util from textwrap import dedent from typing import Callable, List, Union import fixtures import pexpect from pexpect import popen_spawn import requests import testtools from testtools import content from testtools.matchers import MatchesRegex from snapcraft import yaml_utils from tests import fixture_setup, os_release, subprocess_utils from tests.integration import platform class RegisterError(Exception): pass class TestCase(testtools.TestCase): def setUp(self): super().setUp() if os.getenv("SNAPCRAFT_FROM_SNAP", False): self.snapcraft_command = "/snap/bin/snapcraft" elif os.getenv("SNAPCRAFT_FROM_DEB", False): self.snapcraft_command = "/usr/bin/snapcraft" self.snapcraft_parser_command = "/usr/bin/snapcraft-parser" elif os.getenv("VIRTUAL_ENV") and sys.platform == "win32": self.snapcraft_command = ["python", "-m", "snapcraft.cli.__main__"] self.snapcraft_parser_command = os.path.join( os.getenv("VIRTUAL_ENV"), "bin", "snapcraft-parser" ) elif os.getenv("VIRTUAL_ENV"): self.snapcraft_command = os.path.join( os.getenv("VIRTUAL_ENV"), "bin", "snapcraft" ) self.snapcraft_parser_command = os.path.join( os.getenv("VIRTUAL_ENV"), "bin", "snapcraft-parser" ) elif os.getenv("SNAPCRAFT_FROM_BREW", False): self.snapcraft_command = "/usr/local/bin/snapcraft" else: raise EnvironmentError( "snapcraft is not setup correctly for testing. Either set " "SNAPCRAFT_FROM_SNAP, SNAPCRAFT_FROM_DEB or " "SNAPCRAFT_FROM_BREW to run from either the snap, deb or " "brew, or make sure your venv is properly setup as described " "in HACKING.md." ) if os.getenv("SNAPCRAFT_FROM_SNAP", False): self.patchelf_command = "/snap/snapcraft/current/usr/bin/patchelf" self.execstack_command = "/snap/snapcraft/current/usr/sbin/execstack" else: self.patchelf_command = "patchelf" self.execstack_command = "execstack" self.snaps_dir = os.path.join(os.path.dirname(__file__), "snaps") temp_cwd_fixture = fixture_setup.TempCWD() self.useFixture(temp_cwd_fixture) self.path = temp_cwd_fixture.path # Use a separate path for XDG dirs, or changes there may be detected as # source changes. self.xdg_path = self.useFixture(fixtures.TempDir()).path self.useFixture(fixture_setup.TempXDG(self.xdg_path)) # Use a dumb terminal for tests self.useFixture(fixtures.EnvironmentVariable("TERM", "dumb")) # Disable Sentry reporting for tests, otherwise they'll hang waiting # for input self.useFixture( fixtures.EnvironmentVariable("SNAPCRAFT_ENABLE_ERROR_REPORTING", "false") ) # Don't let the managed host variable leak into tests self.useFixture(fixtures.EnvironmentVariable("SNAPCRAFT_MANAGED_HOST")) # Note that these directories won't exist when the test starts, # they might be created after calling the snapcraft command on the # project dir. 
self.parts_dir = "parts" self.stage_dir = "stage" self.prime_dir = "prime" self.deb_arch = platform.get_deb_arch() self.arch_triplet = platform.get_arch_triplet() self.distro_series = os_release.get_version_codename() def run_snapcraft( self, command: Union[str, List[str]] = None, project_dir: str = None, debug: bool = True, pre_func: Callable[[], None] = lambda: None, env=None, ) -> None: if project_dir: self.copy_project_to_cwd(project_dir) if command is None: command = [] if isinstance(command, str): command = [command] snapcraft_command = self.snapcraft_command if isinstance(snapcraft_command, str): snapcraft_command = [snapcraft_command] if debug: snapcraft_command.append("-d") try: pre_func() snapcraft_output = subprocess.check_output( snapcraft_command + command, stderr=subprocess.STDOUT, universal_newlines=True, env=env, ) except subprocess.CalledProcessError as e: self.addDetail("command", content.text_content(str(self.snapcraft_command))) self.addDetail("output", content.text_content(e.output)) raise except FileNotFoundError: self.addDetail("command", content.text_content(str(self.snapcraft_command))) raise if not os.getenv("SNAPCRAFT_IGNORE_APT_AUTOREMOVE", False): self.addCleanup(self.run_apt_autoremove) return snapcraft_output def spawn_snapcraft(self, command: Union[str, List[str]]): snapcraft_command = self.snapcraft_command if isinstance(snapcraft_command, str): snapcraft_command = [snapcraft_command] try: return popen_spawn.PopenSpawn(" ".join(snapcraft_command + command)) except FileNotFoundError: self.addDetail("command", content.text_content(str(snapcraft_command))) def run_snapcraft_parser(self, arguments): try: snapcraft_output = subprocess.check_output( [self.snapcraft_parser_command, "-d"] + arguments, stderr=subprocess.STDOUT, universal_newlines=True, ) except subprocess.CalledProcessError as e: self.addDetail("output", content.text_content(e.output)) raise return snapcraft_output def run_apt_autoremove(self): if sys.platform == "win32": return deb_env = os.environ.copy() deb_env.update( {"DEBIAN_FRONTEND": "noninteractive", "DEBCONF_NONINTERACTIVE_SEEN": "true"} ) try: autoremove_output = subprocess.check_output( "sudo apt-get autoremove -y".split(), stderr=subprocess.STDOUT, env=deb_env, ) self.addDetail( "apt-get autoremove output", content.text_content(autoremove_output.decode("utf-8")), ) except FileNotFoundError as e: self.addDetail("apt-get autoremove error", content.text_content(str(e))) except subprocess.CalledProcessError as e: self.addDetail("apt-get autoremove error", content.text_content(str(e))) self.addDetail( "apt-get autoremove output", content.text_content(e.output.decode("utf-8")), ) if os.getenv("SNAPCRAFT_APT_AUTOREMOVE_CHECK_FAIL", False): raise def copy_project_to_cwd(self, project_dir: str) -> None: # Because cwd already exists, shutil.copytree would raise # FileExistsError. 
Use the lesser known distutils.dir_util.copy_tree dir_util.copy_tree( os.path.join(self.snaps_dir, project_dir), self.path, preserve_symlinks=True ) def construct_yaml( self, name="test", version="0.1", summary="Simple test snap", description="Something something", grade=None, architectures=None, parts=dedent( """\ my-part: plugin: nil """ ), build_packages="[]", adopt_info=None, ): snapcraft_yaml = { "name": name, "summary": summary, "description": description, "parts": yaml_utils.load(parts), "build-packages": yaml_utils.load(build_packages), } if version: snapcraft_yaml["version"] = version if adopt_info: snapcraft_yaml["adopt-info"] = adopt_info if grade: snapcraft_yaml["grade"] = grade if architectures: snapcraft_yaml["architectures"] = architectures with open("snapcraft.yaml", "w") as f: yaml_utils.dump(snapcraft_yaml, stream=f) def get_output_ignoring_non_zero_exit(self, binary, cwd=None): # Executing the binaries exists > 0 on trusty. # TODO investigate more to understand the cause. try: output = subprocess.check_output(binary, universal_newlines=True, cwd=cwd) except subprocess.CalledProcessError as exception: output = exception.output return output def set_stage_package_version( self, snapcraft_yaml_path, part, package, version=None ): return self.set_package_version( "stage-packages", snapcraft_yaml_path, part, package, version ) def set_build_package_version( self, snapcraft_yaml_path, part, package, version=None ): return self.set_package_version( "build-packages", snapcraft_yaml_path, part, package, version ) def set_package_version( self, type_, snapcraft_yaml_path, part, package, version=None ): # This doesn't handle complex package syntax. with open(snapcraft_yaml_path) as snapcraft_yaml_file: snapcraft_yaml = yaml_utils.load(snapcraft_yaml_file) if part: packages = snapcraft_yaml["parts"][part].get(type_, []) else: packages = snapcraft_yaml.get(type_, []) for index, package_in_yaml in enumerate(packages): if package_in_yaml.split("=")[0] == package: if version is None: version = get_package_version( package, self.distro_series, self.deb_arch ) packages[index] = "{}={}".format(package, version) break else: self.fail("The part {} doesn't have a package {}".format(part, package)) with open(snapcraft_yaml_path, "w") as snapcraft_yaml_file: yaml_utils.dump(snapcraft_yaml, stream=snapcraft_yaml_file) return version def set_build_package_architecture( self, snapcraft_yaml_path, part, package, architecture ): # This doesn't handle complex package syntax. 
with open(snapcraft_yaml_path) as snapcraft_yaml_file: snapcraft_yaml = yaml_utils.load(snapcraft_yaml_file) packages = snapcraft_yaml["parts"][part]["build-packages"] for index, package_in_yaml in enumerate(packages): if package_in_yaml == package: packages[index] = "{}:{}".format(package, architecture) break else: self.fail("The part {} doesn't have a package {}".format(part, package)) with open(snapcraft_yaml_path, "w") as snapcraft_yaml_file: yaml_utils.dump(snapcraft_yaml, stream=snapcraft_yaml_file) class BzrSourceBaseTestCase(TestCase): def setUp(self): super().setUp() if shutil.which("bzr") is None: self.skipTest("bzr is not installed") def init_source_control(self): subprocess.check_call( ["bzr", "init", "."], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL ) subprocess.check_call( ["bzr", "whoami", "--branch", '"Example Dev <dev@example.com>"'] ) def commit(self, message, unchanged=False): command = ["bzr", "commit", "-m", message] if unchanged: command.append("--unchanged") subprocess.check_call( command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL ) def get_revno(self, path=None): command = ["bzr", "revno", "-r", "-1"] if path: command.append(path) return subprocess.check_output(command, universal_newlines=True).strip() class GitSourceBaseTestCase(TestCase): def setUp(self): super().setUp() if shutil.which("git") is None: self.skipTest("git is not installed") def init_source_control(self): subprocess.check_call(["git", "init", "."], stdout=subprocess.DEVNULL) subprocess.check_call( ["git", "config", "--local", "user.name", '"Example Dev"'] ) subprocess.check_call( ["git", "config", "--local", "user.email", "dev@example.com"] ) def add_file(self, file_path): subprocess.check_call(["git", "add", file_path], stdout=subprocess.DEVNULL) def commit(self, message, allow_empty=False): command = ["git", "commit", "-m", message] if allow_empty: command.append("--allow-empty") subprocess.check_call(command, stdout=subprocess.DEVNULL) def tag(self, tag_name): subprocess.check_call( ["git", "tag", "-a", "-m", tag_name, tag_name], stdout=subprocess.DEVNULL ) def get_revno(self): return subprocess_utils.call_with_output( ["git", "rev-list", "HEAD", "--max-count=1"] ) class HgSourceBaseTestCase(TestCase): def setUp(self): super().setUp() if shutil.which("hg") is None: self.skipTest("mercurial is not installed") def init_source_control(self): subprocess.check_call(["hg", "init", "."]) def commit(self, message, file_): subprocess.check_call( ["hg", "commit", "-m", message, "--user", '"Example Dev"', "-A", file_] ) def get_revno(self, path=None): command = ["hg", "log", "--template", '"{desc}"', "-r", "-1"] if path: command.extend(["--cwd", path]) return subprocess.check_output(command, universal_newlines=True).strip() def get_id(self): return subprocess_utils.call_with_output(["hg", "id"]).split()[0] class SubversionSourceBaseTestCase(TestCase): def setUp(self): super().setUp() if shutil.which("svn") is None: self.skipTest("svn is not installed") def init_source_control(self): subprocess.check_call(["svnadmin", "create", "repo"], stdout=subprocess.DEVNULL) def checkout(self, source, destination): subprocess.check_call( ["svn", "checkout", source, destination], stdout=subprocess.DEVNULL ) def add(self, file_path, cwd=None): subprocess.check_call( ["svn", "add", file_path], stdout=subprocess.DEVNULL, cwd=cwd ) def commit(self, message, cwd=None): subprocess.check_call( ["svn", "commit", "-m", message], stdout=subprocess.DEVNULL, cwd=cwd ) def update(self, cwd=None): 
subprocess.check_call(["svn", "update"], stdout=subprocess.DEVNULL, cwd=cwd) class StoreTestCase(TestCase): def setUp(self): super().setUp() self.test_store = fixture_setup.TestStore() self.useFixture(self.test_store) self.useFixture(fixtures.EnvironmentVariable("SNAPCRAFT_TEST_INPUT", "1")) def is_store_fake(self): return (os.getenv("TEST_STORE") or "fake") == "fake" def is_store_staging(self): return os.getenv("TEST_STORE") == "staging" def _conduct_login(self, process, email, password, expect_success) -> None: process.expect_exact( "Enter your Ubuntu One e-mail address and password." + os.linesep ) process.expect_exact( "If you do not have an Ubuntu One account, you can create one at " "https://dashboard.snapcraft.io/openid/login" + os.linesep ) process.expect_exact("Email: ") process.sendline(email) process.expect_exact("Password: ") process.sendline(password) if expect_success: process.expect_exact( "We strongly recommend enabling multi-factor authentication:" ) def export_login( self, export_path, email: str = None, password: str = None, expect_success: bool = True, ) -> None: email = email or self.test_store.user_email password = password or self.test_store.user_password process = self.spawn_snapcraft(["export-login", export_path]) self._conduct_login(process, email, password, expect_success) if expect_success: process.expect("This exported login is not encrypted") else: process.expect("Authentication error: Failed to get unbound discharge.") def login(self, email=None, password=None, expect_success=True): email = email or self.test_store.user_email password = password or self.test_store.user_password process = self.spawn_snapcraft(["login"]) self._conduct_login(process, email, password, expect_success) if expect_success: process.expect_exact("Login successful.") else: process.expect("Authentication error: Failed to get unbound discharge.") def logout(self): output = self.run_snapcraft("logout") expected = r".*Credentials cleared.\n.*" self.assertThat(output, MatchesRegex(expected, flags=re.DOTALL)) def register(self, snap_name, private=False, wait=True): command = ["register", snap_name] if private: command.append("--private") process = self.spawn_snapcraft(command) process.expect(r".*\[y/N\]: ") process.sendline("y") try: process.expect_exact( "Congrats! You are now the publisher of {!r}.".format(snap_name) ) except pexpect.exceptions.EOF: wait_error_regex = ( ".*You must wait (\d+) seconds before trying to register your " "next snap.*" ) output = process.before.decode(sys.getfilesystemencoding()) match = re.search(wait_error_regex, output) if wait and match: time.sleep(int(match.group(1))) # This could get stuck for ever if the user is registering # other snaps in parallel. self.register(snap_name, private, wait) else: raise RegisterError(output) def register_key(self, key_name, email=None, password=None, expect_success=True): email = email or self.test_store.user_email password = password or self.test_store.user_password process = self.spawn_snapcraft(["register-key", key_name]) process.expect_exact( "Enter your Ubuntu One e-mail address and password." + os.linesep ) process.expect_exact( "If you do not have an Ubuntu One account, you can create one at " "https://dashboard.snapcraft.io/openid/login" + os.linesep ) process.expect_exact("Email: ") process.sendline(email) process.expect_exact("Password: ") process.sendline(password) if expect_success: process.expect_exact( "We strongly recommend enabling multi-factor authentication:" ) process.expect( r'Done\. 
The key "{}" .* may be used to sign your ' r"assertions\.".format(key_name) ) else: process.expect_exact( "Cannot continue without logging in successfully: " "Authentication error: Failed to get unbound discharge" ) process.expect(pexpect.EOF) return process.wait() def list_keys(self, expected_keys): process = self.spawn_snapcraft(["list-keys"]) for enabled, key_name, key_id in expected_keys: process.expect( "{} *{} *{}".format("\*" if enabled else "-", key_name, key_id) ) process.expect(pexpect.EOF) return process.wait() def list_registered(self, expected_snaps): process = self.spawn_snapcraft(["list-registered"]) for name, visibility, price, notes in expected_snaps: # Ignores 'since' to avoid confusion on fake and actual stores. process.expect( "{} *[T:\-\d]+Z *{} *{} *{}".format(name, visibility, price, notes) ) process.expect(pexpect.EOF) return process.wait() def get_unique_name(self, prefix=""): """Return a unique snap name. It uses a UUIDv4 to create unique names and limits its full size to 40 chars (as defined in the snap specification). """ unique_id = uuid.uuid4().int # Do not change the test-snapcraft- prefix. Ensure that you # notify the store team if you need to use a different value when # working with the production store. return "test-snapcraft-{}{}".format(prefix, unique_id)[:40] def get_unique_version(self): """Return a unique snap version. It uses a UUIDv4 to create unique version and limits its full size to 32 chars (as defined in the snap specification). """ unique_id = uuid.uuid4().int return "{}".format(unique_id)[:32] def update_name_arch_and_version(self, name=None, arch=None, version=None): if name is None: name = self.get_unique_name() if version is None: version = self.get_unique_version() if arch is None: arch = "amd64" for line in fileinput.input( os.path.join("snap", "snapcraft.yaml"), inplace=True ): if "name: " in line: print("name: {}".format(name)) elif "version: " in line: print("version: {}".format(version)) elif "architectures: " in line: print("architectures: [{}]".format(arch)) else: print(line) def update_name_and_version(self, name=None, version=None): if name is None: name = self.get_unique_name() if version is None: version = self.get_unique_version() for line in fileinput.input( os.path.join("snap", "snapcraft.yaml"), inplace=True ): if "name: " in line: print("name: {}".format(name)) elif "version: " in line: print("version: {}".format(version)) else: print(line) def gated(self, snap_name, expected_validations=[], expected_output=None): process = self.spawn_snapcraft(["gated", snap_name]) if expected_output: process.expect(expected_output) else: for name, revision in expected_validations: process.expect("{} *{}".format(name, revision)) process.expect(pexpect.EOF) return process.wait() def validate(self, snap_name, validations, expected_error=None): process = self.spawn_snapcraft(["validate", snap_name] + validations) if expected_error: process.expect(expected_error) else: for v in validations: process.expect("Signing validations assertion for {}".format(v)) process.expect(pexpect.EOF) return process.wait() def sign_build( self, snap_filename, key_name="default", local=False, expect_success=True ): cmd = ["sign-build", snap_filename, "--key-name", key_name] if local: # only sign it, no pushing cmd.append("--local") process = self.spawn_snapcraft(cmd) if expect_success: if local: process.expect( "Build assertion .*{}-build saved to disk.".format(snap_filename) ) else: process.expect( "Build assertion .*{}-build pushed.".format(snap_filename) ) 
process.expect(pexpect.EOF) return process.wait() def close(self, *args, **kwargs): process = self.spawn_snapcraft(["close"] + list(args)) expected = kwargs.get("expected") if expected is not None: process.expect(expected) process.expect(pexpect.EOF) return process.wait() def push(self, snap, release=None, expected=None): actions = ["push", snap] if release is not None: actions += ["--release", release] process = self.spawn_snapcraft(actions) if expected is not None: process.expect(expected) process.expect(pexpect.EOF) return process.wait() class SnapdIntegrationTestCase(TestCase): slow_test = False def setUp(self) -> None: super().setUp() run_slow_tests = os.environ.get("SNAPCRAFT_SLOW_TESTS", False) if run_slow_tests: run_slow_tests = distutils.util.strtobool(str(run_slow_tests)) if self.slow_test and not run_slow_tests: self.skipTest("Not running slow tests") if os.environ.get("ADT_TEST") and self.deb_arch == "armhf": self.skipTest("The autopkgtest armhf runners can't install snaps") def install_snap(self) -> None: try: subprocess.check_output( ["sudo", "snap", "install", glob.glob("*.snap")[0], "--dangerous"], stderr=subprocess.STDOUT, universal_newlines=True, ) except subprocess.CalledProcessError as e: self.addDetail("output", content.text_content(e.output)) raise def get_package_version(package_name, series, deb_arch): # http://people.canonical.com/~ubuntu-archive/madison.cgi?package=hello&a=amd64&c=&s=zesty&text=on params = { "package": package_name, "s": "{0},{0}-updates,{0}-security".format(series), "a": deb_arch, "text": "on", } query = requests.get( "http://people.canonical.com/~ubuntu-archive/madison.cgi", params ) query.raise_for_status() package = query.text.strip().split("\n")[-1] package_status = [i.strip() for i in package.strip().split("|")] return package_status[1] def add_stage_packages( *, part_name: str, stage_packages: List[str], snapcraft_yaml_file=None ): if snapcraft_yaml_file is None: snapcraft_yaml_file = os.path.join("snap", "snapcraft.yaml") with open(snapcraft_yaml_file) as file_read: y = yaml_utils.load(file_read) if "stage-packages" in y["parts"][part_name]: y["parts"][part_name]["stage-packages"].extend(stage_packages) else: y["parts"][part_name]["stage-packages"] = stage_packages with open(snapcraft_yaml_file, "w") as file_write: yaml_utils.dump(y, stream=file_write)
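

# A minimal, illustrative sketch (not part of the original module) of how the
# StoreTestCase helpers defined above could be combined in a store test. The
# class name and the snap file name below are hypothetical placeholders.
class _ExampleStoreTest(StoreTestCase):
    def test_register_and_push(self):
        self.login()
        snap_name = self.get_unique_name()
        self.register(snap_name)
        self.push("example_1.0_amd64.snap", release="edge")
        self.logout()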
gpl-3.0
-1,028,729,214,117,175,600
35.582345
102
0.587733
false
4.076641
true
false
false
LawrenceK/console-server
consoleserver/ssh.py
1
4049
# # (C) Copyright L.P.Klyne 2013 # """This is based on the basic ssh server example, the protocol handler has been pulled out as a separate source as this is where the logic for the console server sits. """ import logging _log = logging.getLogger(__name__) import os import grp from zope.interface import implements from twisted.cred import portal from twisted.conch import avatar from twisted.conch.ssh import factory, userauth, connection, keys, session from twisted.conch.checkers import SSHPublicKeyDatabase, UNIXPasswordDatabase from twisted.python import components from twisted.python import randbytes from ssh_protocol import TSProtocol import config class TSAvatar(avatar.ConchUser): def __init__(self, username): avatar.ConchUser.__init__(self) self.username = username self.channelLookup.update({'session': session.SSHSession}) def check_priviledged(self): """Test for membership of root or sudo groups, hence has admin ability""" def is_user_in_group(groupname): return self.username in grp.getgrnam(groupname)[3] print "TSAvatar.check_priviledged %s" % self.username _log.debug("TSAvatar.check_priviledged %s", self.username) return is_user_in_group("root") or is_user_in_group("sudo") class TSRealm: implements(portal.IRealm) def requestAvatar(self, avatarId, mind, *interfaces): return interfaces[0], TSAvatar(avatarId), lambda: None class TSSession: implements(session.ISession) def __init__(self, avatar): self.avatar = avatar @property def factory(self): return self.conn.transport.factory def getPty(self, term, windowSize, attrs): pass def execCommand(self, proto, cmd): raise Exception("no executing commands") def openShell(self, protocol): _log.debug("openShell %s", protocol.getHost().address.port) # protocol is an SSHSessionProcessProtocol object # protocol.getHost().address.port # protocol.factory # protocol.transport # TODO if port is global sshport create CLI ts_protocol = TSProtocol(self.avatar) ts_protocol.makeConnection(protocol) protocol.makeConnection(session.wrapProtocol(ts_protocol)) def windowChanged(newWindowSize): pass def eofReceived(self): pass def closed(self): pass TS_portal = portal.Portal(TSRealm()) TS_portal.registerChecker(UNIXPasswordDatabase()) TS_portal.registerChecker(SSHPublicKeyDatabase()) components.registerAdapter(TSSession, TSAvatar, session.ISession) class TSFactory(factory.SSHFactory): portal = TS_portal services = { 'ssh-userauth': userauth.SSHUserAuthServer, 'ssh-connection': connection.SSHConnection } publickey_file = 'public.key' privatekey_file = 'private.key' publicKeys = {} privateKeys = {} def getRSAKeys(self): TSFactory.publickey_file = config.find_file( TSFactory.publickey_file, default = True ) TSFactory.privatekey_file = config.find_file( TSFactory.privatekey_file, default = True ) if not (os.path.exists(self.publickey_file) and os.path.exists(self.privatekey_file)): # generate a RSA keypair _log.info("Generating RSA keypair") from Crypto.PublicKey import RSA KEY_LENGTH = 1024 rsaKey = RSA.generate(KEY_LENGTH, randbytes.secureRandom) # save keys for next time file(self.publickey_file, 'w+b').write(keys.Key(rsaKey).public().toString('OPENSSH')) file(self.privatekey_file, 'w+b').write(keys.Key(rsaKey).toString('OPENSSH')) TSFactory.publicKeys['ssh-rsa'] = keys.Key.fromString(data=file(self.publickey_file).read()) TSFactory.privateKeys['ssh-rsa'] = keys.Key.fromString(data=file(self.privatekey_file).read()) def __init__(self, consolecollection): self.consolecollection = consolecollection self.getRSAKeys() # we then start the listen using TSFactory
gpl-3.0
4,608,994,057,645,392,400
32.188525
104
0.687824
false
3.841556
false
false
false
marmyshev/transitions
openlp/plugins/songs/lib/mediashoutimport.py
1
5385
# -*- coding: utf-8 -*- # vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4 ############################################################################### # OpenLP - Open Source Lyrics Projection # # --------------------------------------------------------------------------- # # Copyright (c) 2008-2013 Raoul Snyman # # Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan # # Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, # # Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. # # Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, # # Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, # # Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, # # Frode Woldsund, Martin Zibricky, Patrick Zimmermann # # --------------------------------------------------------------------------- # # This program is free software; you can redistribute it and/or modify it # # under the terms of the GNU General Public License as published by the Free # # Software Foundation; version 2 of the License. # # # # This program is distributed in the hope that it will be useful, but WITHOUT # # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # # more details. # # # # You should have received a copy of the GNU General Public License along # # with this program; if not, write to the Free Software Foundation, Inc., 59 # # Temple Place, Suite 330, Boston, MA 02111-1307 USA # ############################################################################### """ The :mod:`mediashoutimport` module provides the functionality for importing a MediaShout database into the OpenLP database. """ import pyodbc from openlp.core.lib import translate from openlp.plugins.songs.lib.songimport import SongImport VERSE_TAGS = [u'V', u'C', u'B', u'O', u'P', u'I', u'E'] class MediaShoutImport(SongImport): """ The :class:`MediaShoutImport` class provides the ability to import the MediaShout Access Database """ def __init__(self, manager, **kwargs): """ Initialise the MediaShout importer. """ SongImport.__init__(self, manager, **kwargs) def doImport(self): """ Receive a single file to import. 
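        The import source is expected to be a MediaShout Access database,
        opened via the Microsoft Access ODBC driver.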
""" try: conn = pyodbc.connect(u'DRIVER={Microsoft Access Driver (*.mdb)};' u'DBQ=%s;PWD=6NOZ4eHK7k' % self.importSource) except: # Unfortunately no specific exception type self.logError(self.importSource, translate('SongsPlugin.MediaShoutImport', 'Unable to open the MediaShout database.')) return cursor = conn.cursor() cursor.execute(u'SELECT Record, Title, Author, Copyright, ' u'SongID, CCLI, Notes FROM Songs ORDER BY Title') songs = cursor.fetchall() self.importWizard.progressBar.setMaximum(len(songs)) for song in songs: if self.stopImportFlag: break cursor.execute(u'SELECT Type, Number, Text FROM Verses ' u'WHERE Record = %s ORDER BY Type, Number' % song.Record) verses = cursor.fetchall() cursor.execute(u'SELECT Type, Number, POrder FROM PlayOrder ' u'WHERE Record = %s ORDER BY POrder' % song.Record) verse_order = cursor.fetchall() cursor.execute(u'SELECT Name FROM Themes INNER JOIN SongThemes ' u'ON SongThemes.ThemeId = Themes.ThemeId ' u'WHERE SongThemes.Record = %s' % song.Record) topics = cursor.fetchall() cursor.execute(u'SELECT Name FROM Groups INNER JOIN SongGroups ' u'ON SongGroups.GroupId = Groups.GroupId ' u'WHERE SongGroups.Record = %s' % song.Record) topics += cursor.fetchall() self.processSong(song, verses, verse_order, topics) def processSong(self, song, verses, verse_order, topics): """ Create the song, i.e. title, verse etc. """ self.setDefaults() self.title = song.Title self.parseAuthor(song.Author) self.addCopyright(song.Copyright) self.comments = song.Notes for topic in topics: self.topics.append(topic.Name) if u'-' in song.SongID: self.songBookName, self.songNumber = song.SongID.split(u'-', 1) else: self.songBookName = song.SongID for verse in verses: tag = VERSE_TAGS[verse.Type] + unicode(verse.Number) if verse.Type < len(VERSE_TAGS) else u'O' self.addVerse(verse.Text, tag) for order in verse_order: if order.Type < len(VERSE_TAGS): self.verseOrderList.append(VERSE_TAGS[order.Type] + unicode(order.Number)) self.finish()
gpl-2.0
-6,985,386,025,895,966,000
48.842593
106
0.552108
false
4.08732
false
false
false
rchaber/publishbay
bayforms.py
1
1690
from wtforms import fields from wtforms import Form from wtforms import validators from boilerplate.lib import utils from webapp2_extras.i18n import lazy_gettext as _ from webapp2_extras.i18n import ngettext, gettext from boilerplate import forms as forms from config import utils as bayutils FIELD_MAXLENGTH = 50 # intended to stop maliciously long input class EditProDetails(forms.BaseForm): display_full_name = fields.RadioField(_('Display Name'), choices=[('True', _('show your full name')), ('False', _(' - show your first name and last initial'))], coerce=unicode) title = fields.TextField(_('Title'), [validators.Length(max=FIELD_MAXLENGTH)]) profile_visibility = fields.RadioField(_('Profile Visibility'), choices=[ ('everyone', _('Anyone can see your profile whether or not they are logged into PublishBay.')), ('pb_users_only', _('Only PublishBay users who are logged in to PublishBay can see your profile.')), ('hidden', _('Clients can see your profile only if you have applied to their job.')) ]) english_level = fields.SelectField(_('English level'), choices=[1, 2, 3, 4, 5]) class EditContactInfo(forms.BaseForm): address1 = fields.TextField(_('Address 1'), [validators.Length(max=FIELD_MAXLENGTH)]) address2 = fields.TextField(_('Address 2'), [validators.Length(max=FIELD_MAXLENGTH)]) city = fields.TextField(_('City'), [validators.Length(max=FIELD_MAXLENGTH)]) state = fields.TextField(_('State'), [validators.Length(max=FIELD_MAXLENGTH)]) zipcode = fields.TextField(_('ZIP'), [validators.Length(max=FIELD_MAXLENGTH)]) phone = fields.TextField(_('Phone'), [validators.Length(max=FIELD_MAXLENGTH)])
lgpl-3.0
1,359,037,825,371,715,600
51.8125
180
0.714793
false
3.976471
false
false
false
coderbone/SickRage-alt
sickbeard/providers/morethantv.py
1
9942
# coding=utf-8 # Author: Dustyn Gibson <miigotu@gmail.com> # # URL: https://sickchill.github.io # # This file is part of SickChill. # # SickChill is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickChill is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickChill. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import, print_function, unicode_literals # Stdlib Imports import re # Third Party Imports from requests.compat import urljoin from requests.utils import dict_from_cookiejar # First Party Imports from sickbeard import logger, tvcache from sickbeard.bs4_parser import BS4Parser from sickbeard.show_name_helpers import allPossibleShowNames from sickchill.helper.common import convert_size, try_int from sickchill.helper.exceptions import AuthException from sickchill.providers.torrent.TorrentProvider import TorrentProvider class MoreThanTVProvider(TorrentProvider): def __init__(self): # Provider Init TorrentProvider.__init__(self, "MoreThanTV") # Credentials self.username = None self.password = None self._uid = None self._hash = None # Torrent Stats self.minseed = None self.minleech = None self.freeleech = None # URLs self.url = 'https://www.morethan.tv/' self.urls = { 'login': urljoin(self.url, 'login.php'), 'search': urljoin(self.url, 'torrents.php'), } # Proper Strings self.proper_strings = ['PROPER', 'REPACK'] # Cache self.cache = tvcache.TVCache(self) def _check_auth(self): if not self.username or not self.password: raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.") return True def login(self): if any(dict_from_cookiejar(self.session.cookies).values()): return True login_params = { 'username': self.username, 'password': self.password, 'keeplogged': '1', 'login': 'Log in', } response = self.get_url(self.urls['login'], post_data=login_params, returns='text') if not response: logger.log("Unable to connect to provider", logger.WARNING) return False if re.search('Your username or password was incorrect.', response): logger.log("Invalid username or password. 
Check your settings", logger.WARNING) return False return True def search(self, search_strings, age=0, ep_obj=None): results = [] if not self.login(): return results # Search Params search_params = { 'tags_type': 1, 'order_by': 'time', 'order_way': 'desc', 'action': 'basic', 'searchsubmit': 1, 'searchstr': '' } # Units units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] def process_column_header(td): result = '' if td.a and td.a.img: result = td.a.img.get('title', td.a.get_text(strip=True)) if not result: result = td.get_text(strip=True) return result for mode in search_strings: items = [] logger.log("Search Mode: {0}".format(mode), logger.DEBUG) for search_string in search_strings[mode]: if mode != 'RSS': logger.log("Search string: {0}".format (search_string.decode("utf-8")), logger.DEBUG) if mode == 'Season': searchedSeason = re.match('.*\s(Season\s\d+|S\d+)', search_string).group(1) search_params['searchstr'] = search_string data = self.get_url(self.urls['search'], params=search_params, returns='text') if not data: logger.log("No data returned from provider", logger.DEBUG) continue with BS4Parser(data, 'html5lib') as html: torrent_table = html.find('table', class_='torrent_table') torrent_rows = torrent_table('tr') if torrent_table else [] # Continue only if at least one Release is found if len(torrent_rows) < 2: logger.log("Data returned from provider does not contain any torrents", logger.DEBUG) continue labels = [process_column_header(label) for label in torrent_rows[0]('td')] # Skip column headers for result in torrent_rows[1:]: try: # skip if torrent has been nuked due to poor quality if result.find('img', alt='Nuked'): continue title = result.find('a', title='View torrent').get_text(strip=True) if mode == 'Season': # Skip if torrent isn't the right season, we can't search # for an exact season on MTV, it returns all of them if searchedSeason not in title: continue # If torrent is grouped, we need a folder name for title if 'Season' in title: torrentid = urljoin(self.url, result.find('span', title='Download').parent['href']) torrentid = re.match('.*?id=([0-9]+)', torrentid).group(1) group_params = { 'torrentid': torrentid } # Obtain folder name to use as title torrentInfo = self.get_url(self.urls['search'], params=group_params, returns='text').replace('\n', '') releaseregex = '.*files_{0}.*?;">/(.+?(?=/))'.format(re.escape(torrentid)) releasename = re.search(releaseregex, torrentInfo).group(1) title = releasename download_url = urljoin(self.url, result.find('span', title='Download').parent['href']) if not all([title, download_url]): continue cells = result('td') seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True)) leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True)) # Filter unseeded torrent if seeders < self.minseed or leechers < self.minleech: if mode != 'RSS': logger.log("Discarding torrent because it doesn't meet the" " minimum seeders or leechers: {0} (S:{1} L:{2})".format (title, seeders, leechers), logger.DEBUG) continue torrent_size = cells[labels.index('Size')].get_text(strip=True) size = convert_size(torrent_size, units=units) or -1 item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''} if mode != 'RSS': logger.log("Found result: {0} with {1} seeders and {2} leechers".format (title, seeders, leechers), logger.DEBUG) items.append(item) except StandardError: continue # For each search mode sort all the items by seeders if available items.sort(key=lambda d: try_int(d.get('seeders', 0)), 
reverse=True) results += items return results def get_season_search_strings(self, episode): search_string = { 'Season': [] } for show_name in allPossibleShowNames(episode.show, season=episode.scene_season): season_string = show_name + ' ' if episode.show.air_by_date or episode.show.sports: season_string += str(episode.airdate).split('-')[0] elif episode.show.anime: # use string below if you really want to search on season with number # season_string += 'Season ' + '{0:d}'.format(int(episode.scene_season)) season_string += 'Season' # ignore season number to get all seasons in all formats else: season_string += 'S{0:02d}'.format(int(episode.scene_season)) # MTV renames most season packs to just "Season ##" mtv_season_string = '{0} Season {1}'.format(show_name, int(episode.scene_season)) search_string['Season'].append(mtv_season_string.encode('utf-8').strip()) search_string['Season'].append(season_string.encode('utf-8').strip()) return [search_string] provider = MoreThanTVProvider()
gpl-3.0
-2,864,894,290,968,248,000
39.251012
141
0.520821
false
4.525262
false
false
false
chrysante87/pyterpol
synthetic/auxiliary.py
1
10363
import numpy as np import matplotlib.pyplot as plt from astropy.constants import c from scipy.interpolate import splrep from scipy.interpolate import splev from scipy.interpolate import bisplrep from scipy.interpolate import bisplev from scipy.interpolate import RectBivariateSpline from scipy.interpolate import InterpolatedUnivariateSpline from scipy.interpolate import spline from scipy.signal import fftconvolve ZERO_TOLERANCE = 1e-6 def flatten_2d(arr): """ Flattens 2-dim array :param arr: 2d array :return: """ newarr = [] if any([isinstance(subarr, (list, tuple)) for subarr in arr]): for subarr in arr: if isinstance(subarr, (tuple, list)): newarr.extend(subarr) else: newarr.append(subarr) return newarr else: return arr def instrumental_broadening(wave, flux, width=0.25, width_type='fwhm', interpolate_back=True): """ A convolution of a spectrum with a normal distribution. :param: wave: :param: flux: :param width: :param width_type: :return: """ # print "Computing instr. broadening." # If there is no broadening to apply, don't bother if width < ZERO_TOLERANCE: return flux # Convert user input width type to sigma (standard devation) width_type = width_type.lower() if width_type == 'fwhm': sigma = width / 2.3548 elif width_type == 'sigma': sigma = width else: raise ValueError(("Unrecognised width_type='{}' (must be one of 'fwhm'" "or 'sigma')").format(width_type)) # Make sure the wavelength range is equidistant before applying the # convolution delta_wave = np.diff(wave).min() range_wave = wave.ptp() n_wave = int(range_wave / delta_wave) + 1 wave_ = np.linspace(wave[0], wave[-1], n_wave) # flux_ = np.interp(wave_, wave, flux) flux_ = interpolate_spec(wave, flux, wave_) dwave = wave_[1] - wave_[0] n_kernel = int(2 * 4 * sigma / dwave) # The kernel might be of too low resolution, or the the wavelength range # might be too narrow. In both cases, raise an appropriate error if n_kernel == 0: raise ValueError(("Spectrum resolution too low for " "instrumental broadening (delta_wave={}, " "width={}").format(delta_wave, width)) elif n_kernel > n_wave: raise ValueError(("Spectrum range too narrow for " "instrumental broadening")) # Construct the broadening kernel wave_k = np.arange(n_kernel) * dwave wave_k -= wave_k[-1] / 2. kernel = np.exp(- (wave_k) ** 2 / (2 * sigma ** 2)) kernel /= sum(kernel) # Convolve the flux with the kernel flux_conv = fftconvolve(1 - flux_, kernel, mode='same') # And interpolate the results back on to the original wavelength array, # taking care of even vs. odd-length kernels if n_kernel % 2 == 1: offset = 0.0 else: offset = dwave / 2.0 if interpolate_back: flux = np.interp(wave + offset, wave_, 1 - flux_conv, left=1, right=1) # flux = interpolate_spec(wave_, 1-flux_conv, wave+offset) # Return the results. return flux def interpolate_block(x, block, xnew): """ Interpolates in each line of a 2d array. :param x: independent variable :type x: numpy.float64 :param block: 2d array for each column f(x)= block[i] :type block: numpy.float64 :param xnew: point at which it is interpolated :type xnew: float :return: """ intens = np.zeros(len(block[0])) n = len(block[:, 0]) # set up the order of interpolation if n > 4: k = 3 else: k = n - 1 # k=3 # TODO Can thius be done faster with bisplrep and bisplev # do the interpolation for i in range(0, len(block[0])): y = block[:, i] tck = splrep(x, y, k=k) intens[i] = splev(xnew, tck, der=0) return intens def interpolate_block_faster(x, block, xnew): """ Interpolation of teh spectra... hopefully faster? 
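    Fits a single RectBivariateSpline over the whole block instead of calling
    splrep/splev column by column as interpolate_block does.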
:param x: :param block: :param xnew: :return: """ # length of the datablock nx = len(block[0]) ny = len(x) # print x if (ny > 3) & (ny < 6): ky = 3 elif ny > 5: ky = 5 else: ky = ny - 1 # print ky f = RectBivariateSpline(x, np.arange(nx), block, kx=ky, ky=1) intens = f(xnew, np.arange(nx))[0] return intens def interpolate_spec(wave0, intens0, wave1): """ Defines a function intens0 = f(wave0) and than interpolates in it at wave1. :param wave0: initial wavelength array :type wave0: numpy.float64 :param intens0: initial intensity array :type intens0: numpy.float64 :param wave1: wavelength array at which we interpolate :type wave1: numpy.float64 :return intens1: final intensity array :rtype intens1: numpy.float64 """ tck = splrep(wave0, intens0, k=3) intens1 = splev(wave1, tck) return intens1 def is_within_interval(v, arr): """ Tests whether value v lies within interval [min(arr); max(arr)] :param v: tested values :type v: numpy.float64 :param arr: tested array :type v: numpy.float64 :return: :param: :type: bool """ # print v, max(arr), min(arr) if (v - max(arr) > ZERO_TOLERANCE) | (min(arr) - v > ZERO_TOLERANCE): return False else: return True def generate_least_number(l): """ Goes over integer in list and finds the smallest integer not in the list. :param l: the list :return: int the smallest integer """ num = 0 while num in l: num += 1 return num def keys_to_lowercase(d): """ Converts dictionary keys to lowercase :param d the converted dictionary :return: dnew """ dnew = {} for key in d.keys(): keynew = key.lower() dnew[keynew] = d[key] return dnew def parlist_to_list(l, property='value'): """ Converts a list of Parameter class to a regular list - only the property is returned :param l: :param prop: :return: """ ol = [] for par in l: ol.append(par[property]) return ol def sum_dict_keys(d): """ Sums dictionary key records. :param d: the dictionary :return: s the sum """ s = 0.0 for key in d.keys(): s += d[key] return s def read_text_file(f): """ Reads ascii file f. :param f: the file :type f: str :return lines: list of all lines within file f :rtype: list """ ifile = open(f, 'r') lines = ifile.readlines() ifile.close() return lines def renew_file(f): """ Deletes an existing file. :param f: :return: """ ofile = open(f, 'w') ofile.close() def rotate_spectrum(wave, intens, vrot, epsilon=0.6, interpolate_back=True): """ Rotates a spectrum represented by arrays wave and intes to the prjected rotational velocity vrot. :param wave: wavelength array :type wave: numpy.float64 :param intens: intensity array :type intens: numpy.float64 :param vrot: projected rotational velocity in km/s :type vrot: float :param epsilon: Coefficient of linear limb-darkening. :type epsilon: float :param interpolate_back: interpolate the spectrum back to the original wavelength sampling :type interpolate_back: bool :return intens: the rotated spectrum in the original wavelength sanmpling :rtype intens: numpy.float64 :return intens_conv: the rotated spectrum equidistant in rv :rtype intens_conv: numpy.float64 :return wave_conv: the wavelength array equidistant in rv :rtype wave_conv: numpy.float64 """ if vrot > ZERO_TOLERANCE: # we need it equidistant in RV wave_log = np.log(wave) rv = np.linspace(wave_log[0], wave_log[-1], len(wave)) step = rv[1] - rv[0] # interpolate intens_rv = interpolate_spec(wave_log, intens, rv) # scale rotational velocity with light speed vrot = 1000 * vrot / c.value # get the kernel # velocity vector n = int(np.ceil(2 * vrot / step)) rv_ker = np.arange(n) * step rv_ker = rv_ker - rv_ker[-1] / 2. 
y = 1 - (rv_ker / vrot) ** 2 # the kernel kernel = (2 * (1 - epsilon) * np.sqrt(y) + np.pi * epsilon / 2. * y) / (np.pi * vrot * (1 - epsilon / 3.0)) kernel = kernel / kernel.sum() # convolve the flux intens_conv = fftconvolve(1 - intens_rv, kernel, mode='same') if n % 2 == 1: rv = np.arange(len(intens_conv)) * step + rv[0] else: rv = np.arange(len(intens_conv)) * step + rv[0] - step / 2. wave_conv = np.exp(rv) # interpolate back if interpolate_back: intens = interpolate_spec(wave_conv, 1 - intens_conv, wave) return intens else: return 1 - intens_conv, wave_conv def shift_spectrum(wave, RV): """ Doppler-shifts spectrum. :param wave: original wavelength array :type wave: numpy.float64 :param RV: radial velocity in km/s :type RV: float :return new_wave: shifted wavelength array :rtype new_wave: numpy.float64 """ # shifts the wavelengths new_wave = wave * (1 + RV * 1000 / c.value) return new_wave def select_index_for_multiple_keywords(d, **kwargs): """ From a dictionary of lists selects one index meeting all requirements. :param kwargs: :return: """ keys = d.keys() length = len(d[keys[0]]) for i in range(0, length): for k in keys: if d[k] == kwargs[k] and k == keys[-1]: return i return -1 def string2bool(s): """ Converts string to boolean. :param s: :return: """ if s.lower() in ['true', '1']: return True else: return False def write_numpy(f, cols, fmt): """ An example of lack of brain of the main developer of this "code". :param f: outputfile or handler :param cols: block of data to be writte :param fmt: format of the blocs :return: None """ np.savetxt(f, cols, fmt=fmt)
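

# A minimal usage sketch (not part of the original module): a toy normalized
# spectrum pushed through the broadening and shifting helpers above. The
# wavelength grid, line profile and velocities are made-up example values.
def _demo_broadening_pipeline():
    wave = np.linspace(6540.0, 6580.0, 2000)
    intens = 1.0 - 0.5 * np.exp(-0.5 * ((wave - 6562.8) / 0.3) ** 2)  # toy absorption line
    intens = rotate_spectrum(wave, intens, vrot=50.0)                 # v sin i = 50 km/s
    intens = instrumental_broadening(wave, intens, width=0.25)        # FWHM = 0.25 A
    wave_shifted = shift_spectrum(wave, RV=30.0)                      # RV = +30 km/s
    return wave_shifted, intens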
gpl-2.0
3,257,326,317,646,513,000
24.09201
115
0.595098
false
3.464728
false
false
false
jessamynsmith/eggtimer-server
periods/tests/management/commands/test_email_active_users.py
1
2900
import datetime import pytz from django.test import TestCase from mock import patch from periods import models as period_models from periods.management.commands import email_active_users from periods.tests.factories import FlowEventFactory TIMEZONE = pytz.timezone("US/Eastern") class TestCommand(TestCase): def setUp(self): self.command = email_active_users.Command() flow_event = FlowEventFactory() self.user = flow_event.user FlowEventFactory(user=self.user, timestamp=TIMEZONE.localize(datetime.datetime(2014, 2, 28))) @patch('django.core.mail.EmailMultiAlternatives.send') def test_email_active_users_no_periods(self, mock_send): period_models.FlowEvent.objects.all().delete() self.command.handle() self.assertFalse(mock_send.called) @patch('django.core.mail.EmailMultiAlternatives.send') @patch('periods.models.today') def test_email_active_users_send_disabled(self, mock_today, mock_send): mock_today.return_value = TIMEZONE.localize(datetime.datetime(2014, 3, 14)) self.user.send_emails = False self.user.save() self.command.handle() self.assertFalse(mock_send.called) @patch('periods.email_sender.send') @patch('periods.models.today') def test_email_active_users(self, mock_today, mock_send): mock_today.return_value = TIMEZONE.localize(datetime.datetime(2014, 3, 15)) self.command.handle() email_text = ('Hello ,\n\nThis is an important notification about the data in your ' 'eggtimer account.\n\nUntil now, eggtimer has been storing all data in ' 'Eastern time. As you may already be aware,\nthis creates issues for users ' 'in other timezones. I am going to update the application so all\ndata is ' 'stored in UTC. This may affect your data!\n\nIf you are in Eastern time, ' 'your data will be migrated correctly, and you need do nothing.\n\nIf you ' 'have been using eggtimer from another timezone, you have two options:\n1) ' 'Before July 14, edit your user profile to select your timezone. When the ' 'data migration is\nperformed, I will use the timezone on your profile.\n2) ' 'Do nothing, and your data will be migrated ' 'as if it is in Eastern time. This will likely\nresult in a time shift when ' 'you view your events. If desired, you can then edit events yourself.\n\nI ' 'apologize for the inconvenience.\n\nSincerely,\n\n') mock_send.assert_called_once_with(self.user, 'Important information about the data in your ' 'eggtimer account', email_text, None)
mit
-7,983,820,550,675,869,000
45.774194
100
0.637586
false
4.148784
true
false
false
hmendozap/master-arbeit-projects
autosk_dev_test/utilities/test_two_GPUs_multiprocessing.py
1
3587
""" Test script that uses two GPUs, one per sub-process, via the Python multiprocessing module. Each GPU fits a logistic regression model. """ # These imports will not trigger any theano GPU binding from multiprocessing import Process, Manager import numpy as np import os def f(shared_args,private_args): """ Build and fit a logistic regression model. Adapted from http://deeplearning.net/software/theano/tutorial/examples.html#a-real-example-logistic-regression """ # Import sandbox.cuda to bind the specified GPU to this subprocess # then import the remaining theano and model modules. import theano.sandbox.cuda theano.sandbox.cuda.use(private_args['gpu']) import theano import theano.tensor as T from theano.tensor.shared_randomstreams import RandomStreams rng = np.random # Pull the size of the matrices from shared_args_dict = shared_args[0] N = shared_args_dict['N'] feats = shared_args_dict['n_features'] D = (rng.randn(N, feats), rng.randint(size=N,low=0, high=2)) training_steps = shared_args_dict['n_steps'] # Declare Theano symbolic variables x = T.matrix("x") y = T.vector("y") w = theano.shared(rng.randn(feats), name="w") b = theano.shared(0., name="b") print "Initial model:" print w.get_value(), b.get_value() # Construct Theano expression graph p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b)) # Probability that target = 1 prediction = p_1 > 0.5 # The prediction thresholded xent = -y * T.log(p_1) - (1-y) * T.log(1-p_1) # Cross-entropy loss function cost = xent.mean() + 0.01 * (w ** 2).sum()# The cost to minimize gw,gb = T.grad(cost, [w, b]) # Compute the gradient of the cost # (we shall return to this in a # following section of this tutorial) # Compile. allow_input_downcast reassures the compiler that we are ok using # 64 bit floating point numbers on the cpu, gut only 32 bit floats on the gpu. train = theano.function( inputs=[x,y], outputs=[prediction, xent], updates=((w, w - 0.1 * gw), (b, b - 0.1 * gb)), allow_input_downcast=True) predict = theano.function(inputs=[x], outputs=prediction, allow_input_downcast=True) # Train for i in range(training_steps): pred, err = train(D[0], D[1]) print "Final model:" print w.get_value(), b.get_value() print "target values for D:", D[1] print "prediction on D:", predict(D[0]) if __name__ == '__main__': # Construct a dict to hold arguments that can be shared by both processes # The Manager class is a convenient to implement this # See: http://docs.python.org/2/library/multiprocessing.html#managers # # Important: managers store information in mutable *proxy* data structures # but any mutation of those proxy vars must be explicitly written back to the manager. manager = Manager() args = manager.list() args.append({}) shared_args = args[0] shared_args['N'] = 400 shared_args['n_features'] = 784 shared_args['n_steps'] = 10000 args[0] = shared_args # Construct the specific args for each of the two processes p_args = {} q_args = {} p_args['gpu'] = 'gpu0' q_args['gpu'] = 'gpu1' # Run both sub-processes p = Process(target=f, args=(args,p_args,)) q = Process(target=f, args=(args,q_args,)) p.start() q.start() p.join() q.join()
mit
6,711,862,250,160,399,000
34.88
101
0.618902
false
3.637931
false
false
false
CommonsDev/dataserver
projects/migrations/0014_auto__add_historicalproject.py
1
11787
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'HistoricalProject' db.create_table(u'projects_historicalproject', ( (u'id', self.gf('django.db.models.fields.IntegerField')(db_index=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100)), ('slug', self.gf('autoslug.fields.AutoSlugField')(unique_with=(), max_length=50, populate_from=None)), ('baseline', self.gf('django.db.models.fields.CharField')(max_length=250, null=True, blank=True)), ('description', self.gf('django.db.models.fields.TextField')(blank=True)), ('location_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)), ('website', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)), ('begin_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)), ('end_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True)), ('progress_id', self.gf('django.db.models.fields.IntegerField')(db_index=True, null=True, blank=True)), ('created_on', self.gf('django.db.models.fields.DateTimeField')(blank=True)), (u'history_id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), (u'history_date', self.gf('django.db.models.fields.DateTimeField')()), (u'history_user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.SET_NULL)), (u'history_type', self.gf('django.db.models.fields.CharField')(max_length=1)), )) db.send_create_signal(u'projects', ['HistoricalProject']) def backwards(self, orm): # Deleting model 'HistoricalProject' db.delete_table(u'projects_historicalproject') models = { u'accounts.profile': { 'Meta': {'object_name': 'Profile'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}), 'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 
'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'projects.historicalproject': { 'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalProject'}, 'baseline': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'created_on': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), u'history_date': ('django.db.models.fields.DateTimeField', [], {}), u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}), u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}), u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}), 'location_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'progress_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}), 'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': 'None'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'projects.project': { 'Meta': {'object_name': 'Project'}, 'baseline': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}), 'begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 
'True'}), 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['scout.Place']", 'null': 'True', 'blank': 'True'}), 'progress': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectProgress']", 'null': 'True', 'blank': 'True'}), 'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'projects.projectprogress': { 'Meta': {'ordering': "['order']", 'object_name': 'ProjectProgress'}, 'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'label': ('django.db.models.fields.CharField', [], {'max_length': '30'}), 'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'progress_range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectProgressRange']"}) }, u'projects.projectprogressrange': { 'Meta': {'object_name': 'ProjectProgressRange'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'}) }, u'projects.projectteam': { 'Meta': {'object_name': 'ProjectTeam'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounts.Profile']", 'symmetrical': 'False'}), 'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}) }, u'scout.place': { 'Meta': {'object_name': 'Place'}, 'address': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'to': u"orm['scout.PostalAddress']"}), 'geo': ('django.contrib.gis.db.models.fields.PointField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'scout.postaladdress': { 'Meta': {'object_name': 'PostalAddress'}, 'address_locality': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'address_region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '2'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'post_office_box_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), 'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'street_address': ('django.db.models.fields.TextField', [], {'blank': 'True'}) } } complete_apps = ['projects']
agpl-3.0
-7,540,497,227,849,037,000
76.552632
195
0.562823
false
3.603485
false
false
false
zhmz90/CS231N
assign/assignment1/cs231n/classifiers/softmax.py
1
2626
import numpy as np from random import shuffle def softmax_loss_naive(W, X, y, reg): """ Softmax loss function, naive implementation (with loops) Inputs have dimension D, there are C classes, and we operate on minibatches of N examples. Inputs: - W: A numpy array of shape (D, C) containing weights. - X: A numpy array of shape (N, D) containing a minibatch of data. - y: A numpy array of shape (N,) containing training labels; y[i] = c means that X[i] has label c, where 0 <= c < C. - reg: (float) regularization strength Returns a tuple of: - loss as single float - gradient with respect to weights W; an array of same shape as W """ # Initialize the loss and gradient to zero. loss = 0.0 dW = np.zeros_like(W) ############################################################################# # TODO: Compute the softmax loss and its gradient using explicit loops. # # Store the loss in loss and the gradient in dW. If you are not careful # # here, it is easy to run into numeric instability. Don't forget the # # regularization! # ############################################################################# num_train = X.shape[0] for i in xrange(num_train): z = X[i].dot(W) z_exp = np.exp(z) scores = z_exp / np.sum(z_exp) ############################################################################# # END OF YOUR CODE # ############################################################################# return loss, dW def softmax_loss_vectorized(W, X, y, reg): """ Softmax loss function, vectorized version. Inputs and outputs are the same as softmax_loss_naive. """ # Initialize the loss and gradient to zero. loss = 0.0 dW = np.zeros_like(W) ############################################################################# # TODO: Compute the softmax loss and its gradient using no explicit loops. # # Store the loss in loss and the gradient in dW. If you are not careful # # here, it is easy to run into numeric instability. Don't forget the # # regularization! # ############################################################################# pass ############################################################################# # END OF YOUR CODE # ############################################################################# return loss, dW
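

# A minimal illustrative sketch (not part of the original assignment template) of
# one way the vectorized TODO above could be filled in. It assumes the usual
# cross-entropy loss averaged over the batch plus an L2 penalty of reg * sum(W**2).
def _softmax_loss_vectorized_sketch(W, X, y, reg):
    num_train = X.shape[0]
    scores = X.dot(W)                              # (N, C) class scores
    scores -= scores.max(axis=1, keepdims=True)    # shift for numerical stability
    exp_scores = np.exp(scores)
    probs = exp_scores / exp_scores.sum(axis=1, keepdims=True)
    loss = -np.log(probs[np.arange(num_train), y]).sum() / num_train
    loss += reg * np.sum(W * W)                    # L2 regularization
    dscores = probs
    dscores[np.arange(num_train), y] -= 1          # d(loss)/d(scores)
    dW = X.T.dot(dscores) / num_train + 2 * reg * W
    return loss, dW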
mit
6,263,552,307,527,689,000
37.617647
79
0.441356
false
4.926829
false
false
false
sxhexe/reaction-route-search
reactionroute_web/reaction/reaction/urls.py
1
1111
"""reaction URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from django.views.generic import RedirectView from django.conf import settings from django.conf.urls.static import static urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^search/', include('search.urls')), url(r'^demo/', include('demo.urls')), url(r'^$', RedirectView.as_view(url='/search/demo/')), ] # + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
mit
-4,855,968,317,904,313,000
40.148148
79
0.706571
false
3.538217
false
false
false
Nentix/xentriq.docs
docs/conf.py
1
4649
# -*- coding: utf-8 -*- # # Xentriq documentation build configuration file, created by # sphinx-quickstart on Wed Apr 26 16:44:14 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Xentriq' copyright = u'2017 - 2018 nentix.com' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'-' # The full version, including alpha/beta/rc tags. release = u'-' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'Xentriqdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). 
# # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Xentriq.tex', u'Xentriq Documentation', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'xentriq', u'Xentriq Documentation', [], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Xentriq', u'Xentriq Documentation', 'Xentriq', 'One line description of project.', 'Miscellaneous'), ]
mit
6,299,663,179,183,129,000
29.188312
79
0.672188
false
3.880634
true
false
false
warrickball/figures
hmi_rot2d_coaster.py
1
2261
#!/usr/bin/env python import numpy as np from matplotlib import pyplot as pl from argparse import ArgumentParser parser = ArgumentParser() parser.add_argument('--figsize', type=float, nargs=2, help="figure size, passed to rcParams['figure.figsize']") parser.add_argument('--levels', type=int, default=20, help="number of levels passed to contourf (default 100)") parser.add_argument('--padding', type=float, default=0.01, help="fractional padding between edge and circle (default=0.01)") args = parser.parse_args() if args.figsize: pl.rcParams['figure.figsize'] = args.figsize # data from SDO/HMI webpage # http://jsoc.stanford.edu/HMI/Global_products.html try: rot2d = np.load('data/hmi_rot2d.npy') err2d = np.load('data/hmi_err2d.npy') rmesh = np.load('data/hmi_rmesh.npy') except IOError: try: from urllib2 import urlopen except ImportError: from urllib.request import urlopen response = urlopen('http://jsoc.stanford.edu/SUM86/D917240671/S00000/rot.2d') rot2d = np.loadtxt(response.readlines()) response.close() np.save('data/hmi_rot2d.npy', rot2d) response = urlopen('http://jsoc.stanford.edu/SUM86/D917240671/S00000/err.2d') err2d = np.loadtxt(response.readlines()) response.close() np.save('data/hmi_err2d.npy', err2d) response = urlopen('http://jsoc.stanford.edu/SUM86/D917240671/S00000/rmesh.orig') rmesh = np.loadtxt(response.readlines())[::4] response.close() np.save('data/hmi_rmesh.npy', rmesh) # rot2d has 49 columns, latitudes are 90-i*15/8; i starts at 0 lat = np.array([15./8.*i for i in np.arange(49)])/180.*np.pi r, th = np.meshgrid(rmesh, lat) ax = pl.subplot(111, projection='polar') b = args.padding pl.subplots_adjust(top=1-b, bottom=b, left=b, right=1-b) data = rot2d.T[::-1] data[err2d.T[::-1]/data>0.01] = np.nan ax.contourf(th, r, data, args.levels) ax.contourf(np.pi-th, r, data, args.levels) ax.contourf(-th, r, data, args.levels) ax.contourf(th-np.pi, r, data, args.levels) # plot base of convection zone th = np.linspace(0., 2.*np.pi, 401) r = np.ones(len(th))*0.713 ax.plot(th, r, 'k--') ax.set_xticklabels([]) ax.set_yticklabels([]) ax.grid(False) pl.show()
gpl-3.0
8,172,687,639,470,969,000
30.84507
85
0.670057
false
2.851198
false
false
false
foursquare/fsqio
scripts/fsqio/python3-port-utils/pants/remove_builtins.py
1
3393
#!/usr/bin/env python3 import argparse import subprocess from pathlib import Path from textwrap import dedent from typing import List, Sequence, Set def main() -> None: folders = create_parser().parse_args().folders for fp in get_files_with_import(folders): remove_builtins(file_path=fp) if safe_to_remove_future_from_build(file_path=fp): target_name = determine_pants_target_name(file_path=fp) update_build_dependencies(file_path=fp, pants_target_name=target_name) def create_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( description='Remove `from builtins import x`, and possibly the BUILD entry for `future`.') parser.add_argument('folders', nargs='*') return parser def get_files_with_import(folders: Sequence[str]) -> Set[Path]: return { fp for folder in folders for fp in Path(folder).rglob("*.py") if not fp.name.endswith("__init__.py") and "from builtins import" in fp.read_text() } def determine_pants_target_name(file_path: Path) -> str: file_map = subprocess.run([ './pants', 'filemap', f'{file_path.parent}:' ], stdout=subprocess.PIPE, encoding="utf-8").stdout.strip().split('\n') target_entry = next((line for line in file_map if file_path.name in line), None) if target_entry is None: raise SystemExit(dedent(f"""\n ERROR: File '{file_path}' invalid. Not found anywhere in {file_path.parent}/BUILD.""")) pants_target_path = target_entry.split(' ')[1] pants_target_name = pants_target_path.split(':')[1] return pants_target_name def remove_builtins(*, file_path: Path) -> None: lines = file_path.read_text().splitlines() builtins_line_index = next( (i for i, line in enumerate(lines) if "from builtins" in line), None ) if builtins_line_index: lines.pop(builtins_line_index) file_path.write_text("\n".join(lines) + "\n") def safe_to_remove_future_from_build(*, file_path: Path) -> bool: lines = file_path.read_text().splitlines() return all( "from future.utils" not in line and "from future.moves" not in line for line in lines ) def _find_target_index_in_build( *, build_lines: List[str], pants_target_name: str, file_name: str ) -> int: index = next((i for i, line in enumerate(build_lines) if f"name = '{pants_target_name}'" in line or f"name='{pants_target_name}'" in line), None) if index is None: # mono-target index = next((i for i, line in enumerate(build_lines) if file_name in line), None) if index is None: # only one target block in file, and sources aren't specified index = next(i for i, line in enumerate(build_lines) if 'python_' in line and '(' in line) return index def update_build_dependencies(*, file_path: Path, pants_target_name: str) -> None: build_file: Path = file_path.parent / "BUILD" lines = build_file.read_text().splitlines() target_index = _find_target_index_in_build( build_lines=lines, pants_target_name=pants_target_name, file_name=file_path.name ) future_line_index = next( (i for i, line in enumerate(lines[target_index:]) if '3rdparty/python:future' in line), None ) if future_line_index: lines.pop(future_line_index + target_index) build_file.write_text("\n".join(lines) + "\n") if __name__ == '__main__': try: main() except KeyboardInterrupt: pass
apache-2.0
-7,298,757,472,970,914,000
31.941748
96
0.666667
false
3.362735
false
false
false
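The remove_builtins.py script above finds `from builtins import ...` lines and, when it is safe, drops the `3rdparty/python:future` dependency from the adjacent BUILD file. As a rough illustration of the core edit it performs, here is a minimal, self-contained sketch; the file contents and names below are hypothetical and not taken from the fsqio repo:

from pathlib import Path
import tempfile

# Hypothetical module that still carries a Python 2/3 compatibility import.
before = """from builtins import str

def greet(name):
    return str(name)
"""

with tempfile.TemporaryDirectory() as tmp:
    fp = Path(tmp) / "greet.py"
    fp.write_text(before)

    # Mimic the script's edit: drop the first `from builtins` line, keep the rest.
    lines = fp.read_text().splitlines()
    idx = next((i for i, line in enumerate(lines) if "from builtins" in line), None)
    if idx is not None:
        lines.pop(idx)
        fp.write_text("\n".join(lines) + "\n")

    print(fp.read_text())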
useblocks/groundwork
groundwork/plugins/gw_recipes_builder.py
1
3282
# -*- coding: utf-8 -*- import os from click import Argument from groundwork.patterns import GwCommandsPattern, GwRecipesPattern class GwRecipesBuilder(GwCommandsPattern, GwRecipesPattern): """ Provides commands for listing and building recipes via command line interface. Provided commands: * recipe_list * recipe_build Provides also the recipe **gw_package**, which can be used to setup a groundwork related python package. Content of the package: * setup.py: Preconfigured and ready to use. * groundwork package structure: Directories for applications, patterns, plugins and recipes. * Simple, runnable example of a groundwork application and plugins. * usable test, supported by py.test and tox. * expandable documentation, supported by sphinx and the groundwork sphinx template. * .gitignore This code is hardly based on Cookiecutter's main.py file: https://github.com/audreyr/cookiecutter/blob/master/cookiecutter/main.py """ def __init__(self, *args, **kwargs): self.name = kwargs.get("name", self.__class__.__name__) super(GwRecipesBuilder, self).__init__(*args, **kwargs) def activate(self): self.commands.register("recipe_list", "Lists all recipes", self._recipe_list) self.commands.register("recipe_build", "Builds a given recipe", self._recipe_build, params=[Argument(("recipe",), required=True)]) self.recipes.register("gw_package", os.path.abspath(os.path.join(os.path.dirname(__file__), "../recipes/gw_package")), description="Groundwork basic package. Includes places for " "apps, plugins, patterns and recipes.", final_words="Recipe Installation is done.\n\n" "During development use buildout:\n" "Run: python bootstrap.py\n" "Then: bin/buildout\n" "Start the app: bin/app\n\n" "For installation run: 'python setup.py install' \n" "For documentation run: 'make html' inside doc folder " "(after installation!)\n\n" "For more information, please take a look into the README file " "to know how to go on.\n" "For help visit: https://groundwork.readthedocs.io\n\n" "Have fun with your groundwork package.") def deactivate(self): pass def _recipe_list(self): print("Recipes:") for key, recipe in self.app.recipes.get().items(): print(" %s by plugin '%s' - %s" % (recipe.name, recipe.plugin.name, recipe.description)) def _recipe_build(self, recipe): recipe_obj = self.app.recipes.get(recipe) if recipe_obj is None: print("Recipe %s not found." % recipe) else: recipe_obj.build(no_input=False, extra_context=None)
mit
-8,096,438,655,980,942,000
45.885714
112
0.555454
false
4.564673
false
false
false
theY4Kman/neoalchemy
neoalchemy/util/langhelpers.py
1
2116
from . import compat


class _symbol(int):
    def __new__(self, name, doc=None, canonical=None):
        """Construct a new named symbol."""
        assert isinstance(name, compat.string_types)
        if canonical is None:
            canonical = hash(name)
        v = int.__new__(_symbol, canonical)
        v.name = name
        if doc:
            v.__doc__ = doc
        return v

    def __reduce__(self):
        return symbol, (self.name, "x", int(self))

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return "symbol(%r)" % self.name

_symbol.__name__ = 'symbol'


class symbol(object):
    """A constant symbol.

    >>> symbol('foo') is symbol('foo')
    True
    >>> symbol('foo')
    <symbol 'foo>

    A slight refinement of the MAGICCOOKIE=object() pattern.  The primary
    advantage of symbol() is its repr().  They are also singletons.

    Repeated calls of symbol('name') will all return the same instance.

    The optional ``doc`` argument assigns to ``__doc__``.  This
    is strictly so that Sphinx autoattr picks up the docstring we want
    (it doesn't appear to pick up the in-module docstring if the datamember
    is in a different module - autoattribute also blows up completely).
    If Sphinx fixes/improves this then we would no longer need
    ``doc`` here.

    """
    symbols = {}
    _lock = compat.threading.Lock()

    def __new__(cls, name, doc=None, canonical=None):
        cls._lock.acquire()
        try:
            sym = cls.symbols.get(name)
            if sym is None:
                cls.symbols[name] = sym = _symbol(name, doc, canonical)
            return sym
        finally:
            symbol._lock.release()


_creation_order = 1


def set_creation_order(instance):
    """Assign a '_creation_order' sequence to the given instance.

    This allows multiple instances to be sorted in order of creation
    (typically within a single thread; the counter is not particularly
    threadsafe).

    """
    global _creation_order
    instance._creation_order = _creation_order
    _creation_order += 1


NoneType = type(None)
mit
479,101,455,911,698,600
25.78481
75
0.606333
false
4.045889
false
false
false
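The symbol docstring above describes a refinement of the MAGICCOOKIE = object() pattern: named, interned constants whose repr() stays readable and where repeated calls return the same instance. A minimal sketch of the same idea, independent of the neoalchemy code (class and function names here are illustrative only):

_registry = {}

class NamedSymbol(int):
    """An int subclass carrying a name, so repr() stays readable."""
    def __new__(cls, name, canonical=None):
        value = super().__new__(cls, canonical if canonical is not None else hash(name))
        value.name = name
        return value

    def __repr__(self):
        return "symbol(%r)" % self.name

def interned_symbol(name):
    # Repeated calls with the same name return the same instance.
    if name not in _registry:
        _registry[name] = NamedSymbol(name)
    return _registry[name]

assert interned_symbol('foo') is interned_symbol('foo')
print(interned_symbol('foo'))   # symbol('foo')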
gaetano-guerriero/eyeD3-debian
src/eyed3/__init__.py
1
1368
# -*- coding: utf-8 -*-
import sys
import locale
from .__about__ import __version__ as version

_DEFAULT_ENCODING = "latin1"

LOCAL_ENCODING = locale.getpreferredencoding(do_setlocale=True)
"""The local encoding, used when parsing command line options, console output,
etc.  The default is always ``latin1`` if it cannot be determined, it is NOT
the value shown."""
if not LOCAL_ENCODING or LOCAL_ENCODING == "ANSI_X3.4-1968":  # pragma: no cover
    LOCAL_ENCODING = _DEFAULT_ENCODING

LOCAL_FS_ENCODING = sys.getfilesystemencoding()
"""The local file system encoding, the default is ``latin1`` if it cannot be
determined."""
if not LOCAL_FS_ENCODING:  # pragma: no cover
    LOCAL_FS_ENCODING = _DEFAULT_ENCODING


class Error(Exception):
    """Base exception type for all eyed3 errors."""

    def __init__(self, *args):
        super(Error, self).__init__(*args)
        if args:
            # The base class will do exactly this if len(args) == 1,
            # but not when > 1.  Note, the 2.7 base class will, 3 will not.
            # Make it so.
            self.message = args[0]


from .utils.log import log  # noqa: E402
from .core import load      # noqa: E402

del sys
del locale

__all__ = ["log", "load", "version", "LOCAL_ENCODING", "LOCAL_FS_ENCODING",
           "Error"]
gpl-3.0
-6,633,266,518,445,879,000
34.076923
80
0.614766
false
3.758242
false
false
false
virantha/photokeeper
photokeeper/flickr.py
1
10779
# -*- coding: utf-8 -*- # Copyright 2016 Virantha Ekanayake All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys, os import logging import yaml, pprint import flickrapi import urllib.request from xml.etree import ElementTree from tqdm import tqdm import itertools, dateparser, time from photokeeper.target import TargetBase class FileWithCallback(object): def __init__(self, filename): self.file = open(filename, 'rb') # the following attributes and methods are required self.len = os.path.getsize(filename) self.fileno = self.file.fileno self.tell = self.file.tell self.tqdm = tqdm(total=self.len, ncols=60,unit_scale=True, unit='B') def read(self, size): self.tqdm.update(size) return self.file.read(size) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.tqdm.close() class FlickrMedia(object): def __init__(self, json_dict): self.json_dict = json_dict self.title = json_dict['title'] self.photoid = json_dict['id'] dt = json_dict['datetaken'] self.datetime_taken = dateparser.parse(dt, date_formats=['%Y-%m-%d %H:%M:%S']) class PhotoSet(object): def __init__(self, json_dict): self.json_dict = json_dict self.title = json_dict['title']['_content'] self.setid = json_dict['id'] self.photos = None class Photo(object): def __init__(self, photo_element): """Construct a photo object out of the XML response from Flickr""" attrs = { 'farm': 'farmid', 'server':'serverid','id':'photoid','secret':'secret'} for flickr_attr, py_attr in attrs.items(): setattr(self, py_attr, photo_element.get(flickr_attr)) def _construct_flickr_url(self): url = "http://farm%s.staticflickr.com/%s/%s_%s_b.jpg" % (self.farmid,self.serverid, self.photoid, self.secret) return url def download_photo(self, dirname, cache=False, tgt_filename=None): if not os.path.exists(dirname): os.makedirs(dirname) tgt = os.path.join(dirname, "%s.jpg" % self.photoid) if cache: if os.path.isfile(tgt): return tgt urllib.request.urlretrieve(self._construct_flickr_url(), tgt) return tgt class Flickr(TargetBase): def __init__(self): self.set_keys(*self.read_keys()) self.get_auth2() # Might as well get all the photosets at this point as we'll need them self.photosets = self._get_photosets() def read_keys(self): """ Read the flickr API key and secret from a local file """ with open("flickr_api.yaml") as f: api = yaml.load(f) return (api["key"], api["secret"]) def set_keys(self, key, secret): self.api_key = key self.api_secret = secret def get_auth2(self): print("Authenticating to Flickr") self.flickr = flickrapi.FlickrAPI(self.api_key, self.api_secret) self.flickr.authenticate_via_browser(perms='write') print("Authentication succeeded") return def get_tagged(self, tags, count, download_dir="photos"): """ Get photos with the given list of tags """ print ("connecting to flickr, and getting %d photos with tags %s" % (count, tags)) x = self.flickr.photos_search(api_key = self.api_key, user_id="me", tags=','.join(tags), per_page=count) photos = self._extract_photos_from_xml(x) photo_filenames = 
self._sync_photos(photos, download_dir) print("Found %d photos" % len(photos)) return photo_filenames def _sync_photos(self, photos, download_dir="photos", clean_up=False): """ Connect to flickr, and for each photo in the list, download. Then, if delete photos that are present locally that weren't present in the list of photos. :returns: List of filenames downloaded """ photo_filenames = [] photo_count = len(photos) for i,photo in enumerate(photos): print("[%d/%d] Downloading %s from flickr" % (i,photo_count,photo.photoid)) filename = photo.download_photo(download_dir, cache=True) photo_filenames.append(filename) # Now, go through and clean up directory if required if clean_up: photo_file_list = ["%s.jpg" % (x.photoid) for x in photos] for fn in os.listdir(download_dir): full_fn = os.path.join(download_dir, fn) if os.path.isfile(full_fn): if not fn in photo_file_list: print ("Flickr sync: Deleting file %s" % fn) os.remove(full_fn) return photo_filenames def _extract_photos_from_xml(self, xml): photos = [] for i in xml.iter(): if i.tag == 'rsp': # the response header. stat member should be 'ok' if i.get('stat') == 'ok': continue else: # error, so just break break if i.tag == 'photo': photos.append(Photo(i)) return photos def get_recent(self,count, download_dir="photos"): """ get the most recent photos """ print ("connecting to flickr, and getting most recent %d photos" % count) x = self.flickr.people_getphotos(api_key = self.api_key, user_id="me",per_page=count) #x = self.flickr.photos_search(api_key=self.api_key,"me") photos = self._extract_photos_from_xml(x) photo_filenames = self._sync_photos(photos, download_dir) return photo_filenames def _get_photosets(self): print("Getting photosets from Flickr") resp = self.flickr.photosets.getList(format='parsed-json') photosets = {} for photoset in resp['photosets']['photoset']: p = PhotoSet(photoset) photosets[p.title] = p #TODO: Possible issue here because multiple photosets could have same title. 
Oh well return photosets def _get_photos_in_album(self, album_name, cached=False): photoset = self.photosets[album_name] albumid = photoset.setid if not photoset.photos or not cached: resp = self.flickr.photosets.getPhotos(photoset_id=albumid, extras='date_taken', format='parsed-json') photos = {} for p in resp['photoset']['photo']: myphoto = FlickrMedia(p) photos[myphoto.title] = myphoto photoset.photos = photos return photoset.photos def _upload_file(self, filename): with FileWithCallback(filename) as f: resp = self.flickr.upload(filename=filename, fileobj=f, is_public=0) photoid = resp.find('photoid').text return photoid def _create_new_album(self, album_name, first_photo_filename): # First, we need to upload a dummy photo photoid = self._upload_file(first_photo_filename) resp = self.flickr.photosets.create(title=album_name, primary_photo_id=photoid, format='parsed-json') albumid = resp['photoset']['id'] resp = self.flickr.photosets.getInfo(photoset_id=albumid, format='parsed-json') return (photoid, resp['photoset']) def _add_photo_to_album(self, photoid, albumid): #tqdm.write("Adding {} to {} ".format(photoid, albumid)) self.flickr.photosets.addPhoto(photoset_id=albumid, photo_id=photoid) def _is_duplicate(self, image): album_name = image.tgtdatedir if not album_name in self.photosets: return False else: photos = self._get_photos_in_album(album_name, cached=True) image_title = os.path.basename(image.filename) if not image_title in photos: # If photo with same title is not found, then no duplicates return False else: # Same title, but let's check the date too, to be sure #tqdm.write('{} has local date {}, and flickr date {}'.format(image_title, image.datetime_taken, photos[image_title].datetime_taken)) if photos[image_title].datetime_taken != image.datetime_taken: return False else: return True def check_duplicates(self, images): print("Checking for duplicates in Flickr") images_1, images_2 = itertools.tee(images) for total,img in enumerate(images_1): if self._is_duplicate(img): img.flickr_dup = True n_dups = [i for i in images_2 if i.flickr_dup] print('Found {} duplicates out of {} images'.format(len(n_dups), total+1)) def execute_copy(self, images): for img in images: if img.flickr_dup: continue album_name = img.tgtdatedir if album_name not in self.photosets: # Need to create album tqdm.write('Creating new album %s' % album_name) photoid, album_dict = self._create_new_album(album_name, img.srcpath) p = PhotoSet(album_dict) self.photosets[p.title] = p else: photoid = self._upload_file(img.srcpath) self._add_photo_to_album(photoid, self.photosets[album_name].setid) tqdm.write("Adding {} to {} ".format(img.filename, album_name)) # Now, make sure we set the date-taken manually if no exif information if img.exif_timestamp_missing: dt = img.datetime_taken.strftime('%Y-%m-%d %H:%M:%S') tqdm.write('Manually setting date on video {} to {}'.format(img.filename, dt)) self.flickr.photos.setDates(photo_id=photoid, date_taken=dt) def main(): #logging.basicConfig(level=logging.DEBUG, format='%(message)s') script = Flickr() #script.get_recent(10) #script.upload('test.jpg') script.flickr.photos.setDates(photoid='30735623495', date_taken='2016-06-24 10:12:02') if __name__ == '__main__': main()
apache-2.0
7,724,130,514,751,252,000
35.788396
149
0.598386
false
3.658859
false
false
false
oaubert/advene
lib/advene/gui/plugins/packageimporter.py
1
8731
# # Advene: Annotate Digital Videos, Exchange on the NEt # Copyright (C) 2008-2017 Olivier Aubert <contact@olivieraubert.net> # # Advene is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Advene is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Advene; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # """GUI to import packages. It focuses on importing whole sets of elements: either views, resources, or whole annotations (with their types) without overwriting/merging with existing elements. A common usage scenario is to be able to compare annotations for the same document but edited by 2 persons using the same schema, by importing annotations from User2, suffixing his annotation types with his name. """ import logging logger = logging.getLogger(__name__) from gettext import gettext as _ from gi.repository import Gtk from gi.repository import GObject from gi.repository import Pango from advene.gui.util import dialog from advene.gui.views import AdhocView from advene.util.merger import Differ name="Package importer view plugin" def register(controller): controller.register_viewclass(PackageImporter) class TreeViewImporter: COLUMN_ELEMENT=0 COLUMN_APPLY=1 COLUMN_ELEMENT_NAME=2 def __init__(self, controller=None, sourcepackage=None, destpackage=None): self.controller = controller self.package = sourcepackage self.destpackage = destpackage self.store = self.build_liststore() self.widget = self.build_widget() def build_liststore(self): # Store reference to the element, string representation (title and id) # and boolean indicating wether it is imported or not store = Gtk.ListStore( GObject.TYPE_PYOBJECT, GObject.TYPE_BOOLEAN, GObject.TYPE_STRING, ) for at in self.package.annotationTypes: store.append(row=[ at, True, "%s (%d)" % (self.controller.get_title(at), len(at.annotations)) ]) return store def toggle_selection(self): """Toggle all elements from the current selection. 
""" def toggle_row(model, path, it, data=None): model.set_value(it, self.COLUMN_APPLY, not model.get_value(it, self.COLUMN_APPLY)) self.widget.get_selection().selected_foreach(toggle_row) return True def build_widget(self): treeview = Gtk.TreeView(model=self.store) treeview.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE) treeview.set_headers_clickable(True) treeview.set_enable_search(False) renderer = Gtk.CellRendererToggle() renderer.set_property('activatable', True) column = Gtk.TreeViewColumn(_('Import?'), renderer, active=self.COLUMN_APPLY) column.set_sort_column_id(self.COLUMN_APPLY) def toggled_cb(renderer, path, model, column): model[path][column] = not model[path][column] return True renderer.connect('toggled', toggled_cb, self.store, self.COLUMN_APPLY) treeview.append_column(column) renderer = Gtk.CellRendererText() column = Gtk.TreeViewColumn(_('Element'), renderer, text=self.COLUMN_ELEMENT_NAME) column.set_resizable(True) column.set_max_width(300) column.set_sort_column_id(self.COLUMN_ELEMENT_NAME) treeview.append_column(column) return treeview class PackageImporter(AdhocView): view_name = _("Package importer view") view_id = 'packageimporter' tooltip=_("Display package import interface") def __init__(self, controller=None, parameters=None, sourcepackage=None, destpackage=None): super().__init__(controller=controller) self.close_on_package_load = True self.contextual_actions = () self.controller=controller opt, arg = self.load_parameters(parameters) self.sourcepackage=sourcepackage self.destpackage=destpackage self.widget=self.build_widget() def build_widget(self): self.mergerview = TreeViewImporter(controller=self.controller, sourcepackage=self.sourcepackage, destpackage=self.destpackage) vbox=Gtk.VBox() label = Gtk.Label(_("Import annotations from %(source)s into %(dest)s") % {'source': self.sourcepackage.uri, 'dest': self.destpackage.uri}) label.set_ellipsize(Pango.EllipsizeMode.MIDDLE) vbox.pack_start(label, False, False, 0) hbox = Gtk.HBox() self.suffix_entry = Gtk.Entry() self.suffix_entry.set_text("IMPORTED") hbox.pack_start(Gtk.Label(_("Suffix to append to created types")), False, False, 0) hbox.pack_start(self.suffix_entry, True, True, 0) vbox.pack_start(hbox, False, False, 0) scroll_win = Gtk.ScrolledWindow () scroll_win.set_policy (Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC) vbox.add(scroll_win) scroll_win.add(self.mergerview.widget) self.buttonbox = Gtk.HButtonBox() def validate(b): m = self.mergerview.store suffix = self.suffix_entry.get_text().strip() if not suffix: dialog.message_dialog(_("The suffix cannot be empty."), icon=Gtk.MessageType.ERROR) return True annotation_count = 0 type_count = 0 # Let's use differ methods to copy elements differ = Differ(source=self.sourcepackage, destination=self.destpackage, controller=self.controller) batch_id=object() for l in m: if l[self.mergerview.COLUMN_APPLY]: source_at = l[self.mergerview.COLUMN_ELEMENT] logger.debug("Copying %s (%d annotations)", source_at.title, len(source_at.annotations)) type_count += 1 dest_at = differ.copy_annotation_type(source_at, generate_id=True) dest_at.title = "%s %s" % (dest_at.title, suffix) self.controller.notify('AnnotationTypeCreate', annotationtype=dest_at, immediate=True, batch=batch_id) for a in source_at.annotations: annotation_count += 1 # Since we copied the annotation type before, copy_annotation should use the translated name new_a = differ.copy_annotation(a, generate_id=True) self.controller.notify('AnnotationCreate', annotation=new_a, immediate=True, batch=batch_id) 
logger.info(_("Copied %(count)d annotations from %(tcount)d types") % { "count": annotation_count, "tcount": type_count }) self.close() return True def select_all(b): model=self.mergerview.store for l in model: l[self.mergerview.COLUMN_APPLY] = True return True def unselect_all(b): model=self.mergerview.store for l in model: l[self.mergerview.COLUMN_APPLY] = False return True def toggle_selection(b): self.mergerview.toggle_selection() return True b = Gtk.Button(_("All")) b.set_tooltip_text(_("Check all items")) b.connect('clicked', select_all) self.buttonbox.add (b) b = Gtk.Button(_('None')) b.set_tooltip_text(_("Uncheck all items")) b.connect('clicked', unselect_all) self.buttonbox.add (b) b = Gtk.Button(_('Selection')) b.set_tooltip_text(_("Toggle checked state on selected lines")) b.connect('clicked', toggle_selection) self.buttonbox.add (b) b = Gtk.Button(stock=Gtk.STOCK_OK) b.connect('clicked', validate) self.buttonbox.add (b) b = Gtk.Button(stock=Gtk.STOCK_CANCEL) b.connect('clicked', lambda b: self.close()) self.buttonbox.add (b) vbox.pack_start(self.buttonbox, False, True, 0) return vbox
gpl-2.0
-7,850,985,333,841,483,000
37.632743
134
0.624327
false
4.057156
false
false
false
pika/pika
pika/frame.py
1
7744
"""Frame objects that do the frame demarshaling and marshaling.""" import logging import struct from pika import amqp_object from pika import exceptions from pika import spec from pika.compat import byte LOGGER = logging.getLogger(__name__) class Frame(amqp_object.AMQPObject): """Base Frame object mapping. Defines a behavior for all child classes for assignment of core attributes and implementation of the a core _marshal method which child classes use to create the binary AMQP frame. """ NAME = 'Frame' def __init__(self, frame_type, channel_number): """Create a new instance of a frame :param int frame_type: The frame type :param int channel_number: The channel number for the frame """ self.frame_type = frame_type self.channel_number = channel_number def _marshal(self, pieces): """Create the full AMQP wire protocol frame data representation :rtype: bytes """ payload = b''.join(pieces) return struct.pack('>BHI', self.frame_type, self.channel_number, len(payload)) + payload + byte(spec.FRAME_END) def marshal(self): """To be ended by child classes :raises NotImplementedError """ raise NotImplementedError class Method(Frame): """Base Method frame object mapping. AMQP method frames are mapped on top of this class for creating or accessing their data and attributes. """ NAME = 'METHOD' def __init__(self, channel_number, method): """Create a new instance of a frame :param int channel_number: The frame type :param pika.Spec.Class.Method method: The AMQP Class.Method """ Frame.__init__(self, spec.FRAME_METHOD, channel_number) self.method = method def marshal(self): """Return the AMQP binary encoded value of the frame :rtype: str """ pieces = self.method.encode() pieces.insert(0, struct.pack('>I', self.method.INDEX)) return self._marshal(pieces) class Header(Frame): """Header frame object mapping. AMQP content header frames are mapped on top of this class for creating or accessing their data and attributes. """ NAME = 'Header' def __init__(self, channel_number, body_size, props): """Create a new instance of a AMQP ContentHeader object :param int channel_number: The channel number for the frame :param int body_size: The number of bytes for the body :param pika.spec.BasicProperties props: Basic.Properties object """ Frame.__init__(self, spec.FRAME_HEADER, channel_number) self.body_size = body_size self.properties = props def marshal(self): """Return the AMQP binary encoded value of the frame :rtype: str """ pieces = self.properties.encode() pieces.insert( 0, struct.pack('>HxxQ', self.properties.INDEX, self.body_size)) return self._marshal(pieces) class Body(Frame): """Body frame object mapping class. AMQP content body frames are mapped on to this base class for getting/setting of attributes/data. """ NAME = 'Body' def __init__(self, channel_number, fragment): """ Parameters: - channel_number: int - fragment: unicode or str """ Frame.__init__(self, spec.FRAME_BODY, channel_number) self.fragment = fragment def marshal(self): """Return the AMQP binary encoded value of the frame :rtype: str """ return self._marshal([self.fragment]) class Heartbeat(Frame): """Heartbeat frame object mapping class. AMQP Heartbeat frames are mapped on to this class for a common access structure to the attributes/data values. 
""" NAME = 'Heartbeat' def __init__(self): """Create a new instance of the Heartbeat frame""" Frame.__init__(self, spec.FRAME_HEARTBEAT, 0) def marshal(self): """Return the AMQP binary encoded value of the frame :rtype: str """ return self._marshal(list()) class ProtocolHeader(amqp_object.AMQPObject): """AMQP Protocol header frame class which provides a pythonic interface for creating AMQP Protocol headers """ NAME = 'ProtocolHeader' def __init__(self, major=None, minor=None, revision=None): """Construct a Protocol Header frame object for the specified AMQP version :param int major: Major version number :param int minor: Minor version number :param int revision: Revision """ self.frame_type = -1 self.major = major or spec.PROTOCOL_VERSION[0] self.minor = minor or spec.PROTOCOL_VERSION[1] self.revision = revision or spec.PROTOCOL_VERSION[2] def marshal(self): """Return the full AMQP wire protocol frame data representation of the ProtocolHeader frame :rtype: str """ return b'AMQP' + struct.pack('BBBB', 0, self.major, self.minor, self.revision) def decode_frame(data_in): # pylint: disable=R0911,R0914 """Receives raw socket data and attempts to turn it into a frame. Returns bytes used to make the frame and the frame :param str data_in: The raw data stream :rtype: tuple(bytes consumed, frame) :raises: pika.exceptions.InvalidFrameError """ # Look to see if it's a protocol header frame try: if data_in[0:4] == b'AMQP': major, minor, revision = struct.unpack_from('BBB', data_in, 5) return 8, ProtocolHeader(major, minor, revision) except (IndexError, struct.error): return 0, None # Get the Frame Type, Channel Number and Frame Size try: (frame_type, channel_number, frame_size) = struct.unpack( '>BHL', data_in[0:7]) except struct.error: return 0, None # Get the frame data frame_end = spec.FRAME_HEADER_SIZE + frame_size + spec.FRAME_END_SIZE # We don't have all of the frame yet if frame_end > len(data_in): return 0, None # The Frame termination chr is wrong if data_in[frame_end - 1:frame_end] != byte(spec.FRAME_END): raise exceptions.InvalidFrameError("Invalid FRAME_END marker") # Get the raw frame data frame_data = data_in[spec.FRAME_HEADER_SIZE:frame_end - 1] if frame_type == spec.FRAME_METHOD: # Get the Method ID from the frame data method_id = struct.unpack_from('>I', frame_data)[0] # Get a Method object for this method_id method = spec.methods[method_id]() # Decode the content method.decode(frame_data, 4) # Return the amount of data consumed and the Method object return frame_end, Method(channel_number, method) elif frame_type == spec.FRAME_HEADER: # Return the header class and body size class_id, weight, body_size = struct.unpack_from('>HHQ', frame_data) # Get the Properties type properties = spec.props[class_id]() # Decode the properties out = properties.decode(frame_data[12:]) # Return a Header frame return frame_end, Header(channel_number, body_size, properties) elif frame_type == spec.FRAME_BODY: # Return the amount of data consumed and the Body frame w/ data return frame_end, Body(channel_number, frame_data) elif frame_type == spec.FRAME_HEARTBEAT: # Return the amount of data and a Heartbeat frame return frame_end, Heartbeat() raise exceptions.InvalidFrameError("Unknown frame type: %i" % frame_type)
bsd-3-clause
-8,649,238,038,116,597,000
28.333333
78
0.631586
false
4.150054
false
false
false
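As the Frame._marshal and decode_frame code above shows, a marshalled AMQP frame is a '>BHI' header (frame type, channel number, payload size), the payload bytes, and a single frame-end marker byte. A small round-trip sketch of that byte layout using literal values rather than pika.spec, so the two constants below are assumptions for illustration:

import struct

FRAME_HEARTBEAT = 8   # assumed value, stands in for pika.spec.FRAME_HEARTBEAT
FRAME_END = 206       # assumed value of the frame-end marker byte

def marshal(frame_type, channel_number, payload=b''):
    # '>BHI' = frame type, channel number, payload length (big-endian).
    header = struct.pack('>BHI', frame_type, channel_number, len(payload))
    return header + payload + bytes([FRAME_END])

raw = marshal(FRAME_HEARTBEAT, 0)
frame_type, channel, size = struct.unpack('>BHL', raw[0:7])
assert (frame_type, channel, size) == (FRAME_HEARTBEAT, 0, 0)
assert raw[-1] == FRAME_END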
simonprickett/wmataapiexperiments
train_predictions_script/pollStationData.py
1
1887
#####
# Script to poll API data for DC Metro station
#
# Author: Simon Prickett
#####

import json
import os
import requests
import sys
import time

#####
# Query the WMATA API for data for the station represented
# by stationCode e.g. N06 = Reston Wiehle East
#####
def getStationData(stationCode, apiKey):
	url = 'https://wmataapibeta.azure-api.net/StationPrediction.svc/json/GetPrediction/' + stationCode + '?api_key=' + apiKey
	res = requests.get(url)
	return res.json()

#####
# Display error telling user to set up their WMATA_API_KEY
# environment variable
#####
def needToSetApiKey():
	print 'You need to set an environment variable:'
	print 'WMATA_API_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
	print 'Before you can run this script.'
	exit(1)

#####
# Display usage error message
#####
def usage():
	print 'This script requires 2 parameters, a station code and a number of times to'
	print 'query for data and a filename to store the results in.'
	print 'Example: ' + sys.argv[0] + ' N06 2000'
	exit(1)

#####
# Entry point
#####
apiKey = os.environ.get('WMATA_API_KEY', '')

if (len(apiKey) == 0):
	needToSetApiKey()

if (len(sys.argv) == 4):
	# Got the right number of arguments, is the second one an integer
	numSamples = 0

	try:
		numSamples = int(sys.argv[2])
		currentSample = 1
		print 'Will take ' + sys.argv[2] + ' samples for ' + sys.argv[1] + ' and store in ' + sys.argv[3]
		f = open(sys.argv[3], 'w')
		f.write('[\n')
		while (currentSample <= numSamples):
			print sys.argv[1] + ' ' + str(currentSample) + ' of ' + str(numSamples)
			json.dump(getStationData(sys.argv[1], apiKey), f)
			currentSample += 1

			# Do we need a comma or is this the last iteration?
			if (currentSample <= numSamples):
				f.write(',')

			f.write('\n')
			f.flush()
			time.sleep(60)

		f.write(']')
		f.close()
	except ValueError:
		usage()
else:
	# Incorrect invocation
	usage()
mit
3,686,401,257,969,455,600
23.506494
122
0.666137
false
3.009569
false
false
false
iFedix/FirewallController
modules/live.py
1
19897
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from ryu.base import app_manager from ryu.controller import ofp_event from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER from ryu.controller.handler import set_ev_cls from ryu.ofproto import ofproto_v1_3 from ryu.lib.packet import packet from ryu.lib.packet import ethernet from ryu.lib.packet import ipv4 from ryu.lib.packet import tcp from ryu.lib.packet import udp # INIZIO CLASSE class Live(app_manager.RyuApp): OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION] def __init__(self, *args, **kwargs): super(Live, self).__init__(*args, **kwargs) self.mac_to_port = {} #creo la mac address table (vedere sotto per dettagli). Si tratta di un dizionario che poi diventera' un dizionario di dizionari! #Cioe' per esempio la mac table finale sara': mac_to_port = {1: {"00:00:00:02": 2, "00:00:00:01": 1}, 2: {"00:00:00:02": 1, "00:00:00:01":2}} self.messages = [] self.communications = "" #sono tutte le cominicazioni registrate dal controller self.currentroutes = [] #Lista GLOBALE (non viene mai eliminata) di informazioni sui collegamenti tra host che bisogna fare: #es [00:00:00:00:00:01 00:00:00:00:00:02 ICMP, 00:00:00:00:00:05 00:00:00:00:00:07 HTTP] #NB: non vengono inserite in questa lista le coppie duali (es 00:00:00:00:00:02 00:00:00:00:00:01 ICMP), perche' la comunicazione deve essere biunivoca #vedere check per questo comportamento self.story = [] #Lista di informazioni sui collegamenti tra host che bisogna fare #es [00:00:00:00:00:01 00:00:00:00:00:02 ICMP, 00:00:00:00:00:05 00:00:00:00:00:07 HTTP] #Differenza tra current routes e story: story e' una lista che serve a tener traccia dei collegamenti che bisogna fare. Una volta che un packet in nuovo entra, viene aggiunto a story una nuova entry che sara' poi eliminata quando il pacchetto viene accettato o rifiutato. Current routes e' una lista simile ma che non cancella i valori e ha un singolo valore per i pacchetti speculari (cioe' se entra 00:00:00:00:00:02 00:00:00:00:00:01 ICMP e poi 00:00:00:00:00:01 00:00:00:00:00:02 ICMP verra' aggiunta solo una entry). Serve a tener traccia delle comunicazioni gia' accettate. Infatti se il primo pacchetto e' stato accettato, currentroutes fa in modo che i percorsi intermedi verso il destinatario vengano automaticamente accettati (senza autorizzazione dell'utente). Funziona a mo di intent tramite una tabella globale. # ---------------------METODI UTILI----------------------------- def getProtocol(self, pkt): pkt_ipv4 = pkt.get_protocol(ipv4.ipv4) tp = pkt.get_protocol(tcp.tcp) port = 0 if tp: port = tp.dst_port ud = pkt.get_protocol(udp.udp) if ud: port = ud.dst_port #print "PORTA: %s" % port if pkt_ipv4: protocol = pkt_ipv4.proto if protocol==1: return "ICMP" if protocol==6: if port==80: return "HTTP" if port==443: return "HTTPS" return "TCP" if protocol==17: if port==53: return "DNS" if port==67: return "DHCP" return "UDP" return "Unknown. 
If you confirm, you will add a general traffic rule (= every type of traffic) between src and dst" def getMatch(self, pkt, parser, in_port, dst): pkt_ipv4 = pkt.get_protocol(ipv4.ipv4) tp = pkt.get_protocol(tcp.tcp) port = 0 if tp: port = tp.dst_port ud = pkt.get_protocol(udp.udp) if ud: port = ud.dst_port #print "PORTA: %s" % port if pkt_ipv4: protocol = pkt_ipv4.proto if protocol==1: return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=1) if protocol==6: if port==80: return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=6, tcp_dst=80) if port==443: return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=6, tcp_dst=443) return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=6) if protocol==17: if port==53: parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=17, udp_dst=53) if port==67: parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=17, udp_dst=67) return parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_type=0x0800, ip_proto=17) return parser.OFPMatch(in_port=in_port, eth_dst=dst) #metodo per filtrare mac address in ingresso (=passano dal controller senza conferma dell'utente) def filtered_ip(self, dst, eth): #escludo i seguenti mac address dal filtraggio (passano normalmente): #richieste ARP, Link Layer Discovery Protocol, Multicast (ipv6 e ipv), broadcast address return eth.ethertype != 0x0806 and self.lldp_filter(dst) and self.ipv4_multicast_filter(dst) and self.ipv6_multicast_filter(dst) and dst != "ff:ff:ff:ff:ff:ff" def lldp_filter(self, addr): return addr != "01:80:c2:00:00:0e" and addr != "01:80:c2:00:00:03" and addr != "01:80:c2:00:00:00" def ipv6_multicast_filter(self, addr): #escludo mac da 33-33-00-00-00-00 a 33-33-FF-FF-FF-FF (vedere http://www.iana.org/assignments/ethernet-numbers/ethernet-numbers.xhtml) return addr[:5]!="33:33" def ipv4_multicast_filter(self, addr): #escludo mac da 01-00-5E-00-00-00 a 01-00-5E-7F-FF-FF (vedere https://technet.microsoft.com/en-us/library/cc957928.aspx) #print "valuto %s" % addr if addr[:8]!="01:00:5e": #print "TRUE" return True else: val = addr[9]=='8' or addr[9]=='9' or addr[9]=='a' or addr[9]=='b' or addr[9]=='c' or addr[9]=='d' or addr[9]=='e' or addr[9]=='f' #print "Sono nel secondo ramo: %s" % val return val #metodo che serve semplicemente per dire che 00:00:00:00:00:02 00:00:00:00:00:01 ICMP e' uguale a 00:00:00:00:00:01 00:00:00:00:00:02 ICMP #perche' semplicemente e' il ritorno def check(self, to_find): #es: to_find: 00:00:00:00:00:02 00:00:00:00:00:01 ICMP add = to_find.split( ) #add e' una lista contenente due elementi (i due mac addr) case1 = "%s %s %s" % (add[0], add[1], add[2]) #con queste operazioni costruisco due stringhe: 00:00:00:00:00:02 00:00:00:00:00:01 ICMP e 00:00:00:00:00:01 00:00:00:00:00:02 ICMP case2 = "%s %s %s" % (add[1], add[0], add[2]) return (case1 in self.currentroutes or case2 in self.currentroutes) #esiste gia' una occorrenza ritorno true (sarebbe una route gia' autorizzata!) 
#--------------------------------FUNZIONI PRINCIPALI-------------------------------------- def list_communications(self): #prima rest api eseguita: notifica all'utente di una connessione nuova (nuovo packet in da un certo host ad un altro host) actual = self.communications self.communications = self.communications[self.communications.find('\n') + 1:] #elimino da communications il valore actual e lo faccio prendendo tutto cio' che c'e' dopo il primo \n (= svuoto communications) #print "in coda: %s" % actual # L'algoritmo seguente verifica che la generica coppia src e dst sia comparsa per la prima volta. # ES: se h1 pinga h2 per la prima volta all'utente verra' notificato che e' in atto una conessione da per esempio h1 a h2. # In una topologia con due switch e due host pero' (ma comunque vale anche per topologie piu' generiche) dovranno essere aggiunte 4 regole (4 pezzi di percorso): # farsi disegnino della topologia per maggiore chiarezza! # 1) da eth1 di s2 provenienti da h2 e diretti a h1 (tramite eth2) # 2) da eth2 di s1 provenienti da h2 e diretti a h1 (tramite eth1) # 3) da eth1 di s1 provenienti da h1 e diretti a h2 (tramite eth2) # 4) da eth2 di s2 provenienti da h1 e diretti a h2 (tramite eth1) # Con questo algoritmo alla prima richiesta (es: h1 ping h2) mi memorizzo la coppia h1-h2 (+relativo type) # Gli altri pezzi di route (cioe' le altre regole) vengono percio' automaticamente inserite visto che sono che tutte riguardano la coppia h1-h2(+type) if(actual!=''): if self.check(actual[:actual.find('\n')]) == True: #serve per tagliare il \n finale: cioe' prende la sottostringa da 0 alla posizione dello \n esclusa #print "ENTRY GIA' VISTA %s" % actual[:actual.find('\n')] self.accept() #accetto gia'! e' riferita ad una coppia gia accettata dall'utente! return "done"; #notifico lo script di js che non deve chiedere niente altro all'utente perche' essendo questo un packet intermedio #per una connessione tra src e dst gia' autorizzata in precedenza, automaticamente aggiungo la flow nello switch else: #print "ENTRY MAI VISTA %s" % actual[:actual.find('\n')] self.currentroutes.append(actual[:actual.find('\n')]) #se e' una coppia nuova chiedo all'utente che vuole fare, se accetta al prossimo passo le #altre regole intermedie vengono aggiunte automaticamente return actual def accept(self): datapath = self.messages[0].datapath ofproto = datapath.ofproto parser = datapath.ofproto_parser in_port = self.messages[0].match['in_port'] dpid = datapath.id pkt = packet.Packet(self.messages[0].data) eth = pkt.get_protocols(ethernet.ethernet)[0] src = eth.src dst = eth.dst protocol = self.getProtocol(pkt) key = "%s %s %s" % (src, dst, protocol) if dst in self.mac_to_port[dpid]: out_port = self.mac_to_port[dpid][dst] else: out_port = ofproto.OFPP_FLOOD #a seconda del pacchetto in ingresso e del suo tipo di traffico (ICMP, DNS.. 
ecc) installo una flow appropriata match = self.getMatch(pkt, parser, in_port, dst); #print(match) actions = [parser.OFPActionOutput(out_port)] inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)] #Se esiste un buffer_id (cioe' se i dati del pacchetto vengono memorizzati nello switch) allora occorre dare il riferimento al buffer (buffer_id) #altrimenti non serve #mod dice di inserire una openflow mod che utilizzi le istruzioni descritte sopra (applicare immediatamente il comportamente), #le azioni (mandare sulla porta di uscita) e il match (installazione della regola appropriata a seconda del tipo di traffico) if self.messages[0].buffer_id: mod = parser.OFPFlowMod(datapath=datapath, buffer_id=self.messages[0].buffer_id, priority=1, match=match, instructions=inst) else: mod = parser.OFPFlowMod(datapath=datapath, priority=1, match=match, instructions=inst) datapath.send_msg(mod) if key in self.story: self.story.remove(key) #print "%s eliminata (sono in accept)!" % key self.messages.pop(0) #rimuove dalla lista l'elemento 0 def deny(self): datapath = self.messages[0].datapath parser = datapath.ofproto_parser in_port = self.messages[0].match['in_port'] pkt = packet.Packet(self.messages[0].data) eth = pkt.get_protocols(ethernet.ethernet)[0] src = eth.src dst = eth.dst protocol = self.getProtocol(pkt) key = "%s %s %s" % (src, dst, protocol) match = parser.OFPMatch(in_port=in_port, eth_dst=dst) #Se esiste un buffer_id (cioe' se i dati del pacchetto vengono memorizzati nello switch) allora occorre dare il riferimento al buffer (buffer_id) #altrimenti non serve #mod dice di inserire una openflow mod che droppi il pacchetto: infatti se negli argomenti non si specifica il campo instructions=inst (come nella accept), #questo metodo crea una openflow mod che droppa le regole che fanno match (cioe' che entrano da una certa porta e destinate ad un certo mac address). #Le successive richieste identiche verranno bloccate da questa regola qua inserita! L'unico modo per togliere la regola #e' farlo manualmente sovrascrivendola attraverso l'inserimento manuale con il modulo tap.py if self.messages[0].buffer_id: mod = parser.OFPFlowMod(datapath=datapath, buffer_id=self.messages[0].buffer_id, priority=1, match=match) else: mod = parser.OFPFlowMod(datapath=datapath, priority=1, match=match) datapath.send_msg(mod) if key in self.story: self.story.remove(key) #print "%s eliminata (sono in deny)!" % key self.messages.pop(0) #----------------------------GESTIONE DEGLI SWITCH------------------------------------------- #a seguire un decoratore che mi dice come gestire la fase openflow della richesta delle funzioni dello switch. #Specificamente, dopo aver ricevuto la reply dallo switch, viene aggiunto una table-miss flow, cioe' il comportamento #di default per i pacchetti che arrivano allo switch e non hanno una flow (non sanno dove essere rediretti dallo switch). @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER) def switch_features_handler(self, ev): datapath = ev.msg.datapath ofproto = datapath.ofproto parser = datapath.ofproto_parser # Delete all existing rules on the switch mod = parser.OFPFlowMod(datapath=datapath, command=ofproto.OFPFC_DELETE, out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY) datapath.send_msg(mod) # install table-miss flow entry # # We specify NO BUFFER to max_len of the output action due to # OVS bug. At this moment, if we specify a lesser number, e.g., # 128, OVS will send Packet-In with invalid buffer_id and # truncated packet data. 
In that case, we cannot output packets # correctly. The bug has been fixed in OVS v2.1.0. #di default i pacchetti vengono mandati al controller con un OFPCML_NO_BUFFER. #Il metodo OFPActionOutput serve ad indicare di mandare fuori il pacchetto con le regole OFPP_CONTROLLER (verso il controller) #e OFPCML_NO_BUFFER (che si traduce nell'inviare tutto il pacchetto senza bufferizzare nulla) match = parser.OFPMatch() actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER, ofproto.OFPCML_NO_BUFFER)] #OFPIT_APPLY_ACTIONS si traduce in applicare immediatamente le azioni in actions inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)] #con priorita' 0, fanno match tutti i pacchetti! Tutto e' inviato al controller mod = parser.OFPFlowMod(datapath=datapath, priority=0, match=match, instructions=inst) datapath.send_msg(mod) @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER) def _packet_in_handler(self, ev): # con questo metodo raccolgo i packet in in ingresso! poi l'utente li accettera' o meno! Li metto in messages # If you hit this you might want to increase # the "miss_send_length" of your switch if ev.msg.msg_len < ev.msg.total_len: self.logger.debug("packet truncated: only %s of %s bytes", ev.msg.msg_len, ev.msg.total_len) msg = ev.msg # sintassi di msg: #OFPPacketIn(buffer_id=256,cookie=0,data='\x01\x80\xc2\x00\x00\x0e\x8e\xf5\xa4\xcd\xa4j\x88\xcc\x02\x16\x07 #dpid:0000000000000001\x04\x05\x02\x00\x00\x00\x02\x06\x02\x00x\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', #match=OFPMatch(oxm_fields={'in_port': 2}),reason=0,table_id=0,total_len=60)) in_port = msg.match['in_port'] #su quale porta dello switch? datapath = msg.datapath ofproto = datapath.ofproto parser = datapath.ofproto_parser dpid = datapath.id #quale switch? torna l'id (es: 1, 2 ecc) pkt = packet.Packet(msg.data) eth = pkt.get_protocols(ethernet.ethernet)[0] src = eth.src #indirizzo eth src (=mac address) dst = eth.dst #indirizzo eth dst (=mac address) # Sotto aggiungiamo le informazioni sullo switch dpid # Ad ogni indirizzo MAC associa la porta dello switch #se il dpid dello switch non esiste nella mac address table, lo aggiungo con ovviamente la lista di mac e porte settata a {} (vuota). #Se lo switch c'era gia', il metodo non fa nulla! self.mac_to_port.setdefault(dpid, {}) # learn a mac address to avoid FLOOD next time. #in poche parole associo l'indirizzo mac source con la porta in ingresso. #Cioe' associo il dispositivo fisico (mac address) in ingresso con la porta dello switch su cui ascolta! #E' come se registrassi chi ha fatto la richiesta! Cioe' associo il mac address alla porta su cui ascolta questo dispositivo! #Percio' per esempio un pacchetto di ritorno non dovra' fare flood perche' ora si sa a quale porta e' associato il dispositivo (mac addresss) a cui devo inviare! #La tabella sara' fatta come segue (come dicevamo sopra): #mac_to_port = {1: {"00:00:00:02": 2, "00:00:00:01": 1}, 2: {"00:00:00:02": 1, "00:00:00:01":2}} self.mac_to_port[dpid][src] = in_port #ora devo trovare il mac address di destinazione nella tabella dei mac address: #Se associato allo switch dpid esiste un campo destinazione, estraggo la porta out a partire dall'indirizzo mac dst if dst in self.mac_to_port[dpid]: out_port = self.mac_to_port[dpid][dst] #altrimenti per forza la porta di uscita sara' un flood: pacchetto inviato a tutte le porte di uscita. 
#In tal modo spero di raggiungere il mac address della destinazione else: out_port = ofproto.OFPP_FLOOD actions = [parser.OFPActionOutput(out_port)] #RITROVAMENTO PROTOCOLLO protocol = self.getProtocol(pkt) #print "protocol: %s" % protocol #print "STORIA: %s" % story #print "DEBUG: Packet in src %s dst %s con protocollo %s" % (src, dst, protocol) key = "%s %s %s" % (src, dst, protocol) if key not in self.story and self.filtered_ip(dst, eth): # appendo il messaggio appena arrivato alla lista dei messaggi in attesa self.messages.append(ev.msg) # scrivo in output la sorgente e la destinazione separati da uno spazio self.communications += str(src) self.communications += ' ' self.communications += str(dst) self.communications += ' ' self.communications += str(protocol) self.communications += '\n' self.story.append(key) #print "Aggiunto %s alla storia!" % key if self.filtered_ip(dst, eth) == False: data = None #i dati da inviare allo switch vengono posti a none. #Perche'? Perche' possono essere bufferizzati all'interno dello switch (e identificati da un buffer_id) if msg.buffer_id == ofproto.OFP_NO_BUFFER: #se non esiste nessun buffer_id, i dati vengono presi dal packet_in in ingresso data = msg.data out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port, actions=actions, data=data) #il messaggio di packet out si comporta in due modi a seconda che i dati siano bufferizzati o meno all'interno dello switch: #se lo sono, si andranno a beccare tali dati tramite il buffer_id, se non lo sono il campo data non viene riempito dall'if appena sopra e quindi il controller #manda allo switch un flow mod completo anche dei dati datapath.send_msg(out)
gpl-3.0
-781,128,038,751,831,400
52.058667
833
0.675881
false
2.895794
false
false
false
JanFan/py-aho-corasick
cmp.py
1
2262
# -*- coding: utf-8 -*-
'''
Performance Testing

Requirements:
pip install pyahocorasick
'''
import random
import string
import time

from py_aho_corasick import py_aho_corasick

import ahocorasick


rand_str = lambda n: ''.join([random.choice(string.ascii_lowercase) for i in range(n)])


if __name__ == '__main__':
    N = 1000000
    text = rand_str(N)

    keywords = list()
    NW = 50000
    for i in range(NW):
        nw = random.randint(5,10)
        kw = rand_str(nw)
        keywords.append(kw)

    # pyahocorasick
    start_t = time.time()
    A = ahocorasick.Automaton()
    for idx, key in enumerate(keywords):
        A.add_word(key, (idx, key))
    A.make_automaton()
    delta_build1 = time.time() - start_t

    start_t = time.time()
    cnt1 = 0
    for end_index, (insert_order, original_value) in A.iter(text):
        start_index = end_index - len(original_value) + 1
        assert text[start_index:start_index + len(original_value)] == original_value
        cnt1 += 1
    delta_search1 = time.time() - start_t

    # py_aho_corasick
    start_t = time.time()
    A = py_aho_corasick.Automaton(keywords)
    delta_build2 = time.time() - start_t

    start_t = time.time()
    kv = A.get_keywords_found(text)
    cnt2 = 0
    for idx,k,v in kv:
        assert text[idx:idx+len(k)] == k
        cnt2 += 1
    delta_search2 = time.time() - start_t

    # brute force
    start_t = time.time()
    cnt3 = 0
    for kw in keywords:
        beg = 0
        while beg < len(text):
            idx = text.find(kw, beg)
            if idx == -1:
                break
            else:
                assert text[idx:idx+len(kw)] == kw
                beg = idx + 1
                cnt3 += 1
    delta_search3 = time.time() - start_t

    print(cnt1)
    assert cnt1 == cnt2
    assert cnt1 == cnt3

    # output
    print('pyahocorasick: text of {0} length, {1} keywords, building time {2} and searching time cost {3}'.format(N,NW,delta_build1,delta_search1))
    print('py_aho_corasick: text of {0} length, {1} keywords, building time {2} and searching time cost {3}'.format(N,NW,delta_build2,delta_search2))
    print('brute force: text of {0} length, {1} keywords, building time {2} and searching time cost {3}'.format(N,NW,0,delta_search3))
mit
-589,962,487,331,969,400
26.925926
149
0.589302
false
3.124309
false
false
false
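The benchmark above exercises two Aho-Corasick implementations plus a brute-force str.find loop over the same random text. A minimal usage sketch of the pure-Python py_aho_corasick API exactly as it appears in that script (assuming the package is installed; the keywords and text below are made up):

from py_aho_corasick import py_aho_corasick

keywords = ['he', 'she', 'his', 'hers']
text = 'ushers'

automaton = py_aho_corasick.Automaton(keywords)
for idx, keyword, value in automaton.get_keywords_found(text):
    # idx is the start offset of the match inside `text`
    print(idx, keyword, value)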
irwinsnet/DesPy
despy/dp.py
1
3255
# Despy: A discrete event simulation framework for Python # Version 0.1 # Released under the MIT License (MIT) # Copyright (c) 2015, Stacy Irwin """ ******** despy.dp ******** .. autosummary:: """ EARLY = -1 STANDARD = 0 LATE = 1 class AbstractPackage(): def __init__(self): import despy.abstract.model self.AbstractModel = despy.abstract.model.AbstractModel abstract = AbstractPackage() del AbstractPackage class StatsPackage(): def __init__(self): from despy.stats.random import get_empirical_pmf, get_poisson_pmf self.get_empirical_pmf = get_empirical_pmf self.get_poisson_pmf = get_poisson_pmf stats = StatsPackage() del StatsPackage from despy.session import Session, Config # @UnusedImport class OutputPackage(): def __init__(self): import despy.output.report self.HtmlReport = despy.output.report.HtmlReport self.Datatype = despy.output.report.Datatype import despy.output.results #IMPORTS despy.output.trace self.results = despy.output.results.Results import despy.output.statistic self.AbstractStatistic = despy.output.statistic.AbstractStatistic self.DiscreteStatistic = despy.output.statistic.DiscreteStatistic self.TimeWeightedStatistic = ( despy.output.statistic.TimeWeightedStatistic) import despy.output.trace self.Trace = despy.output.trace.Trace self.TraceRecord = despy.output.trace.TraceRecord import despy.output.plot self.plot = despy.output.plot import despy.output.console self.console = despy.output.console import despy.output.counter self.Counter = despy.output.counter.Counter output = OutputPackage() del OutputPackage class ModelPackage(): def __init__(self): import despy.model.trigger self.AbstractTrigger = despy.model.trigger.AbstractTrigger self.TimeTrigger = despy.model.trigger.TimeTrigger import despy.model.component self.Component = despy.model.component.Component import despy.model.process #IMPORTS despy.fel.event self.Process = despy.model.process.Process self.ProcessTimeOutEvent = despy.model.process.ProcessTimeOutEvent import despy.model.queue self.Queue = despy.model.queue.Queue import despy.model.entity self.Entity = despy.model.entity.Entity import despy.model.resource self.Resource = despy.model.resource.Resource self.ResourceQueue = despy.model.resource.ResourceQueue self.ResourceFinishEvent = despy.model.resource.ResourceFinishServiceEvent import despy.model.timer self.RandomTimer = despy.model.timer.RandomTimer self.TimerEvent = despy.model.timer.TimerEvent model = ModelPackage() del ModelPackage class FelPackage(): def __init__(self): import despy.fel.event self.Event = despy.fel.event.Event fel = FelPackage() del FelPackage from despy.simulation import Simulation # @UnusedImport
mit
-9,093,828,271,728,340,000
28.590909
82
0.654378
false
3.998771
false
false
false
ma-ver-ick/pyaphrodite
prepare_images.py
1
1536
from __future__ import absolute_import, division, print_function, unicode_literals

import numpy as np
import cv2
import os
import dto
import pickle

ROOT_DIR = "/home/pi/jme3/assets/Textures/"
RESIZE_WIDTH = 1920  # Raspberry pi texture size


def list_files(directory, extension):
	ret = []
	for file in os.listdir(directory):
		if not file.lower().endswith(extension):
			continue

		ret.append(directory + os.sep + file)

	return ret


all_files = list()
all_files.extend(list_files(ROOT_DIR, ".jpg"))

database = list()
for file in all_files:
	print("Processing file: %s" % (file))

	img = cv2.imread(file)
	height, width, depth = img.shape
	aspect_ratio = float(width) / float(height)

	new_height = RESIZE_WIDTH / aspect_ratio
	temp_debug_msg = "\tResizing from (%4.0f, %4.0f) to (%4.0f, %4.0f)"
	temp_debug_tuple = (width, height, RESIZE_WIDTH, new_height)
	print(temp_debug_msg % temp_debug_tuple)

	dim = (int(RESIZE_WIDTH), int(new_height))
	resized = cv2.resize(img, dim, interpolation = cv2.INTER_LANCZOS4)

	orig_path, orig_filename = os.path.split(file)
	orig_filename, orig_file_ext = os.path.splitext(orig_filename)
	optimized_filename = orig_path + os.sep + orig_filename
	optimized_filename += ".optimized.png"
	cv2.imwrite(optimized_filename, resized)

	p = dto.PictureDTO(file, width, height, depth, optimized_filename, dim[0], dim[1])
	database.append(p)

database_path = ROOT_DIR + os.sep + "database.pickle"
print("Saving database to " + database_path)
pickle.dump(database, open(database_path, "wb"))  # binary write mode for pickle
mit
4,709,988,111,238,682,000
26.927273
83
0.708333
false
2.881801
false
false
false
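The resize step above keeps the aspect ratio: the target width is fixed at 1920 and the new height is derived from the source width/height. A quick worked example of that arithmetic (the source image dimensions are invented):

RESIZE_WIDTH = 1920  # Raspberry Pi texture width used by the script

# e.g. a hypothetical 4000x3000 source photo
width, height = 4000, 3000
aspect_ratio = float(width) / float(height)   # 1.333...
new_height = RESIZE_WIDTH / aspect_ratio      # 1440.0
print((int(RESIZE_WIDTH), int(new_height)))   # (1920, 1440)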
dongqunxi/GrangerCausality
Preprocessing/CTPS_identifation_BrainComponents.py
1
4218
# -*- coding: utf-8 -*- """ Created on Fri Feb 21 10:42:55 2014 @author: imenb101 """ import numpy as np import matplotlib.pylab as pl import mne, sys, os from mne.viz import tight_layout from mne.fiff import Raw from mne.preprocessing import ICA from ctps import compute_ctps from ctps import plot_ctps_panel try: subject = sys.argv[1] trigger = sys.argv[2]#Get the trigger is stim or resp except: print "Please run with input file provided. Exiting" sys.exit() res_ch_name = 'STI 013' sti_ch_name = 'STI 014' n_components=0.99 n_pca_components=None max_pca_components=None subjects_dir = '/home/qdong/data/' subject_path = subjects_dir + subject#Set the data path of the subject #raw_fname = subject_path + '/MEG/ssp_cleaned_%s_audi_cued-raw_cle.fif' %subject raw_fname = subject_path + '/MEG/%s_audi_cued-raw_cle.fif' %subject raw_basename = os.path.splitext(os.path.basename(raw_fname))[0] raw = Raw(raw_fname, preload=True) picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False, stim=False, exclude='bads') ica = ICA(n_components=n_components, n_pca_components=n_pca_components, max_pca_components=max_pca_components, random_state=0) ica.decompose_raw(raw, picks=picks, decim=3) if trigger == 'resp':#'1' represents the response channel add_from_raw = mne.fiff.pick_types(raw.info, meg=False, resp=True, exclude='bads') sources_add = ica.sources_as_raw(raw, picks=add_from_raw) events = mne.find_events(sources_add, stim_channel=res_ch_name) raw_basename += '_resp' elif trigger == 'stim':#'0' represents the stimuli channel add_from_raw = mne.fiff.pick_types(raw.info, meg=False, stim=True, exclude='bads') sources_add = ica.sources_as_raw(raw, picks=add_from_raw) events = mne.find_events(sources_add, stim_channel=sti_ch_name) raw_basename += '_stim' else: print "Please select the triger channel '1' for response channel or '0' for stimilus channel." sys.exit() # drop non-data channels (ICA sources are type misc) #ica.n_pca_components=None picks = mne.fiff.pick_types(sources_add.info, meg=False, misc=True, exclude='bads') #Compare different bandwith of ICA components: 2-4, 4-8, 8-12, 12-16, 16-20Hz l_f = 2 Brain_idx1=[]#The index of ICA related with trigger channels axes_band = [221, 222, 223, 224] ax_index = 0 for i in [4, 8, 12, 16]: h_f = i get_ylim = True if l_f != 2: get_ylim = False sources_add = ica.sources_as_raw(raw, picks=add_from_raw) #sources_add.filter(l_freq=l_f, h_freq=h_f, method='iir', n_jobs=4) sources_add.filter(l_freq=l_f, h_freq=h_f, n_jobs=4, method='iir') this_band = '%i-%iHz' % (l_f, h_f) temp = l_f l_f = h_f # Epochs at R peak onset, from stim_eve. 
ica_epochs_events = mne.Epochs(sources_add, events, event_id=1, tmin=-0.3, tmax=0.3, picks=picks, preload=True, proj=False) x_length = len(ica_epochs_events.ch_names) # Compute phase values and statistics (significance values pK) #phase_trial_ecg, pk_dyn_ecg, _ = compute_ctps(ica_epochs_ecg.get_data()) _ , pk_dyn_stim, phase_trial = compute_ctps(ica_epochs_events.get_data()) # Get kuiper maxima pk_max = pk_dyn_stim.max(axis=1) Brain_sources = pk_max > 0.1 # bool array, get the prominient components related with trigger Brain_ind = np.where(Brain_sources)[0].tolist() # indices #skip the null idx related with response Brain_idx1 += (Brain_ind)#Get the obvious sources related #Plot the bar #ax = pl.subplot(axes_band[ax_index]) #pk_max.plot(axes=ax_index, ylim=ylim_ecg, xlim=xlim1) pl.subplot(axes_band[ax_index]) x_bar = np.arange(x_length) pl.bar(x_bar, pk_max) for x in Brain_ind: pl.bar(x, pk_max[x], facecolor='r') pl.axhline(0.1, color='k', label='threshod') pl.xlabel('%s' %this_band) pl.ylim(0, 0.5) ax_index += 1 pl.tight_layout() pl.show() #pl.savefig(subject_path+'/MEG/ctps_distribution_%s_%s_withoutSSP.png'%(subject, trigger)) pl.savefig(subject_path+'/MEG/ctps_distribution_%s_%s.png'%(subject, trigger)) Brain_idx = list(set(Brain_idx1)) print '%s has been identified as trigger components' %(Brain_idx)
bsd-3-clause
-859,910,584,621,222,400
38.055556
127
0.672357
false
2.787839
false
false
false
drhagen/parsita
src/parsita/metaclasses.py
1
4934
import inspect import builtins import re from . import options from .parsers import Parser, RegexParser class ParsersDict(dict): def __init__(self, old_options: dict): super().__init__() self.old_options = old_options # Holds state of options at start of definition self.forward_declarations = dict() # Stores forward declarations as they are discovered def __missing__(self, key): class_body_globals = inspect.currentframe().f_back.f_globals if key in class_body_globals: return class_body_globals[key] elif key in dir(builtins): return getattr(builtins, key) elif key in self.forward_declarations: return self.forward_declarations[key] else: new_forward_declaration = ForwardDeclaration() self.forward_declarations[key] = new_forward_declaration return new_forward_declaration def __setitem__(self, key, value): if isinstance(value, Parser): value.protected = True # Protects against accidental concatenation of sequential parsers value.name = key # Used for better error messages super().__setitem__(key, value) class ForwardDeclaration(Parser): def __init__(self): self._definition = None def __getattribute__(self, member): if member != '_definition' and self._definition is not None: return getattr(self._definition, member) else: return object.__getattribute__(self, member) def define(self, parser: Parser) -> None: self._definition = parser def fwd() -> ForwardDeclaration: """Manually create a forward declaration. Normally, forward declarations are created automatically by the contexts. But they can be created manually if not in a context or if the user wants to avoid confusing the IDE. """ return ForwardDeclaration() class GeneralParsersMeta(type): @classmethod def __prepare__(mcs, name, bases, **_): # noqa: N804 old_options = { 'handle_literal': options.handle_literal, 'parse_method': options.parse_method, } options.handle_literal = options.wrap_literal options.parse_method = options.basic_parse return ParsersDict(old_options) def __init__(cls, name, bases, dct, **_): # noqa: N805 old_options = dct.old_options super().__init__(name, bases, dct) # Resolve forward declarations, will raise if name not found for name, forward_declaration in dct.forward_declarations.items(): obj = dct[name] if not isinstance(obj, Parser): obj = options.handle_literal(obj) forward_declaration._definition = obj # Reset global variables for key, value in old_options.items(): setattr(options, key, value) def __call__(cls, *args, **kwargs): raise TypeError('Parsers cannot be instantiated. They use class bodies purely as contexts for managing ' 'defaults and allowing forward declarations. Access the individual parsers as static ' 'attributes.') class GeneralParsers(metaclass=GeneralParsersMeta): """Context for parsing general sequences. This is not a real class. Don't instantiate it. This is used by inheriting from it and defining parsers as class attributes in the body of the child class. 
""" pass class TextParsersMeta(GeneralParsersMeta): @classmethod def __prepare__(mcs, name, bases, whitespace: str = options.default_whitespace): # noqa: N804 old_options = { 'whitespace': options.whitespace, 'handle_literal': options.handle_literal, 'parse_method': options.parse_method, } # Store whitespace in global location so regex parsers can see it if isinstance(whitespace, str): whitespace = re.compile(whitespace) if whitespace is None: options.whitespace = None else: options.whitespace = RegexParser(whitespace) options.handle_literal = options.default_handle_literal options.parse_method = options.default_parse_method return ParsersDict(old_options) def __new__(mcs, name, bases, dct, **_): # noqa: N804 return super().__new__(mcs, name, bases, dct) class TextParsers(metaclass=TextParsersMeta): r"""Context for parsing text. This is not a real class. Don't instantiate it. This is used by inheriting from it and defining parsers as class attributes in the body of the child class. There is a keyword argument for the metaclass ``whitespace``. This is a regular expression defining the whitespace to be ignored. The default is r"\s*". """ pass __all__ = ['ForwardDeclaration', 'fwd', 'GeneralParsers', 'TextParsers']
mit
-8,236,695,756,848,111,000
32.794521
112
0.64167
false
4.501825
false
false
false
annahs/atmos_research
AL_incand_calib_SP217.py
1
1802
import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
from datetime import timedelta
import math
import numpy.polynomial.polynomial as poly

# mass fg, pk_ht, UNCORR
AL_HG_incand_calib = [
    [0.23173, 25.17577],
    [0.41398, 48.99595],
    [1.26106, 186.48122],
    [2.88282, 489.41296],
    [5.43241, 880.95554],
    [8.94784, 1347.39537],
]

HG_pkht = np.array([row[1] for row in AL_HG_incand_calib])
HG_mass = np.array([row[0] for row in AL_HG_incand_calib])
HG_mass_corr = np.array([row[0]/0.7 for row in AL_HG_incand_calib])

HG_fit = poly.polyfit(HG_pkht, HG_mass_corr, 1)
print 'HG fit', HG_fit

for line in AL_HG_incand_calib:
    incand_pk_ht = line[1]
    uncorr_mass_fg = line[0]
    AD_corr_fit = HG_fit[0] + HG_fit[1]*incand_pk_ht
    line.append(AD_corr_fit)

HG_pk_ht = [row[1] for row in AL_HG_incand_calib]
HG_uncorr_mass = [row[0] for row in AL_HG_incand_calib]
HG_uncorr_fit = [row[2]*0.7 for row in AL_HG_incand_calib]
HG_ADcorr_fit = [row[2] for row in AL_HG_incand_calib]

fig = plt.figure(figsize=(12, 10))
ax = fig.add_subplot(111)

ax.scatter(HG_pk_ht, HG_uncorr_mass, color='r', label='Uncorrected calibration')
ax.plot(HG_pk_ht, HG_ADcorr_fit, '--r', label='Aquadag correction applied')
ax.plot(HG_pk_ht, HG_uncorr_fit, '-r')

plt.xlabel('Incandescent pk height (a.u.)')
plt.ylabel('rBC mass (fg)')
plt.text(250, 10, 'Aquadag corrected fit:\nrBC mass = -0.017584 + 9.2453E-3*pkht')
ax.set_ylim(0, 14)
ax.set_xlim(0, 2000)
plt.legend()

os.chdir('C:/Users/Sarah Hanna/Documents/Data/Alert Data/SP2 Calibrations/')
plt.savefig('Alert SP2#17 Aquadag calibration curves.png', bbox_inches='tight')
plt.show()
mit
2,754,781,142,312,153,000
25.514706
81
0.709212
false
2.455041
false
false
false
mcalmer/spacewalk
spacecmd/src/lib/group.py
1
12970
# # Licensed under the GNU General Public License Version 3 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright 2013 Aron Parsons <aronparsons@gmail.com> # # NOTE: the 'self' variable is an instance of SpacewalkShell # wildcard import # pylint: disable=W0401,W0614 # unused argument # pylint: disable=W0613 # invalid function name # pylint: disable=C0103 import os import re import shlex try: from xmlrpc import client as xmlrpclib except ImportError: import xmlrpclib from spacecmd.utils import * def help_group_addsystems(self): print('group_addsystems: Add systems to a group') print('usage: group_addsystems GROUP <SYSTEMS>') print('') print(self.HELP_SYSTEM_OPTS) def complete_group_addsystems(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append('') if len(parts) == 2: return tab_completer(self.do_group_list('', True), text) elif len(parts) > 2: return self.tab_complete_systems(parts[-1]) return None def do_group_addsystems(self, args): arg_parser = get_argument_parser() (args, _options) = parse_command_arguments(args, arg_parser) if not args: self.help_group_addsystems() return group_name = args.pop(0) # use the systems listed in the SSM if re.match('ssm', args[0], re.I): systems = self.ssm.keys() else: systems = self.expand_systems(args) system_ids = [] for system in sorted(systems): system_id = self.get_system_id(system) if not system_id: continue system_ids.append(system_id) self.client.systemgroup.addOrRemoveSystems(self.session, group_name, system_ids, True) #################### def help_group_removesystems(self): print('group_removesystems: Remove systems from a group') print('usage: group_removesystems GROUP <SYSTEMS>') print('') print(self.HELP_SYSTEM_OPTS) def complete_group_removesystems(self, text, line, beg, end): parts = shlex.split(line) if line[-1] == ' ': parts.append('') if len(parts) == 2: return tab_completer(self.do_group_list('', True), text) elif len(parts) > 2: return self.tab_complete_systems(parts[-1]) return None def do_group_removesystems(self, args): arg_parser = get_argument_parser() (args, _options) = parse_command_arguments(args, arg_parser) if not args: self.help_group_removesystems() return group_name = args.pop(0) # use the systems listed in the SSM if re.match('ssm', args[0], re.I): systems = self.ssm.keys() else: systems = self.expand_systems(args) system_ids = [] for system in sorted(systems): system_id = self.get_system_id(system) if not system_id: continue system_ids.append(system_id) print('Systems') print('-------') print('\n'.join(sorted(systems))) if not self.user_confirm('Remove these systems [y/N]:'): return self.client.systemgroup.addOrRemoveSystems(self.session, group_name, system_ids, False) #################### def help_group_create(self): print('group_create: Create a system group') print('usage: group_create [NAME] [DESCRIPTION]') def 
do_group_create(self, args): arg_parser = get_argument_parser() (args, _options) = parse_command_arguments(args, arg_parser) if args: name = args[0] else: name = prompt_user('Name:') if len(args) > 1: description = ' '.join(args[1:]) else: description = prompt_user('Description:') self.client.systemgroup.create(self.session, name, description) #################### def help_group_delete(self): print('group_delete: Delete a system group') print('usage: group_delete NAME ...') def complete_group_delete(self, text, line, beg, end): return tab_completer(self.do_group_list('', True), text) def do_group_delete(self, args): arg_parser = get_argument_parser() (args, _options) = parse_command_arguments(args, arg_parser) if not args: self.help_group_delete() return groups = args self.do_group_details('', True) if not self.user_confirm('Delete these groups [y/N]:'): return for group in groups: self.client.systemgroup.delete(self.session, group) #################### def help_group_backup(self): print('group_backup: backup a system group') print('''usage: group_backup NAME [OUTDIR]) OUTDIR defaults to $HOME/spacecmd-backup/group/YYYY-MM-DD/NAME ''') def complete_group_backup(self, text, line, beg, end): List = self.do_group_list('', True) List.append('ALL') return tab_completer(List, text) def do_group_backup(self, args): arg_parser = get_argument_parser() (args, _options) = parse_command_arguments(args, arg_parser) if not args: self.help_group_backup() return groups = args if len(args) == 1 and args[0] == 'ALL': groups = self.do_group_list('', True) outputpath_base = None # use an output base from the user if it was passed if len(args) == 2: outputpath_base = datetime.now().strftime(os.path.expanduser(args[1])) else: outputpath_base = os.path.expanduser('~/spacecmd-backup/group') # make the final output path be <base>/date outputpath_base = os.path.join(outputpath_base, datetime.now().strftime("%Y-%m-%d")) try: if not os.path.isdir(outputpath_base): os.makedirs(outputpath_base) except OSError: logging.error('Could not create output directory') return for group in groups: print("Backup Group: %s" % group) details = self.client.systemgroup.getDetails(self.session, group) outputpath = outputpath_base + "/" + group print("Output File: %s" % outputpath) fh = open(outputpath, 'w') fh.write(details['description']) fh.close() #################### def help_group_restore(self): print('group_restore: restore a system group') print('usage: group_restore INPUTDIR [NAME] ...') def complete_group_restore(self, text, line, beg, end): parts = shlex.split(line) if len(parts) > 1: groups = self.do_group_list('', True) groups.append('ALL') return tab_completer(groups, text) return None def do_group_restore(self, args): arg_parser = get_argument_parser() (args, options) = parse_command_arguments(args, arg_parser) inputdir = os.getcwd() groups = [] files = {} current = {} if args: inputdir = args[0] groups = args[1:] else: self.help_group_restore() return inputdir = os.path.abspath(inputdir) logging.debug("Input Directory: %s" % (inputdir)) # make a list of file items in the input dir if os.path.isdir(inputdir): d_content = os.listdir(inputdir) for d_item in d_content: if os.path.isfile(inputdir + "/" + d_item): logging.debug("Found file %s" % inputdir + "/" + d_item) files[d_item] = inputdir + "/" + d_item else: logging.error("Restore dir %s does not exits or is not a directory" % inputdir) return if not files: logging.error("Restore dir %s has no restore items" % inputdir) return if (len(groups) == 1 and groups[0] == 
'ALL') or not groups: groups = files.keys() elif groups: for group in groups: if group in files: groups.append(group) else: logging.error("Group %s was not found in backup" % (group)) for groupname in self.do_group_list('', True): details = self.client.systemgroup.getDetails(self.session, groupname) current[groupname] = details['description'] current[groupname] = current[groupname].rstrip('\n') for groupname in files: fh = open(files[groupname], 'r') details = fh.read() fh.close() details = details.rstrip('\n') if groupname in current and current[groupname] == details: logging.debug("Already have %s" % groupname) continue elif groupname in current: logging.debug("Already have %s but the description has changed" % groupname) if is_interactive(options): print("Changing description from:") print("\n\"%s\"\nto\n\"%s\"\n" % (current[groupname], details)) userinput = prompt_user('Continue [y/N]:') if re.match('y', userinput, re.I): logging.info("Updating description for group: %s" % groupname) self.client.systemgroup.update(self.session, groupname, details) else: logging.info("Updating description for group: %s" % groupname) self.client.systemgroup.update(self.session, groupname, details) else: logging.info("Creating new group %s" % groupname) group = self.client.systemgroup.create(self.session, groupname, details) #################### def help_group_list(self): print('group_list: List available system groups') print('usage: group_list') def do_group_list(self, args, doreturn=False): groups = self.client.systemgroup.listAllGroups(self.session) groups = [g.get('name') for g in groups] if doreturn: return groups else: if groups: print('\n'.join(sorted(groups))) return None #################### def help_group_listsystems(self): print('group_listsystems: List the members of a group') print('usage: group_listsystems GROUP') def complete_group_listsystems(self, text, line, beg, end): return tab_completer(self.do_group_list('', True), text) def do_group_listsystems(self, args, doreturn=False): arg_parser = get_argument_parser() (args, _options) = parse_command_arguments(args, arg_parser) if not args: self.help_group_listsystems() return None group = args[0] try: systems = self.client.systemgroup.listSystems(self.session, group) systems = [s.get('profile_name') for s in systems] except xmlrpclib.Fault: logging.warning('%s is not a valid group' % group) return [] if doreturn: return systems else: if systems: print('\n'.join(sorted(systems))) return None #################### def help_group_details(self): print('group_details: Show the details of a system group') print('usage: group_details GROUP ...') def complete_group_details(self, text, line, beg, end): return tab_completer(self.do_group_list('', True), text) def do_group_details(self, args, short=False): arg_parser = get_argument_parser() (args, _options) = parse_command_arguments(args, arg_parser) if not args: self.help_group_details() return add_separator = False for group in args: try: details = self.client.systemgroup.getDetails(self.session, group) systems = self.client.systemgroup.listSystems(self.session, group) systems = [s.get('profile_name') for s in systems] except xmlrpclib.Fault: logging.warning('%s is not a valid group' % group) return if add_separator: print(self.SEPARATOR) add_separator = True print('Name %s' % details.get('name')) print('Description: %s' % details.get('description')) print('Number of Systems: %i' % details.get('system_count')) if not short: print('') print('Members') print('-------') print('\n'.join(sorted(systems)))
gpl-2.0
-2,932,662,179,444,418,000
27.195652
88
0.594217
false
3.838414
false
false
false
vbmacher/emuStudio
add_server.py
1
4717
#!/usr/bin/env python import sys import os import os.path import shutil import xml.dom.minidom from xml.dom.minidom import getDOMImplementation from xml.dom.minidom import parseString from subprocess import call def get_vars(): errorMsg = "" travisSecurityVars = os.environ["TRAVIS_SECURE_ENV_VARS"] if travisSecurityVars == "false": errorMsg = "\nNo secure env vars available; " masterPassword = os.getenv("MASTER_PASSWORD", "false") if masterPassword == "false": errorMsg += "\nMaster security password is not set; " userName = os.getenv("EMUSTUDIO_USERNAME", "false") if userName == "false": errorMsg += "\nServer user name is not set; " password = os.getenv("EMUSTUDIO_PASSWORD", "false") if password == "false": errorMsg += "\nServer password is not set" if errorMsg != "": print errorMsg sys.exit(1) return (masterPassword, userName, password) def get_or_create(xmldoc, name, element=None): if element == None: element = xmldoc children = element.getElementsByTagName(name) if len(children) == 0: children = [xmldoc.createElement(name)] element.appendChild(children[0]) return children[0] def recreate(xmldoc, name, element=None): if element == None: element = xmldoc children = element.getElementsByTagName(name) if len(children) == 0: theChild = xmldoc.createElement(name) element.appendChild(theChild) else: theChild = children[0] for child in theChild.childNodes: theChild.removeChild(child) return theChild def prettify(node): return '\n'.join([line for line in node.toprettyxml(indent=' ').split('\n') if line.strip()]) def create_settings_security(path, masterPassword): try: xmldoc = xml.dom.minidom.parse(path) except: xmldoc = getDOMImplementation().createDocument(None, "settingsSecurity", None) securityElement = get_or_create(xmldoc, "settingsSecurity") masterElement = recreate(xmldoc, "master", securityElement) securityNode = xmldoc.createTextNode(masterPassword) masterElement.appendChild(securityNode) return prettify(xmldoc) def create_settings(path, userName, password): try: xmldoc = xml.dom.minidom.parse(path) except: xmldoc = getDOMImplementation().createDocument(None, "settings", None) settingsElement = get_or_create(xmldoc, "settings") serversElement = get_or_create(xmldoc, "servers", settingsElement) for child in serversElement.getElementsByTagName("server"): serversElement.removeChild(child) serverElement = recreate(xmldoc, "server", serversElement) serverIdElement = xmldoc.createElement("id") serverUserElement = xmldoc.createElement("username") serverPasswordElement = xmldoc.createElement("password") serverIdNode = xmldoc.createTextNode("emustudio-repository") serverUserNode = xmldoc.createTextNode(userName) serverPasswordNode = xmldoc.createTextNode(password) serverIdElement.appendChild(serverIdNode) serverUserElement.appendChild(serverUserNode) serverPasswordElement.appendChild(serverPasswordNode) serverElement.appendChild(serverIdElement) serverElement.appendChild(serverUserElement) serverElement.appendChild(serverPasswordElement) # Turn off interactive mode interactiveNode = recreate(xmldoc, "interactiveMode", settingsElement) interactiveValue = xmldoc.createTextNode("false") interactiveNode.appendChild(interactiveValue) return prettify(xmldoc) def write_file(path, content, mode='w'): file = open(path, mode) file.write(content) file.close() def backup_or_create(path): if os.path.exists(path): shutil.copy2(path, path + ".bak") else: write_file(path, "") homedir = os.path.expanduser("~") settingsSecurityPath = homedir + '/.m2/settings-security.xml' settingsPath = homedir + 
'/.m2/settings.xml' knownHostsPath = homedir + "/.ssh/known_hosts" vars = get_vars() backup_or_create(settingsSecurityPath) backup_or_create(settingsPath) try: settingsSecurityXml = create_settings_security(settingsSecurityPath, vars[0]) settingsXml = create_settings(settingsPath, vars[1], vars[2]) write_file(settingsSecurityPath, settingsSecurityXml) write_file(settingsPath, settingsXml) # append sourceforge.net public ssh key fingerprint (if disabling strict host checking doesn't work) call(['ssh-keygen', '-R', 'web.sourceforge.net']) with open(knownHostsPath, "w") as outfile: call(['ssh-keyscan', '-H', 'web.sourceforge.net'], stdout=outfile) except: print "Unexpected error occured" pass
gpl-2.0
-6,555,765,835,985,879,000
31.088435
104
0.705745
false
3.872742
false
false
false
OscarES/serpentinetracker
beamline.py
1
21534
#!/usr/bin/python # # Copyright 2009, Stephen Molloy, Stewart Boogert # # This file is part of Serpentine. # # Serpentine is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Serpentine is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Serpentine. If not, see <http://www.gnu.org/licenses/>. # """Define the physics functions and classes (e.g. tracking, Rmat calcs, etc.)""" # numpy arrays will be useful here #from numpy import * import numpy as np from matplotlib.pylab import plot, subplot, xlabel, ylabel, legend #from elements import * #from scipy import weave from utilities import RotMats import beamadjust import copy from itertools import repeat import re # =============================================================== # Lists of beamline components are almost, but not quite, the right # way to define the lattice. Here we define a class to inherit from # list, but with the multiplication operator redefined to do what # we want # The physics tools will be added on as methods of this new class class Line(list): """class Line: A class designed to hold the list of elements that define the beamline lattice. This class inherits from Python's built-in 'list' class.""" def __mul__(self, fact): """Allows multiplication of a small lattice subset by an integer in order to easily define a repeated section""" new_line = Line() copyfunc = lambda x: new_line.extend(copy.deepcopy(x)) for rep in repeat(copyfunc, fact): rep(self) # for rep in range(fact): # new_line.extend(copy.deepcopy(self)) return new_line def __repr__(self): def namecatch(inst): try: return str(inst.name) except AttributeError: return "No name attr" ret = '\n'.join(namecatch(ele)+" :: "+str(ele.__class__) for ele in self) return ret def FindEleByName(self, name): from serpentine import Serpentine p = re.compile("^" + name + "$") indlist = list() for i in xrange(len(self)): if type(self[i]) == Serpentine: try: intern_list = self[i].beamline.FindEleByName(name) [indlist.append([i, int_i]) for int_i in intern_list] except ValueError, UnboundLocalError: pass elif p.match(self[i].name): indlist.append(i) if indlist: return indlist else: raise ValueError(name + ": Not found.") def FindEleByType(self, classname): from serpentine import Serpentine p = re.compile("^" + classname + "$") indlist = list() for i in xrange(len(self)): if type(self[i]) == Serpentine: try: intern_list = self[i].beamline.FindEleByType(classname) [indlist.append([i, int_i]) for int_i in intern_list] except ValueError, UnboundLocalError: pass elif p.match(self[i].__class__.__name__): indlist.append(i) if indlist: return indlist else: raise ValueError(classname + ": Not found.") def GetEleByType(self, classname): """Returns a list of elements of class 'classtype' from self. 
This returns the elements themselves, not their indices.""" def extractele(beamline, i, elelist): if type(i)==int: elelist.append(beamline[i]) elif type(i[1])==int: elelist.append(beamline[i[0]].beamline[i[1]]) else: extractele(beamline[i[0]].beamline, i[1], elelist) elelist = list() indlist = self.FindEleByType(classname) for i in indlist: extractele(self, i, elelist) return elelist def FindEleByObj(self, obj): """Returns the index at which the object 'obj' can be found in self.""" for i in range(len(self)): if self[i].__class__.__name__ == 'Serpentine': intern_list = self[i].beamline.FindEleByObj(obj) eledict = dict() eledict[i] = intern_list return eledict if obj == self[i] : return i return -1 def GetEleByName(self, name): """Returns a list of elements named 'name' from self. This returns the elements themselves, not their indices.""" def extractele(beamline, i, elelist): if type(i)==int: elelist.append(beamline[i]) elif type(i[1])==int: elelist.append(beamline[i[0]].beamline[i[1]]) else: extractele(beamline[i[0]].beamline, i[1], elelist) elems = list() indlist = self.FindEleByName(name) for i in indlist: extractele(self, i, elems) return elems def RmatAtoB(self, first, last): """Returns the 6D R-matrix between the entrance of self[first], and the exit of self[last].""" rmat = np.eye(6) for i in self[first:last+1]: rmat = np.dot(i.R, rmat) return rmat def Track(self, beam, offset=np.array([0, 0, 0, 0, 0, 0])): """The primary tracking method for the Line class. It loops around each element of self, calculates the offsets due to movers, offsets, etc., recalculates the energy variable of the beam being tracked to delta_E/E, and then calls the 'TrackThruEle' method of the element in question. Once tracking is complete for that element, the offset and the beam's energy variable are reset to their original values. The loop then continues on to the next element. The 'beam' input should be an object of class 'ElectronBeam' (or one which inherits from that class). Track returns the beam that results from tracking through the lattice. 
""" prog = ProgressBar(0, len(self), 77) beam_out = copy.deepcopy(beam) # Beam momentum is defined as absolute, but R matrices expect delta_P/P for ele in self: if ele.__class__.__name__ == 'Serpentine': ele.beam_in = beam_out ele.Track() beam_out = ele.beam_out continue if sum(offset**2)>0: beam_out.x = self._AdjustBeamByLineOffset(ele, beam_out, offset) try: beam_out.x = beamadjust.AdjustBeamWithMover(ele, beam_out) except AttributeError: pass if sum(ele.offset**2)>0: beam_out.x = beamadjust.AdjustBeamByOffset(ele, beam_out) try: ele.Processor(beam_out) except AttributeError: pass beam_out.x[5, :] = (beam_out.x[5, :] - ele.P) / ele.P beam_out = ele.TrackThruEle(beam_out) beam_out.x[5, :] = (beam_out.x[5, :] * ele.P) + ele.P if sum(ele.offset**2): beam_out.x = beamadjust.ReAdjustBeamByOffset(ele, beam_out) if hasattr(ele, 'Mover'): beam_out.x = beamadjust.ReAdjustBeamWithMover(ele, beam_out) if sum(offset**2)>0: beam_out.x = self._ReAdjustBeamByLineOffset( ele, beam_out, offset ) prog.updateAmount(self.index(ele)) print prog, "\r", return beam_out def _AdjustBeamByLineOffset(self, ele, beam_out, offset): """Correct the beam position by the offset specified for the entire beamline before the call to Track()""" r_in = RotMats(-offset[5])[0] line_length = self[-1].S - self[0].S dist_along_line = ele.S - self[0].S dist_normed = dist_along_line - (line_length/2) # dist from line centre delta_x = (dist_normed * offset[1]) + offset[0] delta_y = (dist_normed * offset[3]) + offset[2] delta_xp = offset[1] delta_yp = offset[3] beam_out.x[0, :] -= delta_x beam_out.x[1, :] -= delta_xp beam_out.x[2, :] -= delta_y beam_out.x[3, :] -= delta_yp beam_out.x = np.dot(r_in, beam_out.x) return beam_out.x def _ReAdjustBeamByLineOffset(self, ele, beam_out, offset): """Reset the beam position by the offset specified for the entire beamline after the call to Track()""" r_out = RotMats(-offset[5])[1] line_length = self[-1].S - self[0].S dist_along_line = ele.S - self[0].S dist_normed = dist_along_line - (line_length/2) # dist from line centre delta_x = (dist_normed * offset[1]) + offset[0] delta_y = (dist_normed * offset[3]) + offset[2] delta_xp = offset[1] delta_yp = offset[3] beam_out.x[0, :] += delta_x beam_out.x[1, :] += delta_xp beam_out.x[2, :] += delta_y beam_out.x[3, :] += delta_yp beam_out.x = np.dot(r_out, beam_out.x) return beam_out.x def SetSPos(self, ini_s=0): """Sets the longitudinal position of each element based on an initial value that defines the location of the upstream end of the first element (ini_s), and the length of each subsequent element.""" cum_s = ini_s for i in self: if i.__class__.__name__ == 'Serpentine': i.beamline.SetSPos(ini_s=cum_s) if i.beamline[-1].__class__.__name__ != 'Serpentine': cum_s = i.beamline[-1].S+i.beamline[-1].L continue i.S = cum_s cum_s += i.L def TwissProp(self, ini_twiss): """Propagates an initial twiss object ('ini_twiss') through the lattice. For each element, the twiss calculated at its downstream end are stored as an attribute of that element. 
The twiss output at the end of the lattice are returned from this function.""" sum_phix, sum_phiy = 0, 0 final_twiss = copy.deepcopy(ini_twiss) finalgammax = (1+ini_twiss.alphax**2) / ini_twiss.betax finalgammay = (1+ini_twiss.alphay**2) / ini_twiss.betay for ele in self: ele.twiss = copy.deepcopy(final_twiss) if ele.__class__.__name__ == 'Serpentine': ele.TwissProp() continue det_x = np.linalg.det(ele.R[0:2, 0:2]) det_y = np.linalg.det(ele.R[2:4, 2:4]) deltaphix = np.arctan2(ele.R[0, 1] , \ (final_twiss.betax*ele.R[0, 0] - final_twiss.alphax*ele.R[0, 1])) deltaphiy = np.arctan2(ele.R[2, 3] , \ (final_twiss.betay*ele.R[2, 2] - final_twiss.alphay*ele.R[2, 3])) sum_phix += deltaphix sum_phiy += deltaphiy betax = final_twiss.betax alphax = final_twiss.alphax gammax = finalgammax betay = final_twiss.betay alphay = final_twiss.alphay gammay = finalgammay final_twiss.betax = ( (ele.R[0, 0]**2 * betax) + (-2*ele.R[0, 0]*ele.R[0, 1] * alphax) + (ele.R[0, 1]**2 * gammax) ) / det_x final_twiss.alphax = ( (-ele.R[0, 0]*ele.R[1, 0] * betax) + ((ele.R[0, 0]*ele.R[1, 1] + ele.R[0, 1]*ele.R[1, 0]) * alphax) + (-ele.R[0, 1]*ele.R[1, 1] * gammax) ) / det_x finalgammax = (1 + final_twiss.alphax**2) / final_twiss.betax final_twiss.betay = ( (ele.R[2, 2]**2 * betay) + (-2*ele.R[2, 2]*ele.R[2, 3] * alphay) + (ele.R[2, 3]**2 * gammay) ) / det_y final_twiss.alphay = ( (-ele.R[2, 2]*ele.R[3, 2] * betay) + ((ele.R[2, 2]*ele.R[3, 3] + ele.R[2, 3]*ele.R[3, 2]) * alphay) + (-ele.R[2, 3]*ele.R[3, 3] * gammay) ) / det_y finalgammay = (1 + final_twiss.alphay**2) / final_twiss.betay etax = final_twiss.etax etaxp = final_twiss.etaxp etay = final_twiss.etay etayp = final_twiss.etayp final_twiss.etax = ele.R[0,0]*etax+ele.R[0,1]*etaxp+ele.R[0,5] final_twiss.etaxp = ele.R[1,0]*etax+ele.R[1,1]*etaxp+ele.R[1,5] final_twiss.etay = ele.R[2,2]*etay+ele.R[2,3]*etayp+ele.R[2,5] final_twiss.etayp = ele.R[3,2]*etay+ele.R[3,3]*etayp+ele.R[3,5] final_twiss.phix = sum_phix final_twiss.phiy = sum_phiy return final_twiss def ZeroCors(self): """Sets the field of all correctors in the lattice to zero. This is useful for reverting to the default lattice after a steering operation has been performed.""" import elements for ele in self: if (type(ele) == elements.Xcor or type(ele) == elements.Ycor or type(ele) == elements.XYcor): ele.B = 0 def SingleRmat(self, i): """Returns the already calculated R-matrix for beamline[i]. i.e. it returns beamline[i].R.""" return self[i].R def GetMomProfile(self): """Returns the momentum profile of the reference particle""" spos = [ele.S for ele in self] mom = [ele.P for ele in self] return (spos, mom) def GetEkProfile(self, restmass): """Returns the kinetic energy profile of the reference particle""" spos = [ele.S for ele in self] kenergy = [np.sqrt(ele.P**2+restmass**2)-restmass for ele in self] return (spos, kenergy) def GetRFPhases(self): """Returns the RF phases of the AccCav objects in beamline.""" acccavs = self.GetEleByType('AccCav') return [ele.phi for ele in acccavs] def XRmat(self, ind=0): """Print the 2x2 block of the R matrix corresponding to the horizontal transverse space. 'ind' is the element for which the value is printed.""" print self[ind].name + " x matrix:" print self[ind].R[0:2, 0:2] def YRmat(self, ind=0): """Print the 2x2 block of the R matrix corresponding to the vertical transverse space. 
'ind' is the element for which the value is printed.""" print self[ind].name + " y matrix:" print self[ind].R[2:4, 2:4] def LongRmat(self, ind=0): """Print the 2x2 block of the R matrix corresponding to the longitudinal space. 'ind' is the element for which the value is printed.""" print self[ind].name + " longitudinal matrix:" print self[ind].R[4:6, 4:6] def GetTwiss(self): """Returns a dictionary object containing the Twiss paramters calculated for the beamline.""" twiss_dict = {} twiss_dict['S'] = [] twiss_dict['betax'] = [] twiss_dict['betay'] = [] twiss_dict['alphax'] = [] twiss_dict['alphay'] = [] twiss_dict['phix'] = [] twiss_dict['phiy'] = [] twiss_dict['etax'] = [] twiss_dict['etay'] = [] twiss_dict['etaxp'] = [] twiss_dict['etayp'] = [] for ele in self: if ele.__class__.__name__ == 'Serpentine': subtwiss_dict = ele.beamline.GetTwiss() twiss_dict['S'].extend(subtwiss_dict['S']) twiss_dict['betax'].extend(subtwiss_dict['betax']) twiss_dict['betay'].extend(subtwiss_dict['betay']) twiss_dict['alphax'].extend(subtwiss_dict['alphax']) twiss_dict['alphay'].extend(subtwiss_dict['alphay']) twiss_dict['phix'].extend(subtwiss_dict['phix']) twiss_dict['phiy'].extend(subtwiss_dict['phiy']) twiss_dict['etax'].extend(subtwiss_dict['etax']) twiss_dict['etay'].extend(subtwiss_dict['etay']) twiss_dict['etaxp'].extend(subtwiss_dict['etaxp']) twiss_dict['etayp'].extend(subtwiss_dict['etayp']) else: twiss_dict['S'].append(ele.S) twiss_dict['betax'].append(ele.twiss.betax) twiss_dict['betay'].append(ele.twiss.betay) twiss_dict['alphax'].append(ele.twiss.alphax) twiss_dict['alphay'].append(ele.twiss.alphay) twiss_dict['phix'].append(ele.twiss.phix) twiss_dict['phiy'].append(ele.twiss.phiy) twiss_dict['etax'].append(ele.twiss.etax) twiss_dict['etay'].append(ele.twiss.etay) twiss_dict['etaxp'].append(ele.twiss.etaxp) twiss_dict['etayp'].append(ele.twiss.etayp) return twiss_dict class ProgressBar: """A class to display a progress bar when tracking through a beamline.""" def __init__(self, minvalue = 0, maxvalue = 10, totalwidth=12): self.progbar = "[]" # This holds the progress bar string self.min = minvalue self.max = maxvalue self.span = maxvalue - minvalue self.width = totalwidth self.amount = 0 # When amount == max, we are 100% done self.progbar = "" self.percentdone = 0 self.updateAmount(0) # Build progress bar string def updateAmount(self, new_amount = 0): """Calculate the percentage compled, and update the progbar string.""" if new_amount < self.min: new_amount = self.min if new_amount > self.max: new_amount = self.max self.amount = new_amount self.percentDone() self.makestr() def percentDone(self): """Figure out the new percent done, round to an integer""" difffrommin = float(self.amount - self.min) percentdone = (difffrommin / float(self.span)) * 100.0 self.percentdone = int(round(percentdone)) def makestr(self): """Figure out how many hash bars the percentage should be""" allfull = self.width - 2 numhashes = (self.percentdone / 100.0) * allfull numhashes = int(round(numhashes)) # build a progress bar with hashes and spaces self.progbar = "[" + '#'*numhashes + ' '*(allfull-numhashes) + "]" # figure out where to put the percentage, roughly centered percentplace = (len(self.progbar) / 2) - len(str(self.percentdone)) percentstring = str(self.percentdone) + "%" # slice the percentage into the bar self.progbar = self.progbar[0:percentplace] + percentstring + \ self.progbar[percentplace+len(percentstring):] def __str__(self): return str(self.progbar) def fixborkedlist(borkedlist): """A method to repair 
the broken lists returned by the find methods. This function should not be called by users.""" buildlist = list() if isinstance(borkedlist, int): return borkedlist for i in borkedlist: if isinstance(i, int): buildlist.append(i) else: newlist = fixborkedlist(i) for newi in newlist: buildlist.append(newi) return buildlist # A test suite if __name__ == '__main__': from elements import Drift, Quad import beamrep import matplotlib.pylab as plt Shortline = Line() Shortline.append(Drift(name='ele1', L=0.75)) Shortline.append(Quad(name='ele2', L=0.25, B=5)) Shortline.append(Drift(name='ele3', L=1)) Shortline.append(Quad(name='ele4', L=0.25, B=-5)) beamline = Shortline * 5 # print "="*20 # print " SingleRmat" # print "="*20 # for i in range(0, len(beamline)): # print "%s: " % i, # print SingleRmat(beamline, i) # print # print "="*20 # print " RmatAtoB" # print "="*20 # for i in range(0, len(beamline)): # print "%s: " % i, # print RmatAtoB(beamline, 0, i) print print "="*20 print " TwissProp" print "="*20 i_twiss = {} i_twiss['betax'] = 1 i_twiss['alphax'] = 0 i_twiss['betay'] = 2 i_twiss['alphay'] = 0 f_twiss = beamline.TwissProp(i_twiss) plt.figure(1) beamline.PlotTwiss(f_twiss, ax=1, ay=1, px=1, py=1) print "Assigning beam..." beamin = beamrep.GaussBeam(N=1e4) print "Starting tracking..." # profile.run('beamout = elements.Tracking(beamin)') # profile.run('beamout = beamline.Track(beamin)') beamout = beamline.Track(beamin) print "Done. Now printing figures." plt.figure(2) plt.subplot(121) plt.plot(beamin.x[0, :], beamin.x[1, :], 'bx') plt.subplot(121) plt.plot(beamout.x[0, :], beamout.x[1, :], 'r.') plt.subplot(122) plt.plot(beamin.x[2, :], beamin.x[3, :], 'bx') plt.subplot(122) plt.plot(beamout.x[2, :], beamout.x[3, :], 'r.') plt.show()
gpl-3.0
1,009,788,563,410,026,200
38.152727
81
0.555029
false
3.439387
false
false
false
hansomesong/TracesAnalyzer
Plot/Plot_newSize/Plot_variable_VP_scatter_RLOCs.py
1
4458
__author__ = 'yueli'

import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from config.config import *

# Import the targeted raw CSV file
rawCSV_file1 = os.path.join(
    CSV_FILE_DESTDIR, 'For_different_5_VP', 'Deleted_database',
    'EID-153.16.47.16-MR-198.6.255.37',
    "liege-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)
rawCSV_file2 = os.path.join(
    CSV_FILE_DESTDIR, 'For_different_5_VP', 'Deleted_database',
    'EID-153.16.47.16-MR-198.6.255.37',
    "temple-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)
rawCSV_file3 = os.path.join(
    CSV_FILE_DESTDIR, 'For_different_5_VP', 'Deleted_database',
    'EID-153.16.47.16-MR-198.6.255.37',
    "ucl-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)
rawCSV_file4 = os.path.join(
    CSV_FILE_DESTDIR, 'For_different_5_VP', 'Deleted_database',
    'EID-153.16.47.16-MR-198.6.255.37',
    "umass-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)
rawCSV_file5 = os.path.join(
    CSV_FILE_DESTDIR, 'For_different_5_VP', 'Deleted_database',
    'EID-153.16.47.16-MR-198.6.255.37',
    "wiilab-EID-153.16.47.16-MR-198.6.255.37.log.csv"
)

# Define a function to get the experiment number list from the CSV file
def getTime(rawCSV_file):
    i = -1
    for line in open(rawCSV_file):
        i = i + 1
        lines = line.split(";")
        if lines[0] == "Round Type":
            continue
        else:
            time.append(i)
    return time

def getRlocSet(rawCSV_file):
    i = -1
    responseList = []
    for line in open(rawCSV_file):
        print line
        i = i + 1
        lines = line.split(";")
        if lines[0] == "Round Type":
            print "Round Type"
            continue
        else:
            if lines[0] == "NegativeReply":
                print "Done"
                responseList.append(-1)
            elif lines[0] == "RoundNoReply":
                responseList.append(0)
            elif lines[0] == "RoundNormal":
                if int(lines[9]) == 1:
                    if lines[14].split(",")[1] == "195.59.156.123":
                        responseList.append(1)
                    elif lines[14].split(",")[1] == "195.59.156.124":
                        responseList.append(2)
                    else:
                        responseList.append(3)
                else:
                    print "There are more than 2 RLOCs together"
            else:
                print "Unknown type exists"
    return responseList

time = []
time = getTime(rawCSV_file1)
print "time", time
rlocSet1 = getRlocSet(rawCSV_file1)
print "rlocSet1:", rlocSet1.__len__()
rlocSet2 = getRlocSet(rawCSV_file2)
print "rlocSet2:", rlocSet2.__len__()
rlocSet3 = getRlocSet(rawCSV_file3)
print "rlocSet3:", rlocSet3.__len__()
rlocSet4 = getRlocSet(rawCSV_file4)
print "rlocSet4:", rlocSet4.__len__()
rlocSet5 = getRlocSet(rawCSV_file5)
print "rlocSet5:", rlocSet5.__len__()

# Modify the size and dpi of picture, default size is (8,6), default dpi is 80
plt.gcf().set_size_inches(32, 17)

# Define font
font_label = {
    'fontname': 'Times New Roman',
    'color': 'black',
    'fontsize': 70
}

plt.scatter(time, rlocSet1, color='purple', marker="o", label="VP1", s=700)
plt.scatter(time, rlocSet2, color='green', marker='>', label="VP2", s=700)
plt.scatter(time, rlocSet3, color='red', marker=(5, 0), label="VP3", s=700)
plt.scatter(time, rlocSet4, color='orange', marker='*', label="VP4", s=700)
plt.scatter(time, rlocSet5, color='blue', marker='+', label="VP5", s=700)

response = np.linspace(-1, 2, 4)
plt.xlabel("experiment numbers", font_label)
plt.ylabel("different Map-Replies", font_label)
# plt.title("Map Replies over time for EID-153.16.47.16 from MR-198.6.255.37 in 5 VPs", fontsize=20)
plt.xlim(0, 798)
# plt.xlim(550, 600)
plt.ylim(-2, 3)
plt.xticks(fontsize=45, fontname='Times New Roman')
plt.yticks(response, ('Negative\nMap-Reply', 'No Map-\nReply', 'RLOC 1', 'RLOC 2'),
           fontsize=45, fontname='Times New Roman')

# loc=1 makes legend locating at right-up;
# loc=2 makes legend locating at left-up;
# loc=3 makes legend locating at left-down
# loc=4 makes legend locating at right-down
# Just have one point in legend
mpl.rc('legend', scatterpoints=1)
mpl.rc('legend', fontsize=45)
mpl.rc('legend', markerscale=1.5)
plt.legend(loc=4)

plt.savefig(
    os.path.join(PLOT_DIR, 'Plot_newSize', 'Plot_variable_VP_different_RLOCs.eps'),
    dpi=300, transparent=True)
# plt.show()
gpl-2.0
-854,535,159,552,120,600
28.926174
124
0.606999
false
2.826886
false
false
false
xsixing/blaze
blaze/io/sql/ops.py
1
3865
"""SQL implementations of element-wise ufuncs.""" from __future__ import absolute_import, division, print_function from ...compute.function import function, kernel from ...compute.ops import ufuncs from .kernel import sql_kernel, SQL from .syntax import Call, Expr, QOrderBy, QWhere, And, Or, Not def sqlfunction(signature): def decorator(f): blaze_func = function(signature)(f) kernel(blaze_func, SQL, f, signature) return blaze_func return decorator def define_unop(signature, name, op): """Define a unary sql operator""" def unop(x): return Expr([op, x]) unop.__name__ = name _implement(unop, signature) return unop def define_binop(signature, name, op): """Define a binary sql operator""" def binop(a, b): return Expr([a, op, b]) binop.__name__ = name _implement(binop, signature) return binop def _implement(f, signature): name = f.__name__ blaze_func = getattr(ufuncs, name) #print("implement", f, signature, blaze_func) sql_kernel(blaze_func, f, signature) # Arithmetic add = define_binop("(A... * T, A... * T) -> A... * T", "add", "+") multiply = define_binop("(A... * T, A... * T) -> A... * T", "multiply", "*") subtract = define_binop("(A... * T, A... * T) -> A... * T", "subtract", "-") floordiv = define_binop("(A... * T, A... * T) -> A... * T", "floor_divide", "/") divide = define_binop("(A... * T, A... * T) -> A... * T", "divide", "/") truediv = define_binop("(A... * T, A... * T) -> A... * T", "true_divide", "/") mod = define_binop("(A... * T, A... * T) -> A... * T", "mod", "%") negative = define_unop("(A... * T) -> A... * T", "negative", "-") # Compare eq = define_binop("(A... * T, A... * T) -> A... * bool", "equal", "==") ne = define_binop("(A... * T, A... * T) -> A... * bool", "not_equal", "!=") lt = define_binop("(A... * T, A... * T) -> A... * bool", "less", "<") le = define_binop("(A... * T, A... * T) -> A... * bool", "less_equal", "<=") gt = define_binop("(A... * T, A... * T) -> A... * bool", "greater", ">") ge = define_binop("(A... * T, A... * T) -> A... * bool", "greater_equal", ">=") # Logical logical_and = define_binop("(A... * bool, A... * bool) -> A... * bool", "logical_and", "AND") logical_or = define_binop("(A... * bool, A... * bool) -> A... * bool", "logical_or", "OR") logical_not = define_unop("(A... * bool) -> A... * bool", "logical_not", "NOT") def logical_xor(a, b): # Potential exponential code generation... return And(Or(a, b), Not(And(a, b))) kernel(ufuncs.logical_xor, SQL, logical_xor, "(A... * bool, A... * bool) -> A... * bool") # SQL Functions @sqlfunction('(A * DType) -> DType') def sum(col): return Call('SUM', [col]) @sqlfunction('(A * DType) -> DType') def avg(col): return Call('AVG', [col]) @sqlfunction('(A * DType) -> DType') def min(col): return Call('MIN', [col]) @sqlfunction('(A * DType) -> DType') def max(col): return Call('MAX', [col]) # SQL Join, Where, Group by, Order by def merge(left, right, how='left', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=True): """ Join two tables. """ raise NotImplementedError def index(col, index, order=None): """ Index a table or column with a predicate. view = merge(table1, table2) result = view[table1.id == table2.id] or avg(table1.age[table1.state == 'TX']) """ result = sqlindex(col, index) if order: result = sqlorder(result, order) return result @sqlfunction('(A * S, A * B) -> var * S') def sqlindex(col, where): return QWhere(col, where) @sqlfunction('(A * S, A * B) -> A * S') def sqlorder(col, by): if not isinstance(by, (tuple, list)): by = [by] return QOrderBy(col, by)
bsd-3-clause
-2,212,009,295,683,632,400
28.280303
80
0.542044
false
2.989172
false
false
false
dbiesecke/dbiesecke.github.io
repo/script.module.urlresolver/lib/urlresolver/plugins/alldebrid.py
1
14611
""" urlresolver Kodi Addon This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import re from urllib import quote_plus from urllib2 import HTTPError import json from urlresolver import common from urlresolver.common import i18n from urlresolver.resolver import UrlResolver, ResolverError logger = common.log_utils.Logger.get_logger(__name__) logger.disable() AGENT = 'URLResolver for Kodi' VERSION = common.addon_version USER_AGENT = '%s/%s' % (AGENT, VERSION) FORMATS = common.VIDEO_FORMATS api_url = 'https://api.alldebrid.com' class AllDebridResolver(UrlResolver): name = "AllDebrid" domains = ['*'] def __init__(self): self.net = common.Net() self.hosters = None self.hosts = None self.headers = {'User-Agent': USER_AGENT} def get_media_url(self, host, media_id, cached_only=False): try: if media_id.lower().startswith('magnet:'): r = re.search('''magnet:.+?urn:([a-zA-Z0-9]+):([a-zA-Z0-9]+)''', media_id, re.I) if r: _hash, _format = r.group(2), r.group(1) if self.__check_cache(_hash): logger.log_debug('AllDebrid: BTIH %s is readily available to stream' % _hash) transfer_id = self.__create_transfer(_hash) else: if self.get_setting('cached_only') == 'true' or cached_only: raise ResolverError('AllDebrid: Cached torrents only allowed to be initiated') else: transfer_id = self.__create_transfer(_hash) self.__initiate_transfer(transfer_id) transfer_info = self.__list_transfer(transfer_id) for _link, _file in transfer_info.get('links').items(): if any(_file.lower().endswith(x) for x in FORMATS): media_id = _link.replace("\/", "/") break self.__delete_transfer(transfer_id) url = '%s/link/unlock?agent=%s&version=%s&token=%s&link=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), media_id) result = self.net.http_GET(url, headers=self.headers).content except HTTPError as e: try: js_result = json.loads(e.read()) if 'error' in js_result: msg = '%s (%s)' % (js_result.get('error'), js_result.get('errorCode')) else: msg = 'Unknown Error (1)' except: msg = 'Unknown Error (2)' raise ResolverError('AllDebrid Error: %s (%s)' % (msg, e.code)) else: js_result = json.loads(result) logger.log_debug('AllDebrid resolve: [%s]' % js_result) if 'error' in js_result: raise ResolverError('AllDebrid Error: %s (%s)' % (js_result.get('error'), js_result.get('errorCode'))) elif js_result.get('success', False): if js_result.get('infos').get('link'): return js_result.get('infos').get('link') raise ResolverError('AllDebrid: no stream returned') def __check_cache(self, media_id): try: url = '%s/magnet/instant?agent=%s&version=%s&token=%s&magnet=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), media_id) result = self.net.http_GET(url, headers=self.headers).content result = json.loads(result) if result.get('success', False): response = result.get('instant', False) return response except: pass return False def __list_transfer(self, transfer_id): try: url = '%s/magnet/status?agent=%s&version=%s&token=%s&id=%s' % (api_url, 
quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), transfer_id) response = self.net.http_GET(url, headers=self.headers).content result = json.loads(response) if result.get('success', False): return result except: pass return {} def __create_transfer(self, media_id): try: url = '%s/magnet/upload?agent=%s&version=%s&token=%s&magnet=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), media_id) response = self.net.http_GET(url, headers=self.headers).content result = json.loads(response) if result.get('success', False): logger.log_debug('Transfer successfully started to the AllDebrid cloud') return result.get('id', "") except: pass return "" def __initiate_transfer(self, transfer_id, interval=5): try: transfer_info = self.__list_transfer(transfer_id) if transfer_info: line1 = transfer_info.get('filename') line2 = 'Saving torrent to UptoBox via AllDebrid' line3 = transfer_info.get('status') with common.kodi.ProgressDialog('Resolve URL AllDebrid Transfer', line1, line2, line3) as pd: while not transfer_info.get('statusCode') == 4: common.kodi.sleep(1000 * interval) transfer_info = self.__list_transfer(transfer_id) file_size = transfer_info.get('size') line1 = transfer_info.get('filename') if transfer_info.get('statusCode') == 1: download_speed = round(float(transfer_info.get('downloadSpeed')) / (1000**2), 2) progress = int(float(transfer_info.get('downloaded')) / file_size * 100) if file_size > 0 else 0 line3 = "Downloading at %s MB/s from %s peers, %s%% of %sGB completed" % (download_speed, transfer_info.get('seeders'), progress, round(float(file_size) / (1000 ** 3), 2)) elif transfer_info.get('statusCode') == 3: upload_speed = round(float(transfer_info.get('uploadSpeed')) / (1000 ** 2), 2) progress = int(float(transfer_info.get('uploaded')) / file_size * 100) if file_size > 0 else 0 line3 = "Uploading at %s MB/s, %s%% of %s GB completed" % (upload_speed, progress, round(float(file_size) / (1000 ** 3), 2)) else: line3 = transfer_info.get('status') progress = 0 logger.log_debug(line3) pd.update(progress, line1=line1, line3=line3) if pd.is_canceled(): self.__delete_transfer(transfer_id) # self.__delete_folder() raise ResolverError('Transfer ID %s :: Canceled by user' % transfer_id) elif 5 <= transfer_info.get('statusCode') <= 10: self.__delete_transfer(transfer_id) # self.__delete_folder() raise ResolverError('Transfer ID %s :: %s' % (transfer_id, transfer_info.get('status'))) common.kodi.sleep(1000 * interval) # allow api time to generate the links return except Exception as e: self.__delete_transfer(transfer_id) raise ResolverError('Transfer ID %s :: %s' % (transfer_id, e)) def __delete_transfer(self, transfer_id): try: url = '%s/magnet/delete?agent=%s&version=%s&token=%s&id=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token'), transfer_id) response = self.net.http_GET(url, headers=self.headers).content result = json.loads(response) if result.get('success', False): logger.log_debug('Transfer ID "%s" deleted from the AllDebrid cloud' % transfer_id) return True except: pass return False def get_url(self, host, media_id): return media_id def get_host_and_id(self, url): return 'www.alldebrid.com', url @common.cache.cache_method(cache_limit=8) def get_all_hosters(self): hosters = [] url = '%s/user/hosts?agent=%s&version=%s&token=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION), self.get_setting('token')) try: js_result = self.net.http_GET(url, headers=self.headers).content js_data = json.loads(js_result) if 
js_data.get('success', False): regexes = [value.get('regexp').replace('\/', '/') for key, value in js_data.get('hosts', {}).iteritems() if value.get('status', False)] logger.log_debug('AllDebrid hosters : %s' % regexes) hosters = [re.compile(regex) for regex in regexes] else: logger.log_error('Error getting AD Hosters') except Exception as e: logger.log_error('Error getting AD Hosters: %s' % e) return hosters @common.cache.cache_method(cache_limit=8) def get_hosts(self): hosts = [] url = '%s/hosts/domains' % api_url try: js_result = self.net.http_GET(url, headers=self.headers).content js_data = json.loads(js_result) if js_data.get('success', False): hosts = [host.replace('www.', '') for host in js_data.get('hosts', [])] if self.get_setting('torrents') == 'true': hosts.extend([u'torrent', u'magnet']) logger.log_debug('AllDebrid hosts : %s' % hosts) else: logger.log_error('Error getting AD Hosters') except Exception as e: logger.log_error('Error getting AD Hosts: %s' % e) return hosts def valid_url(self, url, host): logger.log_debug('in valid_url %s : %s' % (url, host)) if url: if url.lower().startswith('magnet:') and self.get_setting('torrents') == 'true': return True if self.hosters is None: self.hosters = self.get_all_hosters() for regexp in self.hosters: # logger.log_debug('AllDebrid checking host : %s' %str(regexp)) if re.search(regexp, url): logger.log_debug('AllDebrid Match found') return True elif host: if self.hosts is None: self.hosts = self.get_hosts() if any(host in item for item in self.hosts): return True return False # SiteAuth methods def login(self): if not self.get_setting('token'): self.authorize_resolver() def reset_authorization(self): self.set_setting('token', '') def authorize_resolver(self): url = '%s/pin/get?agent=%s&version=%s' % (api_url, quote_plus(AGENT), quote_plus(VERSION)) js_result = self.net.http_GET(url, headers=self.headers).content js_data = json.loads(js_result) line1 = 'Go to URL: %s' % (js_data.get('base_url').replace('\/', '/')) line2 = 'When prompted enter: %s' % (js_data.get('pin')) with common.kodi.CountdownDialog('Resolve Url All Debrid Authorization', line1, line2, countdown=js_data.get('expired_in', 120)) as cd: result = cd.start(self.__check_auth, [js_data.get('check_url').replace('\/', '/')]) # cancelled if result is None: return return self.__get_token(js_data.get('check_url').replace('\/', '/')) def __get_token(self, url): try: js_result = self.net.http_GET(url, headers=self.headers).content js_data = json.loads(js_result) if js_data.get("success", False): token = js_data.get('token', '') logger.log_debug('Authorizing All Debrid Result: |%s|' % token) self.set_setting('token', token) return True except Exception as e: logger.log_debug('All Debrid Authorization Failed: %s' % e) return False def __check_auth(self, url): activated = False try: js_result = self.net.http_GET(url, headers=self.headers).content js_data = json.loads(js_result) if js_data.get("success", False): activated = js_data.get('activated', False) except Exception as e: logger.log_debug('Exception during AD auth: %s' % e) return activated @classmethod def get_settings_xml(cls): xml = super(cls, cls).get_settings_xml() # xml.append('<setting id="%s_autopick" type="bool" label="%s" default="false"/>' % (cls.__name__, i18n('auto_primary_link'))) xml.append('<setting id="%s_torrents" type="bool" label="%s" default="true"/>' % (cls.__name__, i18n('torrents'))) xml.append('<setting id="%s_cached_only" enable="eq(-1,true)" type="bool" label="%s" default="false" />' % (cls.__name__, 
i18n('cached_only'))) xml.append('<setting id="%s_auth" type="action" label="%s" action="RunPlugin(plugin://script.module.urlresolver/?mode=auth_ad)"/>' % (cls.__name__, i18n('auth_my_account'))) xml.append('<setting id="%s_reset" type="action" label="%s" action="RunPlugin(plugin://script.module.urlresolver/?mode=reset_ad)"/>' % (cls.__name__, i18n('reset_my_auth'))) xml.append('<setting id="%s_token" visible="false" type="text" default=""/>' % cls.__name__) return xml @classmethod def _is_enabled(cls): return cls.get_setting('enabled') == 'true' and cls.get_setting('token') @classmethod def isUniversal(self): return True
mit
5,094,376,378,971,055,000
45.237342
199
0.549038
false
3.891079
false
false
false
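The resolver above waits on its cloud transfers with a poll-and-update loop in __initiate_transfer: fetch the transfer status, refresh a progress dialog, and bail out on error or cancellation. A minimal sketch of that pattern with Kodi and AllDebrid stripped out follows; list_transfer and on_progress are hypothetical stand-in callables, not part of the plugin's API.

import time


class TransferError(Exception):
    pass


def poll_transfer(list_transfer, transfer_id, on_progress, interval=5):
    """Poll list_transfer(transfer_id) until it reports statusCode 4 (finished).

    on_progress(percent, status_text) is called on every pass; returning False cancels.
    The status codes mirror the resolver above: 4 means done, 5-10 are error states.
    """
    while True:
        info = list_transfer(transfer_id)
        code = info.get('statusCode')
        if code == 4:
            return info
        if code is not None and 5 <= code <= 10:
            raise TransferError(info.get('status', 'transfer failed'))
        size = info.get('size') or 0
        percent = int(float(info.get('downloaded', 0)) / size * 100) if size > 0 else 0
        if on_progress(percent, info.get('status', '')) is False:
            raise TransferError('cancelled by user')
        time.sleep(interval)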
ireapps/census
dataprocessing/load_sf_data_2010.py
1
1395
#!/usr/bin/env python

import sys

from csvkit.unicsv import UnicodeCSVReader
from pymongo import objectid

import config
import utils

if len(sys.argv) < 2:
    sys.exit('You must provide the filename of a CSV as an argument to this script.')

FILENAME = sys.argv[1]
YEAR = '2010'

collection = utils.get_geography_collection()

with open(FILENAME) as f:
    rows = UnicodeCSVReader(f)
    headers = rows.next()

    updates = 0
    row_count = 0

    for row in rows:
        row_count += 1
        row_dict = dict(zip(headers, row))

        xref = utils.xref_from_row_dict(row_dict)

        geography = utils.find_geography_by_xref(collection, xref, fields=['data'])

        if not geography:
            continue

        if YEAR not in geography['data']:
            geography['data'][YEAR] = {}

        tables = {}

        for k, v in row_dict.items():
            # Format table names to match labels
            t = utils.parse_table_from_key(k)

            if t not in tables:
                tables[t] = {}

            tables[t][k] = v

        for k, v in tables.items():
            geography['data'][YEAR][k] = v

        collection.update({ '_id': objectid.ObjectId(geography['_id']) }, { '$set': { 'data': geography['data'] } }, safe=True)
        updates += 1

print "File: %s" % FILENAME
print ' Row count: %i' % row_count
print ' Updated: %i' % updates
mit
-8,988,254,867,123,021,000
22.25
127
0.573477
false
3.531646
false
false
false
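The loader above folds every CSV column into a per-table dict before issuing a single MongoDB update per geography. That grouping step is isolated in the small sketch below; the real key-to-table rule lives in utils.parse_table_from_key, which is not shown in this record, so the lambda in the example is only a placeholder.

def group_columns_by_table(row_dict, parse_table_from_key):
    """Group {column: value} pairs into {table: {column: value}}."""
    tables = {}
    for key, value in row_dict.items():
        table = parse_table_from_key(key)
        tables.setdefault(table, {})[key] = value
    return tables


# Placeholder key-to-table rule for illustration only (not the project's real parser):
sample_row = {'P0010001': '312', 'P0020002': '150'}
print(group_columns_by_table(sample_row, lambda key: key[:4]))
# -> {'P001': {'P0010001': '312'}, 'P002': {'P0020002': '150'}}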
Joergen/zamboni
mkt/reviewers/helpers.py
1
6578
import datetime import urlparse from django.utils.encoding import smart_str import jinja2 import waffle from jingo import register from tower import ugettext as _, ugettext_lazy as _lazy from access import acl from amo.helpers import impala_breadcrumbs from amo.urlresolvers import reverse from mkt.developers.helpers import mkt_page_title from mkt.reviewers.utils import (AppsReviewing, clean_sort_param, create_sort_link, device_queue_search) @register.function @jinja2.contextfunction def reviewers_breadcrumbs(context, queue=None, items=None): """ Wrapper function for ``breadcrumbs``. Prepends 'Editor Tools' breadcrumbs. **queue** Explicit queue type to set. **items** list of [(url, label)] to be inserted after Add-on. """ crumbs = [(reverse('reviewers.home'), _('Reviewer Tools'))] if queue: queues = {'pending': _('Apps'), 'rereview': _('Re-reviews'), 'updates': _('Updates'), 'escalated': _('Escalations'), 'device': _('Device'), 'moderated': _('Moderated Reviews'), 'reviewing': _('Reviewing'), 'pending_themes': _('Pending Themes'), 'flagged_themes': _('Flagged Themes'), 'rereview_themes': _('Update Themes')} if items: url = reverse('reviewers.apps.queue_%s' % queue) else: # The Addon is the end of the trail. url = None crumbs.append((url, queues[queue])) if items: crumbs.extend(items) return impala_breadcrumbs(context, crumbs, add_default=True) @register.function @jinja2.contextfunction def reviewers_page_title(context, title=None, addon=None): if addon: title = u'%s | %s' % (title, addon.name) else: section = _lazy('Reviewer Tools') title = u'%s | %s' % (title, section) if title else section return mkt_page_title(context, title) @register.function @jinja2.contextfunction def queue_tabnav(context): """ Returns tuple of tab navigation for the queue pages. Each tuple contains three elements: (named_url. tab_code, tab_text) """ request = context['request'] counts = context['queue_counts'] apps_reviewing = AppsReviewing(request).get_apps() # Apps. if acl.action_allowed(request, 'Apps', 'Review'): rv = [ ('reviewers.apps.queue_pending', 'pending', _('Apps ({0})', counts['pending']).format(counts['pending'])), ('reviewers.apps.queue_rereview', 'rereview', _('Re-reviews ({0})', counts['rereview']).format( counts['rereview'])), ('reviewers.apps.queue_updates', 'updates', _('Updates ({0})', counts['updates']).format(counts['updates'])), ] if acl.action_allowed(request, 'Apps', 'ReviewEscalated'): rv.append(('reviewers.apps.queue_escalated', 'escalated', _('Escalations ({0})', counts['escalated']).format( counts['escalated']))) rv.extend([ ('reviewers.apps.queue_moderated', 'moderated', _('Moderated Reviews ({0})', counts['moderated']) .format(counts['moderated'])), ('reviewers.apps.apps_reviewing', 'reviewing', _('Reviewing ({0})').format(len(apps_reviewing))), ]) else: rv = [] if waffle.switch_is_active('buchets') and 'pro' in request.GET: device_srch = device_queue_search(request) rv.append(('reviewers.apps.queue_device', 'device', _('Device ({0})').format(device_srch.count()),)) return rv @register.function @jinja2.contextfunction def logs_tabnav(context): """ Returns tuple of tab navigation for the log pages. Each tuple contains three elements: (named url, tab_code, tab_text) """ rv = [ ('reviewers.apps.logs', 'apps', _('Reviews')) ] return rv @register.function @jinja2.contextfunction def logs_tabnav_themes(context): """ Returns tuple of tab navigation for the log pages. 
Each tuple contains three elements: (named url, tab_code, tab_text) """ rv = [ ('reviewers.themes.logs', 'themes', _('Reviews')) ] if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'): rv.append(('reviewers.themes.deleted', 'deleted', _('Deleted'))) return rv @register.function @jinja2.contextfunction def queue_tabnav_themes(context): """Similar to queue_tabnav, but for themes.""" tabs = [] if acl.action_allowed(context['request'], 'Personas', 'Review'): tabs.append(( 'reviewers.themes.list', 'pending_themes', _('Pending'), )) if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'): tabs.append(( 'reviewers.themes.list_flagged', 'flagged_themes', _('Flagged'), )) tabs.append(( 'reviewers.themes.list_rereview', 'rereview_themes', _('Updates'), )) return tabs @register.function @jinja2.contextfunction def queue_tabnav_themes_interactive(context): """Tabnav for the interactive shiny theme queues.""" tabs = [] if acl.action_allowed(context['request'], 'Personas', 'Review'): tabs.append(( 'reviewers.themes.queue_themes', 'pending', _('Pending'), )) if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'): tabs.append(( 'reviewers.themes.queue_flagged', 'flagged', _('Flagged'), )) tabs.append(( 'reviewers.themes.queue_rereview', 'rereview', _('Updates'), )) return tabs @register.function @jinja2.contextfunction def sort_link(context, pretty_name, sort_field): """Get table header sort links. pretty_name -- name displayed on table header sort_field -- name of get parameter, referenced to in views """ request = context['request'] sort, order = clean_sort_param(request) # Copy search/filter GET parameters. get_params = [(k, v) for k, v in urlparse.parse_qsl(smart_str(request.META['QUERY_STRING'])) if k not in ('sort', 'order')] return create_sort_link(pretty_name, sort_field, get_params, sort, order) @register.function @jinja2.contextfunction def is_expired_lock(context, lock): return lock.expiry < datetime.datetime.now()
bsd-3-clause
-5,529,736,335,256,113,000
30.028302
78
0.59471
false
3.892308
false
false
false
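A recurring detail in the helpers above is rebuilding sortable table-header links while keeping unrelated filters: sort_link strips the current 'sort' and 'order' keys from the query string before handing off to create_sort_link. Below is a standalone sketch of that query-string handling using only the standard library; the toggle rule and parameter names are illustrative assumptions, not the module's actual behaviour.

try:
    from urllib.parse import parse_qsl, urlencode   # Python 3
except ImportError:
    from urlparse import parse_qsl                  # Python 2, as used in the module above
    from urllib import urlencode


def sort_query_string(query_string, sort_field, current_sort, current_order):
    """Return a query string that keeps existing filters but replaces sort/order."""
    params = [(k, v) for k, v in parse_qsl(query_string) if k not in ('sort', 'order')]
    # Flip the direction when re-sorting the already-active column, otherwise default to descending.
    order = 'asc' if (sort_field == current_sort and current_order == 'desc') else 'desc'
    params += [('sort', sort_field), ('order', order)]
    return urlencode(params)


# sort_query_string('q=apps&sort=name&order=asc', 'name', 'name', 'asc')
# -> 'q=apps&sort=name&order=desc'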
brucedjones/pyck
examples/stl_test.py
1
2459
import pyck

domain = [100.0, 100.0, 500.0]
h = domain[0] / 100

smoothingKernelFunc = 2
speedsound = 1
density = 1
shearmodulus = 1
bulkmodulus = 1

# Create a packer, see packers directory for options
Hcp = pyck.HcpPacker(domain, h)
pack = pyck.StructuredPack(Hcp)

tibia_min = [154.2328, 204.0634, -853.9525]  # Read from the tibia STL file
tibia_max = [226.9384, 268.8318, -468.3400]
tibia_len = [tibia_max[0] - tibia_min[0], tibia_max[1] - tibia_min[1], tibia_max[2] - tibia_min[2]]
tibia_center = [tibia_min[0] + tibia_len[0] / 2, tibia_min[1] + tibia_len[1] / 2, tibia_min[2] + tibia_len[2] / 2]

tibia_scale = 1
tibia_len[0] = tibia_len[0] * tibia_scale
tibia_len[1] = tibia_len[1] * tibia_scale
tibia_len[2] = tibia_len[2] * tibia_scale

tibia_dcenter = [domain[0] / 2, domain[1] / 2, domain[2] / 2]
tibia_translation = [tibia_dcenter[0] - tibia_center[0], tibia_dcenter[1] - tibia_center[1], tibia_dcenter[2] - tibia_center[2]]

stlShape = pyck.StlShape(1, 'tibia_low.stl', tibia_translation)
# cube = pyck.Cuboid(2, [0.2, 0.2, 0.2], [0.6, 0.6, 0.6])

# Map the shapes and generate the pack
pack.AddShape(stlShape)
# pack.AddShape(cube)
pack.Process()

# Create a new model from the pack
model = pyck.Model(pack)

# Create a new field of n-dimensional integers
# Arguments are CreateIntField(label, dimensions)
# label - label for this field in the vtp file
# dimensions - dimensionality of this field, doesn't have to correspond to model dimensions
# Create a field of doubles in the same way with CreateDoubleField
stateField = model.CreateIntField("State", 1)

# Arguments are SetIntField(field, tag, value(s))
# field - returned from CreateIntField
# tag - tag applied to particles during shape mapping
# value(s) - singular value or array of values [v1, v2, ... vn] to set on
# particles with a matching tag
model.SetIntField(stateField, 1, 10)
model.SetIntField(stateField, 2, 20)

# Overwrite some parameters
# Arguments are SetParameter(Label, Value)
model.SetParameter("MaxSteps", "100")
model.SetParameter("Mass", "0.5")

# Or use a python dictionary to overwrite parameters
parameters = pyck.Parameters({'ViscAlpha': '0.1', 'ViscBeta': '0.2'})
model.SetParameters(parameters)

# Create a file writer, in this case VTP according to spark format
writer = pyck.SparkWriter()

# Write the VTP file
model.Serialize("tibia.vtp", writer)
mit
-7,106,577,362,135,716,000
32.633803
92
0.681578
false
2.602116
false
false
false
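The STL example above centres the tibia mesh in the packing domain by computing the bounding-box centre and the translation component by component. The same bookkeeping is collected into one small helper below; this is a hypothetical convenience for illustration, not part of the pyck API.

def centering_translation(bbox_min, bbox_max, domain):
    """Translation that moves a bounding box's centre onto the domain's centre."""
    centre = [mn + (mx - mn) / 2.0 for mn, mx in zip(bbox_min, bbox_max)]
    domain_centre = [d / 2.0 for d in domain]
    return [dc - c for dc, c in zip(domain_centre, centre)]


# Same numbers as the script above:
print(centering_translation([154.2328, 204.0634, -853.9525],
                            [226.9384, 268.8318, -468.3400],
                            [100.0, 100.0, 500.0]))
# -> approximately [-140.59, -186.45, 911.15]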
Bam4d/neon
neon/backends/backend.py
1
87983
# ---------------------------------------------------------------------------- # Copyright 2015 Nervana Systems Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ---------------------------------------------------------------------------- """ Defines Tensor and Backend class """ import numpy as np import logging from math import ceil logger = logging.getLogger(__name__) class OpCollection(object): """ A collection of the set of operation strings """ zero_operand_ops = {"rand", "onehot"} unary_ops = {"finite", "neg", "abs", "sgn", "sqrt", "sqr", "exp", "log", "exp2", "log2", "sig", "sig2", "tanh", "tanh2", "transpose", "safelog"} binary_ops = {"assign", "add", "sub", "mul", "div", "eq", "ne", "lt", "le", "gt", "ge", "pow", "minimum", "maximum", "dot"} reduction_ops = {"sum", "max", "min", "argmax", "argmin"} float_ops = zero_operand_ops | unary_ops | binary_ops ew_ops = float_ops - {'dot', 'transpose'} class Tensor(object): """ The n-dimensional array data structure. GPUTensor and Tensor inherits Tensor. Depending on backend, may have additional keyword arguments. All non-keywords arguments shall be in exact same order as Tensor. Arguments: backend (Backend): backend of the tensor. shape (tuple, optional): shape of the tensor. dtype (numpy.ndtype, optional): underlying data type of the elements. name (str, optional): name indentifying the tensor (used in printing). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls See also: GPUTensor class, Tensor class Notes: Unlike numpy, in this implementation we never collapse dimensions, and the minimal number of dimensions will be _min_dims (currently set to 2). So a wrapped scalar will have dimension 1x1. """ def __init__(self, backend, shape=None, dtype=np.float32, name=None, persist_values=True): self.backend = backend self.shape = shape self.dtype = dtype self.name = name self.persist_values = persist_values self._min_dims = 2 def __str__(self): """ Returns a string representation of this Tensor. Returns: str: the representation. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def __repr__(self): """ Returns a more unambiguous string representation of the Tensor. Returns: str: the string representation. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def __len__(self): """ Return the size of the leading dimension of self. Returns: int: the size of the leading dimension. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def __setitem__(self, index, value): """ Assign the specified value to a subset of elements found via slice style indexing along each dimension. e.g. A[5:10, :] = 4.5. Each slice consists of start_idx:stop_idx:step_size triplets. If step_size isn't specified it defaults to 1. 
If start_idx isn't specified it defaults to 0. If stop_idx isn't specified it defaults to the total number of elements along that dimension. As such a slice value of ':' allows one to select all elements along that dimension. Arguments: index (int, slice, tuple): indices of each dimension's slice. value (numeric array, Tensor): values to be assigned to the extracted element subset. If an array it should be the same shape as what key indexes (or be broadcastable as such). Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def __getitem__(self, index): """ Extract a subset view of the items via slice style indexing along each dimension. e.g. A[5:10, :]. Each slice consists of start_idx:stop_idx:step_size triplets. If step_size isn't specified it defaults to 1. If start_idx isn't specified it defaults to 0. If stop_idx isn't specified it defaults to the total number of elements along that dimension. As such a slice value of ':' allows one to select all elements along that dimension. Arguments: index (int, slice, tuple): indices of each dimension's slice. Returns: Tensor: view of self corresponding to the subset items. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def _assign(self, value): """ Assign an input value to the Tensor. The NervanaCPU does clipping for int and uint types, when overflow happens Arguments: value (Tensor, OpTreNode, numeric): the value to be assigned. """ raise NotImplementedError() def set(self, ary): """ Copy host array to the tensor. Arguments: ary (numpy.ndarray): host array, needs to be contiguous Returns: Tensor: self """ raise NotImplementedError() def get(self): """ Copy tensor to host as numpy array. Returns: numpy.ndarray: A host numpy array """ raise NotImplementedError() def asnumpyarray(self): """ Convert the tensor to an in host memory `numpy.ndarray`. A copy of the data may be made depending on where the Tensor normally resides. Returns: numpy.ndarray view or copy of the Tensor data. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def take(self, indices, axis, out=None): """ Select a subset of elements from an array across an axis Arguments: indices (Tensor, numpy ndarray): indicies of elements to select axis (int): axis across which to select the values out (Tensor, numpy ndarray, optional): place the resultant values into this array if specified. Return: Tensor: Tensor with selected values Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def fill(self, value): """ Assign specified value to each element of this Tensor. Arguments: value (numeric): The value to be assigned to each element. Return: Tensor: updated view of the data. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def copy(self, a): """ Construct and return a deep copy of the Tensor passed. Arguments: a (Tensor): the object to copy Returns: Tensor: new array object with the same values as tsr. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def copy_from(self, a): """ Copy contents from `a`. Arguments: a (numpy.ndarray): the host-resident object to copy from Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def reshape(self, *shape): """ Adjusts the dimensions of the data to the specified shape. The number of elements represented by the new shape must be the same as before. 
Arguments: shape (int, list): new length of each dimension Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() @property def T(self): """ Return a transposed view of the data. Returns: Tensor: transposed view of self. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def transpose(self, out=None): """ Return a transposed view of the data. Alias of .T property needed for MOP compatibility. Arguments: out (Tensor, numpy ndarray, optional): place the resultant values into this array if specified. Returns: Tensor: transposed view of self. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def hist(self, tag): """ Compute a histogram of the current tensor values. Arguments: tag (string): Tag to identify the current state of the tensor, useful for disambiguating multiple histograms of the same tensor at different points in time. Returns: Tensor containing the histogram data. Raises: NotImplementedError: Can't be instantiated directly. """ raise NotImplementedError() def __add__(self, other): """ Perform `add` operations. Arguments: other: the right-hand side operand Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("add", self, other) def __sub__(self, other): return OpTreeNode.build("sub", self, other) def __mul__(self, other): return OpTreeNode.build("mul", self, other) def __div__(self, other): return OpTreeNode.build("div", self, other) def __truediv__(self, other): return OpTreeNode.build("div", self, other) def __pow__(self, other): return OpTreeNode.build("pow", self, other) def __radd__(self, other): return OpTreeNode.build("add", other, self) def __rsub__(self, other): return OpTreeNode.build("sub", other, self) def __rmul__(self, other): return OpTreeNode.build("mul", other, self) def __rdiv__(self, other): return OpTreeNode.build("div", other, self) def __rtruediv__(self, other): return OpTreeNode.build("div", other, self) def __rpow__(self, other): return OpTreeNode.build("pow", other, self) def __eq__(self, other): return OpTreeNode.build("eq", self, other) def __ne__(self, other): return OpTreeNode.build("ne", self, other) def __lt__(self, other): return OpTreeNode.build("lt", self, other) def __le__(self, other): return OpTreeNode.build("le", self, other) def __gt__(self, other): return OpTreeNode.build("gt", self, other) def __ge__(self, other): return OpTreeNode.build("ge", self, other) def __abs__(self): return OpTreeNode.build("abs", self, None) def __neg__(self): return OpTreeNode.build("neg", self, None) class Backend(object): """ Backend interface used to manipulate Tensor data. This abstract base class defines what operations each concrete backend must support. NervanaGPU and NervanaCPU inherit Backend. Arguments: rng_seed (int, optional): random number generator seed value default_dtype (numpy.ndtype, optional): Elemental data type to use when creating new tensors if not otherwise specified. Defaults to np.float32 compat_mode (str, optional): Flag to match implementation of other libraries. Currently only 'caffe' is supported, defaults to None. 
""" def __init__(self, rng_seed=None, default_dtype=np.float32, compat_mode=None): # dtype self.default_dtype = default_dtype # use RandomState instead of seed self.rng = np.random.RandomState(rng_seed) self.init_rng_state = self.rng.get_state() # for resetting state # batch size self.bsz = None self._min_dims = 2 if compat_mode is not None: if compat_mode == 'caffe': self.set_caffe_compat() else: raise ValueError('%s mode not supported currently' % compat_mode) else: self.compat_mode = None def output_dim(self, X, S, padding, strides, pooling=False): """ compute along 1 dimension, with these sizes, what will be the output dimension Arguments: X (int): input data dimension S (int): filter dimension padding (int): padding on each side strides (int): striding pooling (bool): flag for setting pooling layer size """ if self.check_caffe_compat() and pooling: size = int(ceil(float(X - S + 2 * padding)/strides)) + 1 if padding > 0 and (size - 1)*strides >= X + padding: # decrement size if last pooling op is completely in padding size -= 1 else: # normal neon output size determination size = (X - S + 2 * padding)/strides + 1 return size def set_caffe_compat(self): """ Set flag to make layers compatible with caffe in terms of conv and pool layer output size determination and dropout layer implementation """ self.compat_mode = 'caffe' def check_caffe_compat(self): return self.compat_mode == 'caffe' def iobuf(self, dim0, x=None, dtype=None, name=None, persist_values=True, shared=None, parallelism=None): """ Allocate input and output buffer for layer based on batch size. This is used because the layer does not know about the batch size. Arguments: dim0 (tuple or int): I/O buffer dimension for layer (without the axis specifying the batch size). x (data-type, optional): If present and not None, `x` will be returned directly. `x` will be not None if the buffer has already been allocated. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. name (str, optional): name indentifying the tensor (used in printing). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls shared (buffer, optional): If present will attempt to reuse the memory in shared to allocate the I/O buffer parallelism (str, optional): Indicates type of parallelism (Data, Model) employed by this buffer. Ignored on CPU and GPU backends, defaults to no parallelism. Returns: Tensor: array object """ if x is not None: return x if isinstance(dim0, tuple): if (len(dim0) == 2): bufshape = (dim0[0], dim0[1] * self.bsz) else: bufshape = (np.prod(dim0), self.bsz) else: bufshape = (dim0, self.bsz) if shared is not None: if shared.shape == bufshape: return shared else: return shared.share(bufshape) else: return self.zeros(bufshape, dtype=dtype, name=name, persist_values=persist_values) def rng_reset(self): """ Reset the random state to the state where the Backend is first initialized. usually need to do: self.rng.set_state(self.init_rng_state) """ raise NotImplementedError() def execute(self, node): """ Execute the optree. There must be one and only one 'assign' op at the top of the optree when execute is called. Arguments: node (OpTreeNode): The op-tree to execute. """ pass def begin(self, block, identifier): """ Signal the start of a block of repeated computation (ex. at the start of a loop). 
This operation can be used to help the compiler optimize instruction performance, but has no direct effect on calculations. It must be book-ended by a corresponding Backend.end() call. Note that multiple begin calls can appear adjacent in nested loops. Arguments: block (Block.attr): identifies the type of computation being worked on based on Block attribute specified identifier (int): unique identifier for this particular iteration of the block. Will typically be something like epoch number, mini-batch number, and so forth. See Also: :py:func:`~neon.backends.backend.Backend.end`, """ pass def end(self, block, identifier): """ Signal the corresponding end of a block of repeated computation (ex. at the end of a loop). This operation can be used to help the compiler optimize performance, but has no direct effect on calculations. It must be preceded by a corresponding Backend.begin() call. Arguments: block (Block.attr): identifies the type of computation being worked on based on Block attribute specified identifier (int): unique identifier for this particular iteration of the block. Will typically be something like epoch number, mini-batch number, and so forth. See Also: :py:func:`~neon.backends.backend.Backend.begin`, """ pass def empty(self, shape, dtype=None, name=None, persist_values=True, parallel=False, distributed=False): """ Instantiate a new instance of this backend's Tensor class, without initializing element values. This is slightly faster than :py:func:`~neon.backends.Backend.array`, :py:func:`~neon.backends.Backend.ones`, :py:func:`~neon.backends.Backend.zeros`, but the values will be random. Arguments: shape (int, list): length of each dimension of the Tensor. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. name (str, optional): name indentifying the tensor (used in printing). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls parallel (bool, optional): If True and using multi-GPU backend, replicate copies of this tensor across devices. Defaults to False, and has no effect on CPU, or (single) GPU backends. distributed (bool, optional): If True and using multi-GPU backend, this tensor is fragmented and partitioned across devices. Defaults to False, and has no effect on CPU, or (single) GPU backends. Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. See Also: :py:func:`~neon.backends.Backend.array`, :py:func:`~neon.backends.Backend.zeros`, :py:func:`~neon.backends.Backend.ones` """ raise NotImplementedError() def array(self, ary, dtype=None, name=None, persist_values=True, parallel=False, distributed=False): """ Instantiate a new instance of this backend's Tensor class, populating elements based on ary values. Arguments: ary (array_like): input array object to construct from. Can be built-in python scalar or list (of lists), or a numpy.ndarray dtype (data-type, optional): If present, specifies the underlying type to employ for each element. name (str, optional): name indentifying the tensor (used in printing). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. 
Setting to False may provide a performance increase if values do not need to be maintained across such calls parallel (bool, optional): If True and using multi-GPU backend, replicate copies of this tensor across devices. Defaults to False, and has no effect on CPU, or (single) GPU backends. distributed (bool, optional): If True and using multi-GPU backend, this tensor is fragmented and partitioned across devices. Defaults to False, and has no effect on CPU, or (single) GPU backends. Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. See Also: :py:func:`~neon.backends.Backend.empty`, :py:func:`~neon.backends.Backend.zeros`, :py:func:`~neon.backends.Backend.ones` """ raise NotImplementedError() def zeros(self, shape, dtype=None, name=None, persist_values=True, parallel=False, distributed=False): """ Instantiate a new instance of this backend's Tensor class, populating Each element with a value of 0. Arguments: shape (int, list): length of each dimension of the Tensor. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. name (str, optional): name indentifying the tensor (used in printing). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls parallel (bool, optional): If True and using multi-GPU backend, replicate copies of this tensor across devices. Defaults to False, and has no effect on CPU, or (single) GPU backends. distributed (bool, optional): If True and using multi-GPU backend, this tensor is fragmented and partitioned across devices. Defaults to False, and has no effect on CPU, or (single) GPU backends. Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. See Also: :py:func:`~neon.backends.Backend.empty`, :py:func:`~neon.backends.Backend.ones`, :py:func:`~neon.backends.Backend.array` """ raise NotImplementedError() def ones(self, shape, dtype=None, name=None, persist_values=True, parallel=False, distributed=False): """ Instantiate a new instance of this backend's Tensor class, populating Each element with a value of 1. Arguments: shape (int, list): length of each dimension of the Tensor. dtype (data-type, optional): If present, specifies the underlying type to employ for each element. name (str, optional): name indentifying the tensor (used in printing). persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls parallel (bool, optional): If True and using multi-GPU backend, replicate copies of this tensor across devices. Defaults to False, and has no effect on CPU, or (single) GPU backends. distributed (bool, optional): If True and using multi-GPU backend, this tensor is fragmented and partitioned across devices. Defaults to False, and has no effect on CPU, or (single) GPU backends. Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. 
See Also: :py:func:`~neon.backends.backend.Backend.empty`, :py:func:`~neon.backends.backend.Backend.zeros`, :py:func:`~neon.backends.backend.Backend.array` """ raise NotImplementedError() def empty_like(self, other_ary, name=None, persist_values=True): """ Instantiate a new instance of this backend's Tensor class, with the shape taken from other_ary. Arguments: other_ary (tensor object): Tensor to inherit the dimensions of. name (str, optional): name indentifying the tensor (used in printing). dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls. Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. See Also: :py:func:`~neon.backends.Backend.empty`, :py:func:`~neon.backends.Backend.ones`, :py:func:`~neon.backends.Backend.array` """ raise NotImplementedError() def zeros_like(self, other_ary, name=None, persist_values=True): """ Instantiate a new instance of this backend's Tensor class, with the shape taken from other_ary and populating each element with a value of 0. Arguments: other_ary (tensor object): Tensor to inherit the dimensions of. name (str, optional): name indentifying the tensor (used in printing). dtype (data-type, optional): If present, specifies the underlying type to employ for each element. persist_values (bool, optional): If set to True (the default), the values assigned to this Tensor will persist across multiple begin and end calls. Setting to False may provide a performance increase if values do not need to be maintained across such calls. Returns: Tensor: array object Raises: NotImplementedError: Can't be instantiated directly. See Also: :py:func:`~neon.backends.Backend.empty`, :py:func:`~neon.backends.Backend.ones`, :py:func:`~neon.backends.Backend.array` """ raise NotImplementedError() def dot(self, a, b, out=None): """ Dot product of two Tensors. Arguments: a (Tensor): left-hand side operand. b (Tensor): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Note that this object should differ from left and right. Returns: OpTreeNode: the resulting op-tree from this operation. """ return OpTreeNode.build("dot", a, b, out=out) def compound_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False): """ Perform one of the following operations (* is dot product) C = alpha * A * B + beta * C C = alpha * A.T * B + beta * C C = alpha * A * B.T + beta * C relu: if true, applied before output (and prior to beta addition) The operation will be short-circuited to: out <- alpha * left * right if beta has value 0 (the default). Arguments: A (Tensor): left-hand side operand. B (Tensor): right-hand side operand. C (Tensor): output operand alpha (float. optional): scale A*B term beta (float, optional): scale C term before sum relu (bool, optional): If True apply ReLu non-linearity before output. Defaults to False. """ raise NotImplementedError() def batched_dot(self, A, B, C, alpha=1.0, beta=0.0, relu=False): """ Perform one of the following operations: 1. For fprop: A(K, C), B(X,C,N), C(X,K,N) --> call batched_dot(A, B, C) 2. For bprop: A(K, C), B(X,K,N), C(X,C,N) --> call batched_dot(A.T, B, C) 3. 
For update: A(X,K,N), B(X,C,N), C(K,C) --> call batched_dot(A, B.T, C) Arguments: A (Tensor): left-hand input operand B (Tensor): right-hand input operand C (Tensor): output operand alpha (float. optional): scale A*B term beta (float, optional): scale C term before sum relu (bool, optional): If True apply ReLu non-linearity before output. Defaults to False. """ raise NotImplementedError() def make_binary_mask(self, out, keepthresh=0.5): """ Create a binary mask for dropout layers. Arguments: out (Tensor): Output tensor keepthresh (float, optional): fraction of ones. Defaults to 0.5 """ raise NotImplementedError() def add(self, a, b, out=None): """ Perform element-wise addition on the operands, storing the resultant values in the out Tensor. Each operand and out must have identical shape or be broadcastable as such. Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("add", a, b, out=out) def subtract(self, a, b, out=None): """ Perform element-wise subtraction on the operands, storing the resultant values in the out Tensor. Each operand and out must have identical shape or be broadcastable as such. Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("sub", a, b, out=out) def multiply(self, a, b, out=None): """ Perform element-wise multiplication on the operands, storing the resultant values in the out Tensor. Each operand and out must have identical shape or be broadcastable as such. Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("mul", a, b, out=out) def divide(self, a, b, out=None): """ Perform element-wise division on the operands, storing the resultant values in the out Tensor. Each operand and out must have identical shape or be broadcastable as such. Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("div", a, b, out=out) def true_divide(self, a, b, out=None): """ Here it is an alias of divide. Instead of the Python traditional 'floor division', this returns a true division. Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("div", a, b, out=out) def power(self, a, b, out=None): """ Perform element-wise raise of tsr values to specified power, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. b (Tensor, numeric): exponentiated value to be applied to element. Examples include 2 (square), 0.5 (sqaure root). out (Tensor, optional): where the result will be stored. 
If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("pow", a, b, out=out) def reciprocal(self, a, out=None): """ Perform element-wise reciprocal of Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. power (Tensor, numeric): exponentiated value to be applied to element. Examples include 2 (square), 0.5 (sqaure root). out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("div", 1., a, out=out) def negative(self, a, out=None): """ Perform element-wise negation of Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("neg", a, None, out=out) def sgn(self, a, out=None): """ Perform element-wise indication of the sign of Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("sgn", a, None, out=out) def absolute(self, a, out=None): """ Perform element-wise absolute value of Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("abs", a, None, out=out) def fabs(self, a, out=None): """ Perform element-wise absolute value of Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Implemented as an alias of absolute. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("abs", a, None, out=out) def sqrt(self, a, out=None): """ Perform element-wise square-root of Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("sqrt", a, None, out=out) def square(self, a, out=None): """ Perform element-wise square of Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("sqr", a, None, out=out) def exp(self, a, out=None): """ Perform element-wise exponential transformation on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. 
Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("exp", a, None, out=out) def exp2(self, a, out=None): """ Perform element-wise 2-based exponential transformation on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("exp2", a, None, out=out) def safelog(self, a, out=None): """ Perform element-wise natural logarithm transformation on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. This log function has built in safety for underflow. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("safelog", a, None, out=out) def log(self, a, out=None): """ Perform element-wise natural logarithm transformation on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("log", a, None, out=out) def log2(self, a, out=None): """ Perform element-wise 2-based logarithm transformation on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("log2", a, None, out=out) def sig(self, a, out=None): """ Perform element-wise sigmoid transformation on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("sig", a, None, out=out) def sig2(self, a, out=None): """ Perform element-wise 2-based sigmoid logarithm transformation on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("sig2", a, None, out=out) def tanh(self, a, out=None): """ Perform element-wise hyperbolic tangent transformation on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("tanh", a, None, out=out) def tanh2(self, a, out=None): """ Perform element-wise 2-based hyperbolic tangent transformation on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. 
Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("tanh2", a, None, out=out) def finite(self, a, out=None): """ Perform element-wise test of finiteness (not infinity or not Not a Number) on Tensor `a`, storing the result in Tensor out. Both Tensor's should have identical shape. Arguments: a (Tensor): input to be transformed. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("finite", a, None, out=out) def equal(self, a, b, out=None): """ Performs element-wise equality testing on each element of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("eq", a, b, out=out) def not_equal(self, a, b, out=None): """ Performs element-wise non-equality testing on each element of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("ne", a, b, out=out) def less(self, a, b, out=None): """ Performs element-wise less than testing on each element of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("lt", a, b, out=out) def less_equal(self, a, b, out=None): """ Performs element-wise less than or equal testing on each element of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("le", a, b, out=out) def greater(self, a, b, out=None): """ Performs element-wise greater than testing on each element of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only theshape op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("gt", a, b, out=out) def greater_equal(self, a, b, out=None): """ Performs element-wise greater than or equal testing on each element of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. 
If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("ge", a, b, out=out) def maximum(self, a, b, out=None): """ Performs element-wise maximum value assignment based on corresponding elements of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("maximum", a, b, out=out) def minimum(self, a, b, out=None): """ Performs element-wise minimum value assignment based on corresponding elements of left and right, storing the result in out. Each operand is assumed to be the same shape (or broadcastable as such). Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("minimum", a, b, out=out) def clip(self, a, a_min, a_max, out=None): """ Performs element-wise clipping of Tensor `a`, storing the result in out. The clipped value will be between [a_min, a_max]. Arguments: a (Tensor, numeric): left-hand side operand. b (Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ return self.minimum(self.maximum(a, a_min), a_max, out=out) def sum(self, a, axis=None, out=None, keepdims=True): """ Calculates the summation of the elements along the specified axis. Arguments: a (Tensor): the Tensor on which to perform the sum axis (int, optional): the dimension along which to compute. If set to None, we will sum over all dimensions. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. keepdims (bool, optional): Keep the axes being computed over in the output (with size 1), instead of collapsing. Defaults to True. Returns: OpTreeNode: the resulting op-tree """ if axis is None: return OpTreeNode.build("sum", OpTreeNode.build("sum", a, None, axis=0), None, axis=1, out=out) return OpTreeNode.build("sum", a, None, axis=axis, out=out) def max(self, a, axis=None, out=None, keepdims=True): """ Calculates the maximal element value along the specified axes. Arguments: a (Tensor): the Tensor on which to perform the operation axis (int, optional): the dimension along which to compute. If set to None, we will take max over all dimensions. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. keepdims (bool, optional): Keep the axes being computed over in the output (with size 1), instead of collapsing. Defaults to True. Returns: OpTreeNode: the resulting op-tree """ if axis is None: return OpTreeNode.build("max", OpTreeNode.build("max", a, None, axis=0), None, axis=1, out=out) return OpTreeNode.build("max", a, None, axis=axis, out=out) def min(self, a, axis=None, out=None, keepdims=True): """ Calculates the minimal element value along the specified axes. Arguments: a (Tensor): the Tensor on which to perform the operation axis (int, optional): the dimension along which to compute. If set to None, we will take min over all dimensions. 
out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. keepdims (bool, optional): Keep the axes being computed over in the output (with size 1), instead of collapsing. Defaults to True. Returns: OpTreeNode: the resulting op-tree """ if axis is None: return OpTreeNode.build("min", OpTreeNode.build("min", a, None, axis=0), None, axis=1, out=out) return OpTreeNode.build("min", a, None, axis=axis, out=out) def argmax(self, a, axis=1, out=None, keepdims=True): """ Calculates the indices of the maximal element value along the specified axis. If multiple elements contain the maximum, only the indices of the first are returned. Arguments: a (Tensor): the Tensor on which to perform the operation axis (int, optional): the dimension along which to compute. If set to None, we will take argmax over all dimensions. Defaults to 1 out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. keepdims (bool, optional): Keep the axes being computed over in the output (with size 1), instead of collapsing. Defaults to True. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("argmax", a, None, axis=axis, out=out) def argmin(self, a, axis=1, out=None, keepdims=True): """ Calculates the indices of the minimal element value along the specified axis. If multiple elements contain the minimum, only the indices of the first are returned. Arguments: a (Tensor): the Tensor on which to perform the operation axis (int, optional): the dimension along which to compute. If set to None, we will take argmin over all dimensions. Defaults to 1 out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. keepdims (bool, optional): Keep the axes being computed over in the output (with size 1), instead of collapsing. Defaults to True. Returns: OpTreeNode: the resulting op-tree """ return OpTreeNode.build("argmin", a, None, axis=axis, out=out) def mean(self, a, axis=None, partial=None, out=None, keepdims=True): """ Calculates the arithmetic mean of the elements along the specified axes. Arguments: a (Tensor): the Tensor on which to perform the operation axis (int, optional): the dimension along which to compute. If set to None, we will take mean over all dimensions. Defaults to None partial (bool, optional): Not currently used. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. keepdims (bool, optional): Keep the axes being computed over in the output (with size 1), instead of collapsing. Defaults to True. Returns: OpTreeNode: the resulting op-tree """ shape = a.shape if axis is None: return self.multiply(self.sum(a), 1.0 / (shape[0] * shape[1]), out=out) return self.multiply(self.sum(a, axis=axis), 1.0 / shape[axis], out=out) def var(self, a, axis=None, partial=None, out=None, keepdims=True): """ Calculates the variance of the elements along the specified axes. Arguments: a (Tensor): the Tensor on which to perform the operation axis (int, optional): the dimension along which to compute. If set to None, we will take var over all dimensions. Defaults to None partial (bool, optional): Not currently used. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. keepdims (bool, optional): Keep the axes being computed over in the output (with size 1), instead of collapsing. Defaults to True. 
Returns: OpTreeNode: the resulting op-tree """ if axis is None: return self.mean(self.square(a - self.mean(a)), out=out) return self.mean(self.square(a - self.mean(a, axis=axis)), axis=axis, out=out) def std(self, a, axis=None, partial=None, out=None, keepdims=True): """ Calculates the standard deviation of the elements along the specified axes. Arguments: a (Tensor): the Tensor on which to perform the operation axis (int, optional): the dimension along which to compute. If set to None, we will take std over all dimensions. out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. partial (bool, optional): Not currently used. keepdims (bool, optional): Keep the axes being computed over in the output (with size 1), instead of collapsing. Defaults to True. Returns: OpTreeNode: the resulting op-tree """ return self.sqrt(self.var(a, axis=axis, partial=partial, out=out)) def take(self, a, indices, axis, out=None): """ Extract elements based on the indices along a given axis. Arguments: a (Tensor): the Tensor on which to perform the operation indices (Tensor, numpy ndarray): indicies of elements to select axis (int, optional): the dimension along which to compute. If set to None, we will extract over all dimensions (flattened first) out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. """ return a.take(indices, axis, out) def onehot(self, indices, axis, out=None): """ Generate optree for converting `indices` to a onehot representation Arguments: indices (Tensor): Elements must be of numpy integer type for gpu onehot to work. axis (int): the axis along the feature length dimension out (Tensor, optional): where the result will be stored. If out is None, only the op-tree will be returned. Returns: OpTreeNode: the resulting op-tree """ if axis not in (0, 1): raise ValueError("bad axis for onehot") return OpTreeNode.build("onehot", None, None, idx=indices, axis=axis, out=out) def update_fc_bias(self, err, out): """ Compute the updated bias gradient for a fully connected network layer. Arguments: err (Tensor): backpropagated error out (Tensor): Where to store the updated gradient value. """ self.ng.sum(err, axis=1, out=out) def add_fc_bias(self, inputs, bias): """ Add the bias for a fully connected network layer. Arguments: inputs (Tensor): the input to update. bias (Tensor): the amount to increment """ self.ng.add(inputs, bias, out=inputs) def conv_layer(self, dtype, N, C, K, D=1, H=1, W=1, T=1, R=1, S=1, pad_d=0, pad_h=0, pad_w=0, str_d=1, str_h=1, str_w=1, relu=False, bsum=False, deterministic_update=False): """ Create a new ConvLayer parameter object. This is then passed as an argument to all the convolution operations. Arguments: dtype (data-type, optional): If present, specifies the underlying type to employ for each element. N (int): Number of images in mini-batch C (int): Number of input feature maps K (int): Number of output feature maps D (int, optional): Depth of input image. Defaults to 1 H (int, optional): Height of input image. Defaults to 1 W (int, optional): Width of input image. Defaults to 1 T (int, optional): Depth of filter kernel. Defaults to 1 R (int, optional): Height of filter kernel. Defaults to 1 S (int, optional): Width of filter kernel. Defaults to 1 pad_d (int, optional): amount of zero-padding around the depth edge Defaults to 0. pad_h (int, optional): amount of zero-padding around the height edge Defaults to 0. 
pad_w (int, optional): amount of zero-padding around the width edge Defaults to 0. str_d (int, optional): factor to step the filters by in the depth direction. Defaults to 1 str_h (int, optional): factor to step the filters by in the depth direction. Defaults to 1 str_w (int, optional): factor to step the filters by in the depth direction. Defaults to 1 relu (bool, optional): apply a relu transform to the output for fprop or bprop. Defaults to False bsum (bool, optional): calculate the sum along the batchnorm axis for fprop or bprop. Outputs an fp32 tensor of size Kx1. Defaults to False. deterministic_update (bool, optional): eleminate atomic adds in the update operation. Increases reproducibility but runs slower. Defaults to False. """ raise NotImplementedError() def fprop_conv(self, layer, I, F, O, alpha=1.0, relu=False, repeat=1): """ Forward propagate the inputs of a convolutional network layer to produce output Arguments: layer: the conv layer as a parameter object I (Tensor): inputs F (Tensor): the weights (filters) O (Tensor): outputs alpha (float, optional): linear scaling. Defaults to 1.0 relu (bool, optional): apply ReLu before output. Default not to. repeat (int, optional): Repeat this operation the specified number of times. Defaults to 1. """ raise NotImplementedError() def bprop_conv(self, layer, F, E, grad_I, alpha=1.0, repeat=1): """ Backward propagate the error through a convolutional network layer. Arguments: layer: the conv layer as a parameter object F (Tensor): the weights (filters) E (Tensor): errors grad_I (Tensor): gradient to inputs (output delta) alpha (float, optional): linear scaling. Defaults to 1.0 repeat (int, optional): Repeat this operation the specified number of times. Defaults to 1. """ raise NotImplementedError() def update_conv(self, layer, I, E, grad_F, alpha=1.0, repeat=1): """ Compute the updated gradient for a convolutional network layer. Arguments: layer: the conv layer as a parameter object I (Tensor): the inputs E (Tensor): the errors grad_F (Tensor): filter gradients (weights) to update. alpha (float, optional): linear scaling. Defaults to 1.0 repeat (int, optional): Repeat this operation the specified number of times. Defaults to 1. """ raise NotImplementedError() def deconv_layer(self, dtype, N, C, K, P, Q, R=1, S=1, pad_d=0, pad_h=0, pad_w=0, str_d=1, str_h=1, str_w=1): """ Create a new Deconvolution parameter object. This then is passed as an argument to all deconvolution kernels. Arguments: dtype (data-type, optional): If present, specifies the underlying type to employ for each element. N (int): Number of images in mini-batch C (int): Number of input feature maps K (int): Number of output feature maps P (int): Height of output Q (int): Width of output R (int, optional): Height of filter kernel. Defaults to 1 S (int, optional): Width of filter kernel. Defaults to 1 pad_d (int, optional): amount of zero-padding around the depth edge Defaults to 0. pad_h (int, optional): amount of zero-padding around the height edge Defaults to 0. pad_w (int, optional): amount of zero-padding around the width edge Defaults to 0. str_d (int, optional): factor to step the filters by in the depth direction. Defaults to 1 str_h (int, optional): factor to step the filters by in the depth direction. Defaults to 1 str_w (int, optional): factor to step the filters by in the depth direction. Defaults to 1 Leave spatial dimensions at 1 to allow feature map pooling in the fc layers. 
""" raise NotImplementedError() def pool_layer(self, dtype, op, N, C, D=1, H=1, W=1, J=1, T=1, R=1, S=1, pad_j=0, pad_d=0, pad_h=0, pad_w=0, str_j=None, str_d=None, str_h=None, str_w=None): """ Create a new PoolLayer parameter object. This then is passed as an argument to all pooling kernels. Arguments: op (str): "max", "avg", "l2" pooling (currently bprop only supports max, but not avg and l2) N (int): Number of images in mini-batch C (int): Number of input feature maps D (int, optional): Depth of input image. Defaults to 1 H (int, optional): Height of input image. Defaults to 1 W (int, optional): Width of input image. Defaults to 1 J (int, optional): Size of feature map pooling window (maxout n_pieces). Defaults to 1 T (int, optional): Depth of pooling window. Defaults to 1 R (int, optional): Height of pooling window. Defaults to 1 S (int, optional): Width of pooling window. Defaults to 1 pad_j (int, optional): amount of zero-padding around the fm pooling window edge. Defaults to 0. pad_d (int, optional): amount of zero-padding around the depth edge Defaults to 0. pad_h (int, optional): amount of zero-padding around the height edge Defaults to 0. pad_w (int, optional): amount of zero-padding around the width edge Defaults to 0. str_d (int, optional): factor to step the filters by in the fm pooling window direction. Defaults to 1 str_d (int, optional): factor to step the filters by in the depth direction. Defaults to 1 str_h (int, optional): factor to step the filters by in the depth direction. Defaults to 1 str_w (int, optional): factor to step the filters by in the depth direction. Defaults to 1 Leave spatial dimensions at 1 to allow feature map pooling in the fc layers. """ raise NotImplementedError() def fprop_pool(self, layer, I, O): """ Forward propagate pooling layer. Arguments: layer (PoolLayer): The pool layer object, different backends have different pool layers. I (Tensor): Input tensor. O (Tensor): output tensor. """ raise NotImplementedError() def bprop_pool(self, layer, I, E, grad_I): """ Backward propagate pooling layer. Arguments: layer (PoolLayer): The pool layer object. Different backends have different pool layers. I (Tensor): Input tensor. E (Tensor): Error tensor. grad_I (Tensor): Gradient tensor (delta) """ raise NotImplementedError() def compound_bprop_lut(self, nin, inputs, error, error_t, dW, pad_idx, alpha=1.0, beta=0): """ Backward propagate lookup table layer. Arguments: nin (integer): Number of input word_ids. inputs (Tensor): Input tensor. error (Tensor): Error tensor. error_t (Tensor): Transposed error tensor. dW (Tensor): Gradient tensor (delta). pad_idx (integer): alpha (float): beta (float): """ raise NotImplementedError() # For constructing an op tree used in lazy evaluation class OpTreeNode(tuple): """ An OpTreeNode is a tuple of length 3. The first element is a dict specifying the operation, and the second and third elements specify the operands. From an op-tree's tree perspective, think about the 3 elements as 3 nodes. The second and third element are the left and right child of the first element. 
""" def __new__(cls, *args): return tuple.__new__(cls, args) def __str__(self): s = '(' + str(self[0]) s += ', ' if isinstance(self[1], Tensor): if self[1].name and self[1].name is not None: s += self[1].name else: s += 'tensor-' + hex(id(self[1])) else: s += str(self[1]) s += ', ' if isinstance(self[2], Tensor): if self[2].name and self[2].name is not None: s += self[2].name else: s += 'tensor-' + hex(id(self[2])) else: s += str(self[2]) s += ')' return s def __repr__(self): return self.__str__() def key(self): """ Returns a key for identifying the optree. The key is depended on the ops and the id of the tensors. Since __eq__ is overloaded, need to manage the hashing of the OpTreeNode manually. Returns: tuple: optree key """ stack = self.traverse(list()) for i in range(len(stack)): if type(stack[i]) is dict: if 'axis' in stack[i]: stack[i] = (stack[i]['op'], stack[i]['axis']) else: stack[i] = (stack[i]['op']) return tuple(stack) def intrinsic_key_maps(self): """ Returns the intrinsic key, tensor_index_map and index_tensor_map for the purpose of identifying a optree. The key is depended on the ops tensors dimensions and the relaion among the tensors. x0 * x1 + x0 * x2 will have the same intrinsic key as y0 * y1 + y0 * y2, if xi and yi have the same shape. In tensor_index_map and index_tensor_map, tensors has a one-to-one mapping with indices. The index of the tensor is depended on the first occurance of the tensor in the post-order traversal of the optree. Returns: (intrinsic_key, tensor_index_map, index_tensor_map) """ stack = self.traverse(list()) tensor_index = 0 tensor_index_map = {} index_tensor_map = {} for i in range(len(stack)): if type(stack[i]) is dict: if 'axis' in stack[i]: stack[i] = (stack[i]['op'], stack[i]['axis']) else: stack[i] = (stack[i]['op']) elif isinstance(stack[i], Tensor): # use interger to replace tensor if stack[i] in tensor_index_map: stack[i] = (tensor_index_map[stack[i]], stack[i].shape) else: # put tensor in dict tensor_index_map[stack[i]] = tensor_index index_tensor_map[tensor_index] = stack[i] stack[i] = (tensor_index, stack[i].shape) tensor_index += 1 return (tuple(stack), tensor_index_map, index_tensor_map) @staticmethod def build(op, a, b, out=None, **kwargs): """ Build OpTreeNode. Arguments: a (OpTreeNode, Tensor, numeric): left-hand side operand. b (OpTreeNode, Tensor, numeric): right-hand side operand. out (Tensor, optional): where the result will be stored. If out is not None, the op-tree will be executed. kwargs: optional argument such as axis of the reducion. 
""" # check type for arg in (a, b): if not isinstance(arg, (int, float, Tensor, OpTreeNode, type(None))): return NotImplemented # get shape out_shape = [1, 1] if isinstance(a, (OpTreeNode, Tensor)): a_shape = a.shape elif isinstance(a, (float, int)): a_shape = [1, 1] else: a_shape = [0, 0] if isinstance(b, (OpTreeNode, Tensor)): b_shape = b.shape elif isinstance(b, (float, int)): b_shape = [1, 1] else: b_shape = [0, 0] # TODO: fix shape in smarter way if len(a_shape) == 1: a_shape = a_shape + (1,) if len(b_shape) == 1: b_shape = b_shape + (1,) if op in OpCollection.ew_ops: for i in range(2): out_shape[i] = max(a_shape[i], b_shape[i]) elif op in OpCollection.reduction_ops: if "axis" in kwargs: out_shape = list(a_shape) out_shape[kwargs["axis"]] = 1 else: pass # [1, 1] elif op == "assign": out_shape = a_shape elif op == "dot": assert (len(a_shape) == len(b_shape) and len(b_shape) == 2 and a_shape[1] == b_shape[0]) out_shape = (a_shape[0], b_shape[1]) elif op == "transpose": assert b is None out_shape = tuple(reversed(a_shape)) else: raise TypeError("%s is not a valid operation" % op) out_shape = tuple(out_shape) # build op dict op_dict = {"op": op, "shape": out_shape} op_dict.update(kwargs) node = OpTreeNode(op_dict, a, b) # execute explicit assignment if op == "assign": return node.execute() # passing in an out value counts as assignment if out is not None: return OpTreeNode({"op": "assign"}, out, node).execute() # delay execution until assignment return node def execute(self): """ Execute the optree. When calling `execute()`, there must be one and only one `assign` operation at the very top of the op-tree. The corresponding backend's execute function will be called. """ assert(self[0]["op"] == "assign") backend = self[1].backend if isinstance(backend, Backend): return backend.execute(self) else: raise NotImplementedError() def traverse(self, stack): """ Post order walk op tree and produce postfix stack Arguments: stack (list): user shall give empty list like `list()`, then it's used recursively to construct the post-order stack. """ # Left if isinstance(self[1], OpTreeNode): self[1].traverse(stack) elif self[1] is not None: stack.append(self[1]) # Right if isinstance(self[2], OpTreeNode): self[2].traverse(stack) elif self[2] is not None: stack.append(self[2]) stack.append(self[0]) return stack @property def T(self): return OpTreeNode.build("transpose", self, None) def transpose(self, out=None): """ Return a transposed view of the data. 
""" if out: return OpTreeNode.build("assign", out, self.T) return self.T @staticmethod def optree_to_list(optree): """ convert optree to list of lists recursively """ if isinstance(optree, OpTreeNode): return list(map(OpTreeNode.optree_to_list, optree)) else: return optree @staticmethod def list_to_optree(l): """ convert list to optree recursively """ if isinstance(l, list): return OpTreeNode(*map(OpTreeNode.list_to_optree, l)) else: return l @property def shape(self): """ return the shape of the OpTreeNode """ if isinstance(self, OpTreeNode): return self[0]['shape'] if isinstance(self, Tensor): return self.shape # scalar return (1, 1) @staticmethod def _pretty_print(node): operators = {'add': '+', 'sub': '-', 'mul': '*', 'div': '/', 'pow': '**'} s = '' if isinstance(node, Tensor): if node.name: s = node.name else: s = 'tensor-' + hex(id(node)) elif isinstance(node, OpTreeNode): if node[2]: s += OpTreeNode._pretty_print(node[1]) + ' ' if node[0]['op'] in operators: s += operators[node[0]['op']] else: s += node[0]['op'] s += ' ' + OpTreeNode._pretty_print(node[2]) else: s = node[0]['op'] + ' ' + OpTreeNode._pretty_print(node[1]) s = '(' + s + ')' else: s = str(node) # TODO s = '(' + s + ')' return s def pp(self): """ Pretty print of the optree Arguments: node (OpTreeNode): the top node of the op-tree to print Returns: str: string representation of the op-tree """ return OpTreeNode._pretty_print(self) def asnumpyarray(self): """ Returns the evaluated value of the optree as a host numpy.ndarray. Allocates new memory, usually used for debug. Returns: numpy.ndarray: evaluated value """ return self.astensor().get() def astensor(self): """ Returns the evaluated value of the optree as a Tensor. Allocates new memory, usually used for debug. Returns: Tensor: evaluated value """ stack = self.traverse(list()) be = None for s in stack: if isinstance(s, Tensor): be = s.backend break if be is None: raise ValueError("No tensor object in op_tree") buf = be.empty(self.shape) buf[:] = self return buf def __add__(self, other): return self.build("add", self, other) def __sub__(self, other): return self.build("sub", self, other) def __mul__(self, other): return self.build("mul", self, other) def __div__(self, other): return self.build("div", self, other) def __truediv__(self, other): return self.build("div", self, other) def __pow__(self, other): return self.build("pow", self, other) def __radd__(self, other): return self.build("add", other, self) def __rsub__(self, other): return self.build("sub", other, self) def __rmul__(self, other): return self.build("mul", other, self) def __rdiv__(self, other): return self.build("div", other, self) def __rtruediv__(self, other): return self.build("div", other, self) def __rpow__(self, other): return self.build("pow", other, self) def __eq__(self, other): return self.build("eq", self, other) def __ne__(self, other): return self.build("ne", self, other) def __lt__(self, other): return self.build("lt", self, other) def __le__(self, other): return self.build("le", self, other) def __gt__(self, other): return self.build("gt", self, other) def __ge__(self, other): return self.build("ge", self, other) def __abs__(self): return self.build("abs", self, None) def __neg__(self): return self.build("neg", self, None)
apache-2.0
6,893,616,573,040,554,000
37.947764
94
0.535217
false
4.870896
false
false
false
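The OpTreeNode docstrings in the backend code above describe a deferred-evaluation scheme: arithmetic operators only build a tuple-shaped tree, and no work happens until an assignment sits at the root and execute() walks the tree. Below is a minimal, self-contained sketch of that idea for illustration only; LazyNode and its NumPy-based evaluation are hypothetical stand-ins, not the real Tensor/Backend classes from the file above.

import numpy as np

# Hypothetical stand-ins for the real Tensor/Backend machinery; LazyNode and its
# NumPy evaluation exist only for this illustration.
class LazyNode(tuple):
    """A tiny op-tree: (op_dict, left, right), computed only when evaluate() runs."""

    def __new__(cls, op, a, b):
        return tuple.__new__(cls, ({"op": op}, a, b))

    def __add__(self, other):
        return LazyNode("add", self, other)   # building, not computing

    def __mul__(self, other):
        return LazyNode("mul", self, other)

    def evaluate(self):
        # Post-order walk, mirroring the traverse()/execute() split described above.
        def ev(node):
            if isinstance(node, LazyNode):
                fn = {"add": np.add, "mul": np.multiply}[node[0]["op"]]
                return fn(ev(node[1]), ev(node[2]))
            return node
        return ev(self)

a = np.ones((2, 3))
b = np.full((2, 3), 2.0)
tree = LazyNode("add", a, b) * 3.0   # nothing is computed yet
result = tree.evaluate()             # deferred work happens here, like assign/execute()
assert (result == 9.0).all()

Deferring evaluation until assignment is what lets a backend fuse the whole expression into one pass instead of materialising every intermediate result.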
amlyj/pythonStudy
3.6/AI/ML/DL/study_face_recognition/facerec_from_video_file_faster.py
1
3084
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/12/27 11:47
# @Author  : TOM.LEE

import cv2
import face_recognition

video_capture = cv2.VideoCapture("../videos/hamilton_clip.mp4")

# Load some sample pictures and learn how to recognize them.
lmm_image = face_recognition.load_image_file("../images/lin-manuel-miranda.png")
lmm_face_encoding = face_recognition.face_encodings(lmm_image)[0]

al_image = face_recognition.load_image_file("../images/alex-lacamoire.png")
al_face_encoding = face_recognition.face_encodings(al_image)[0]

# Create arrays of known face encodings and their names
known_face_encodings = [
    lmm_face_encoding,
    al_face_encoding
]
known_face_names = [
    "Lin-Manuel Miranda",
    "Alex Lacamoire"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::-1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = known_face_names[first_match_index]

            face_names.append(name)

    process_this_frame = not process_this_frame

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

    # Display the resulting image
    cv2.imshow('Video', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the video file
video_capture.release()
cv2.destroyAllWindows()
mit
-3,399,066,312,801,603,600
32.89011
101
0.651751
false
3.419069
false
false
false
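The face-recognition script above speeds up detection by running it on a quarter-size frame, so every detected box has to be multiplied by 4 before it is drawn on the original frame. A small sketch of that bookkeeping follows; to_full_resolution is an illustrative helper that does not appear in the original file.

# Illustrative only: detection runs on a frame resized with fx=fy=0.25, so each
# (top, right, bottom, left) box must be scaled back up before drawing.
SCALE = 0.25

def to_full_resolution(box, scale=SCALE):
    """Map a face box found on the downscaled frame back onto the original frame."""
    factor = int(round(1 / scale))          # 4 for quarter-size frames
    top, right, bottom, left = box
    return (top * factor, right * factor, bottom * factor, left * factor)

# A box detected at (50, 120, 110, 60) on the small frame corresponds to
# (200, 480, 440, 240) on the full-size frame.
assert to_full_resolution((50, 120, 110, 60)) == (200, 480, 440, 240)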
prior/webinars
webinars_web/webinars/management/commands/seed.py
1
2093
from django.core.management.base import BaseCommand
from optparse import make_option
from webinars_web.webinars import models as wm
from uuid import uuid4
from webex.attendee import Attendee as WebexRegistrant
import hapi_plus.leads
from django.conf import settings


class Command(BaseCommand):
    help = 'Seeds registrant data for an event'
    option_list = BaseCommand.option_list + (
        make_option('-e', '--event', type='int', dest='event_id',
                    help='The local id for a specific webinar event to seed.'),
        make_option('-w', '--webex_count', type='int', dest='webex_count',
                    help='Number of Webex registrants to seed on this event'),
        make_option('-s', '--hubspot_count', type='int', dest='hubspot_count',
                    help='Number of HubSpot registrants to seed on this event')
    )

    def handle(self, *args, **options):
        event_id = options.get('event_id')
        webex_count = options.get('webex_count') or 0
        hubspot_count = options.get('hubspot_count') or 0

        event = wm.Event.objects.get(pk=event_id)

        print "bulk inserting %s webex registrants" % webex_count
        webex_event = event.webex_event
        event.webex_event.create_registrants(WebexRegistrant.random(webex_event, webex_count))

        leads_client = hapi_plus.leads.LeadsClient(settings.HUBSPOT_API_KEY, hub_id=event.hub.id, env=settings.API_ENV, timeout=20)

        print "incrementally inserting %s hubspot registrants" % hubspot_count
        for i in xrange(hubspot_count):
            form_values = []
            form_values.append({'fieldLabel': 'Email Address', 'fieldName': 'Email', 'fieldValue': ('%s@%s.com' % (str(uuid4())[:8], str(uuid4())[:8]))})
            form_values.append({'fieldLabel': 'First Name', 'fieldName': 'FirstName', 'fieldValue': str(uuid4())[:8]})
            form_values.append({'fieldLabel': 'Last Name', 'fieldName': 'LastName', 'fieldValue': str(uuid4())[:8]})
            leads_client.create_lead(event.update_cms_form.guid, form_values)
            print "inserted %s hubspot registrants" % i
apache-2.0
-8,942,040,098,241,600,000
48.833333
151
0.652174
false
3.488333
false
false
false
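The management command above relies on the older optparse-based convention of extending BaseCommand.option_list with make_option (used before Django switched commands to argparse). As a rough, standalone illustration of how those option tuples parse flags into an options object, here is plain optparse with no Django involved; the flag values are made up for the example.

from optparse import OptionParser, make_option

option_list = [
    make_option('-e', '--event', type='int', dest='event_id',
                help='The local id for a specific webinar event to seed.'),
    make_option('-w', '--webex_count', type='int', dest='webex_count',
                help='Number of Webex registrants to seed on this event'),
]

parser = OptionParser(option_list=option_list)
options, args = parser.parse_args(['-e', '42', '--webex_count', '10'])
assert options.event_id == 42 and options.webex_count == 10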
tyrchen/church
church/views/users.py
1
4675
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
from dateutil.parser import parse
import json
import logging

from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseBadRequest
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView, View
import requests

from settings import API_SERVER

__author__ = 'tchen'
logger = logging.getLogger(__name__)

last_updater_ignore = ['gnats', 'slt-builder', 'JASMINE Notification <', 'builder']


class UserView(TemplateView):
    template_name = 'church/user.html'

    def get_user(self, uid):
        return requests.get(API_SERVER + '/directory/employees/%s.json' % uid).json()

    def action_required(self, item):
        updater = item.get('last_updater', '')
        modified = parse(item['modified_at']).replace(tzinfo=None)
        now = datetime.now()
        if updater not in last_updater_ignore and updater != item['dev_owner'] and (now - modified).days < 5 and \
                item['responsible'] == item['dev_owner']:
            return True
        return False

    def get_pr_list(self, uid):
        data = requests.get(API_SERVER + '/gnats/%s.json' % uid).json()
        action_required_issues = []
        new_issues = []
        working_issues = []
        info_issues = []
        done_issues = []
        for item in data:
            if self.action_required(item):
                action_required_issues.append(item)
            elif item['state'] == 'open':
                new_issues.append(item)
            elif item['responsible'] == uid:
                working_issues.append(item)
            elif item['state'] == 'feedback' or item['state'] == 'monitored':
                done_issues.append(item)
            else:
                info_issues.append(item)

        return [
            ('Action Required Issues', action_required_issues),
            ('Open Issues', new_issues),
            ('Working Issues', working_issues),
            ('Info Issues', info_issues),
            ('Done Issues (Monitored, Feedback)', done_issues)
        ]

    def get_context_data(self, **kwargs):
        uid = self.kwargs['text']
        issue_lists = self.get_pr_list(uid)
        user = self.get_user(uid)
        context = super(UserView, self).get_context_data(**kwargs)
        context['issue_lists'] = issue_lists
        context['engineer'] = user
        context['total'] = sum(map(lambda x: len(x[1]), issue_lists))
        return context

    def post(self, request, *args, **kwargs):
        uid = self.kwargs['text']
        user = self.get_user(uid)
        number = request.POST.get('pk')
        if request.user.username == uid:
            name = request.POST.get('name')
            value = request.POST.get('value')
            url = API_SERVER + '/gnats/issues/%s.json' % number
            data = {'name': name, 'value': value}
            headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
            r = requests.put(url, data=json.dumps(data), headers=headers)
            if r.status_code == 200:
                if name == 'comment':
                    # user updated the comment, so we add a progress record
                    progress_url = API_SERVER + '/gnats/progresses/%s.json' % number
                    data = {'uid': uid, 'progress': value, 'team': user['team']}
                    r = requests.post(progress_url, data=json.dumps(data), headers=headers)
                return HttpResponse('{}')
            else:
                return HttpResponseBadRequest('Cannot update PR %s' % number)
        return HttpResponseBadRequest('Cannot update PR %s' % number)

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(UserView, self).dispatch(*args, **kwargs)


class UserAddWorkingPRView(View):
    url = 'http://scrapy.jcnrd.us/schedule.json'

    def post(self, request, *args, **kwargs):
        items = request.POST.get('items')
        if request.user.username == self.kwargs['text']:
            items = map(lambda x: x.strip(), items.split(','))
            for item in items:
                payload = {'project': 'gnats', 'spider': 'worker_pr', 'uid': request.user.username, 'number': item}
                requests.post(self.url, payload)
            return HttpResponse(json.dumps({'status': 'ok'}))
        return HttpResponseBadRequest('Cannot update PR %s' % items)

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        return super(UserAddWorkingPRView, self).dispatch(*args, **kwargs)
mit
3,288,241,117,156,940,000
37.00813
115
0.589947
false
4.002568
false
false
false
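The triage rule in UserView.action_required above packs four conditions into one expression. A simplified, hypothetical restatement follows; needs_attention and IGNORED_UPDATERS are illustrative names, and modified_at is assumed to already be a datetime rather than the ISO string the real view parses.

from datetime import datetime, timedelta

# A PR needs attention when its last updater is not on the ignore list, is not the
# dev owner, the change is less than five days old, and the dev owner is still the
# responsible engineer.
IGNORED_UPDATERS = {'gnats', 'slt-builder', 'builder'}

def needs_attention(item, now=None):
    now = now or datetime.now()
    age_days = (now - item['modified_at']).days
    return (item['last_updater'] not in IGNORED_UPDATERS
            and item['last_updater'] != item['dev_owner']
            and age_days < 5
            and item['responsible'] == item['dev_owner'])

sample = {
    'last_updater': 'reporter1',
    'dev_owner': 'alice',
    'responsible': 'alice',
    'modified_at': datetime.now() - timedelta(days=1),
}
assert needs_attention(sample)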
dennishuo/dataproc-initialization-actions
oozie/test_oozie.py
1
1805
import os
import unittest

from parameterized import parameterized

from integration_tests.dataproc_test_case import DataprocTestCase


class OozieTestCase(DataprocTestCase):
    COMPONENT = 'oozie'
    INIT_ACTIONS = ['oozie/oozie.sh']
    TEST_SCRIPT_FILE_NAME = 'validate.sh'

    def verify_instance(self, name):
        test_script_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            self.TEST_SCRIPT_FILE_NAME)
        self.upload_test_file(test_script_path, name)
        self.__run_test_file(name)
        self.remove_test_script(self.TEST_SCRIPT_FILE_NAME, name)

    def __run_test_file(self, name):
        self.assert_instance_command(
            name, "bash {}".format(self.TEST_SCRIPT_FILE_NAME))

    @parameterized.expand(
        [
            ("SINGLE", "1.1", ["m"]),
            ("SINGLE", "1.2", ["m"]),
            ("SINGLE", "1.3", ["m"]),
            ("STANDARD", "1.1", ["m"]),
            ("STANDARD", "1.2", ["m"]),
            ("STANDARD", "1.3", ["m"]),
            ("HA", "1.1", ["m-0", "m-1", "m-2"]),
            ("HA", "1.2", ["m-0", "m-1", "m-2"]),
            ("HA", "1.3", ["m-0", "m-1", "m-2"]),
        ],
        testcase_func_name=DataprocTestCase.generate_verbose_test_name)
    def test_oozie(self, configuration, dataproc_version, machine_suffixes):
        self.createCluster(configuration,
                           self.INIT_ACTIONS,
                           dataproc_version,
                           machine_type="n1-standard-4",
                           boot_disk_size="200GB")
        for machine_suffix in machine_suffixes:
            self.verify_instance("{}-{}".format(self.getClusterName(),
                                                machine_suffix))


if __name__ == '__main__':
    unittest.main()
apache-2.0
-3,328,313,559,347,165,000
34.392157
76
0.516343
false
3.653846
true
false
false
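OozieTestCase above leans on parameterized.expand to turn each (configuration, dataproc_version, machine_suffixes) tuple into its own generated test method. A minimal sketch of that mechanism outside the Dataproc harness, with an assertion chosen purely for illustration:

import unittest
from parameterized import parameterized

class SuffixTestCase(unittest.TestCase):
    @parameterized.expand([
        ("SINGLE", "1.3", ["m"]),
        ("HA", "1.3", ["m-0", "m-1", "m-2"]),
    ])
    def test_machine_suffixes(self, configuration, dataproc_version, machine_suffixes):
        # Each tuple above becomes its own generated test method at class-definition time.
        self.assertTrue(all(suffix.startswith("m") for suffix in machine_suffixes))

if __name__ == '__main__':
    unittest.main()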
veprbl/wikichords
show.py
1
9484
#!/usr/bin/env python from common import * from time import time, ctime session_start() urlname = args[1] res = con.execute( "SELECT id, band, name, contents, variant FROM pages WHERE urlname = ? LIMIT 1;", (urlname,)).fetchall() if len(res) == 0: fault404() else: (pid, band, name, contents, variant) = res[0] if has_param('text'): if get_uid() == 0: fault403() newtext = get_param('text') newtext = ''.join(map(lambda s: s.rstrip() + "\n", newtext.splitlines())) res = con.execute("SELECT date, author FROM variants WHERE date > ? AND page = ? ORDER BY date DESC LIMIT 1;", (int(time()) - 3600, pid)).fetchall() con.execute("BEGIN TRANSACTION;") if len(res) > 0 and res[0][1] == get_uid(): (date, _) = res[0] con.execute("UPDATE variants SET date = ?, text = ? WHERE page = ? AND date = ?;", \ (int(time()), newtext, pid, date)) con.execute("UPDATE pages SET contents = ? WHERE id = ?;", \ (newtext, pid)) else: res = con.execute( "INSERT INTO variants (page, date, author, text) VALUES (?, ?, ?, ?)", \ (pid, int(time()), get_uid(), newtext) ) con.execute("UPDATE pages SET contents = ?, variant = ? WHERE id = ?;", \ (newtext, res.lastrowid, pid)) con.commit() locate("/song/%s" % urlname) vartime = 0 if len(args) in [3, 4] and (args[2] != 'edit' and args[2] != 'rename'): vartime = int(args[2]) res = con.execute( "SELECT id, text FROM variants WHERE page = ? AND date = ? LIMIT 1;", (pid, vartime)).fetchall() if len(res) == 0: fault404() else: (vid, contents) = res[0] if len(args) == 4 and args[3] == 'rollback': if get_uid() == 0: fault403() con.execute("BEGIN TRANSACTION;") con.execute("DELETE FROM variants WHERE date > ? AND page = ?;", \ (vartime, pid)) con.execute("UPDATE pages SET contents = ?, variant = ? WHERE id = ?;",\ (contents, vid, pid)); con.commit() locate("/song/%s" % urlname) if len(args) == 4 and args[2] == 'rename': if get_uid() == 0: fault403() name = args[3] urlname = mk_urlname(name) con.execute("UPDATE pages SET name = ?, urlname = ? 
WHERE id = ?;", \ (name, urlname, pid)) con.commit() locate("/song/%s" % urlname); root = Element('root') songtext = Element('songtext', { 'name' : name, 'href' : "/song/%s" % urlname, 'band' : getbandname(band), 'band_href' : "/band/%s" % getbandurlname(band) }) root.append(songtext) def replace_node(line, find_proc, mk_node): if type(line) == list: result = [] for v in line: if type(v) == str: result += replace_node(v, find_proc, mk_node) else: result.append(v) return result (index, param, exclude_len) = find_proc(line) if index >= 0: return [line[:index], mk_node(param)] + \ replace_node(line[index+exclude_len:], find_proc, mk_node) else: return [line] def parse_chords(line): if line == '': return [] def is_valid_chord_char(c): return c.lower() in ['m', 's', 'u', 's', 'a', 'd', '1', '2', '3', '4', '5', '6', '7', '9', '#', '+', '-', '/', '(', ')'] if all(map(lambda c: is_valid_chord_char(c) or c.isspace() \ or c in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', '|'], line)): chordline_node = Element('l') def find_proc(line): from genchord import parse_chord, notes indexes = filter(lambda i: i >= 0, map(lambda note: line.find(note), notes.keys())) if len(indexes) == 0: return (-1, None, 0) index = min(indexes) parse_info = parse_chord(line[index:]) length = parse_info[0] if parse_info is not None: return (index, line[index:index+length], length) unknown_node = Element('unknownchord') unknown_node.text = line[index:l] return (index, unknown_node, l-index) def mk_node(param): if type(param) in (str, unicode): chord_node = Element('c') chord_node.text = param return chord_node else: return param a = replace_node(line, find_proc, mk_node) insert_nodelist(chordline_node, a) return [chordline_node] nodes = [] pos = line.find("|", 0) + 1 end = line.find("|", pos) prev = 0 chordline = [] while end != -1: try: chord = line[pos:end] chord_id = ['Am'].index(chord) line = line[:pos-1] + line[end+1:] chordline.append(" " * (pos - 2 - prev)) chord_node = Element('c') chord_node.text = chord chordline.append(chord_node) end = pos prev = pos + len(chord) - 2 except ValueError: pass pos = end + 1 end = line.find("|", pos) if len(chordline) > 0: chordline_node = Element('l') insert_nodelist(chordline_node, chordline) nodes.append(chordline_node) text_node = Element('v') text_node.text = line.replace("\t", "xxTABx") nodes.append(text_node) return nodes def insert_nodelist(elem, nodelist): f = True for node in nodelist: if type(node) in (str, unicode): if f == True: if elem.text is None: elem.text = node else: elem.text += node else: f.tail = node else: if isinstance(node, getElementClass()): f = node elem.append(node) else: raise Exception(type(node)) def split_verses(line_array): # Search for first non-empty line start = 0 while start < len(line_array) and line_array[start] == '': start += 1 if start == len(line_array): return # Search for the verse ending (empty line) # Assuming that line_array is right stripped try: end = start + line_array[start:].index('') except ValueError: end = len(line_array) verse_node = Element('verse') songtext.append(verse_node) insert_nodelist(verse_node, sum(map(lambda s: parse_chords(s), line_array[start:end]), []) ) if end != len(line_array): split_verses(line_array[end:]) split_verses(map(lambda s: s.rstrip(), contents.splitlines())) lastchanges = Element('lastchanges') root.append(lastchanges) res = con.execute("SELECT id, date, author FROM variants WHERE page = ?", \ (pid,)) for (vid, date, author) in res: selected = date == vartime or (vid == variant and vartime == 0) commit 
= Element('commit', attrib = { 'author' : get_username(author), 'url' : "/song/%s/%i" % (urlname, date), 'rollback_url' : "/song/%s/%i/rollback" % (urlname, date), 'date' : ctime(date), 'selected' : "yes" if selected else "no"}) lastchanges.append(commit) if len(args) == 3 and args[2] == 'edit': editform = Element('editform', attrib = { 'id' : str(pid) }) editform.text = contents.replace('\r\n', '\n') root.append(editform) if has_param('file'): if get_uid() == 0: fault403() fmt = { '\x1b\x00T\x00u\x00x\x00G\x00u\x00i\x00t\x00a\x00r\x00 \x00F\x00i\x00l\x00e\x00 \x00F\x00o\x00r\x00m\x00a\x00t\x00' : 'tuxguitar', '\x18FICHIER GUITAR PRO' : 'guitarpro' } fmt_ext = { 'tuxguitar' : ['.tg'], 'guitarpro' : ['.gtp', '.gp3', '.gp4', '.gp5'] } fp = form['file'].file maxlen = max(map(len, fmt.keys())) fingerprint = fp.read(maxlen) format = None for f in fmt: if fingerprint[:len(f)] == f: format = fmt[f] break if format is None: die("Unknown file format!") ext = path.splitext(form['file'].filename)[1].lower() if ext not in fmt_ext[format]: die("Wrong file extension!") fp.seek(0) from os import path # user_urlname = mk_urlname(get_username(get_uid())).replace('/', '') filemask = "%s_%%02i%s" % (urlname, ext) pathmask = "uploads/%s" % filemask i = 0 while path.isfile(pathmask % i): i += 1 file(pathmask % i, "w").write(fp.read()) con.execute("INSERT INTO files (page, author, filename) VALUES (?, ?, ?);", \ (pid, get_uid(), filemask % i)) con.commit() locate('/song/%s' % urlname) attachments_node = None res = con.execute("SELECT author, filename FROM files WHERE page = ?;", (pid,)) for (author, filename) in res: if attachments_node is None: attachments_node = Element('attachments') root.append(attachments_node) file_node = Element('file', attrib = { 'author' : get_username(author), 'url' : "/uploads/%s" % filename }) attachments_node.append(file_node); output_root(root)
mit
5,521,724,893,140,749,000
29.692557
158
0.512653
false
3.47909
false
false
false
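The replace_node/parse_chords pair in the wikichords view above interleaves plain text with chord elements by repeatedly finding the next chord and splitting the line around it. A much-simplified, hypothetical sketch of the same splitting idea, using tagged tuples instead of XML elements and a deliberately naive chord regex:

import re

# Illustrative only: scan a chord line, cut out each chord name, and interleave
# the plain text with "nodes" (here just tagged tuples).
CHORD_RE = re.compile(r'[A-H][b#]?(m|7|m7|sus4)?')

def split_chords(line):
    parts, pos = [], 0
    for m in CHORD_RE.finditer(line):
        if m.start() > pos:
            parts.append(line[pos:m.start()])       # plain text between chords
        parts.append(('chord', m.group()))          # the "node" for the chord
        pos = m.end()
    if pos < len(line):
        parts.append(line[pos:])
    return parts

assert split_chords("Am   C   G") == [('chord', 'Am'), '   ', ('chord', 'C'), '   ', ('chord', 'G')]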