| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
"""A basic implementation of a Neural Network
by following the tutorial by Andrew Trask
http://iamtrask.github.io/2015/07/12/basic-python-network/
"""
import numpy as np
# sigmoid function
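# note: when called with deriv=True, the argument is assumed to already be a sigmoid
# output s, so the derivative simplifies to s * (1 - s)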
def nonlin(x, deriv=False):
if deriv==True:
return x * (1-x)
return 1 / (1 + np.exp(-x))
# input dataset
x = np.array([[0, 0, 1],
[0, 1, 1],
[1, 0, 1],
[1, 1, 1]])
# output dataset
y = np.array([[0, 0, 1, 1]]).T
# seed random numbers to make calculation
# deterministic (good practice)
np.random.seed(1)
# initialize weights randomly with mean 0
syn0 = 2*np.random.random((3, 1)) - 1
for i in xrange(10000):
# forward propagation
l0 = x
l1 = nonlin(np.dot(l0, syn0))
# how much did we miss
l1_error = y - l1
# multiply how much we missed by the
# slope of the sigmoid at the values in l1
l1_delta = l1_error * nonlin(l1, True)
# update weights
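# full-batch gradient step: project the error-weighted sigmoid slopes back onto the
# inputs (dot product with l0.T); the learning rate is implicitly 1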
syn0 += np.dot(l0.T, l1_delta)
print 'Output after training:'
print l1
| alexandercrosson/ml | neural_network/basic.py | Python | mit | 1,043 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-26 12:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('iiits', '0025_auto_20160425_1937'),
]
operations = [
migrations.CreateModel(
name='Staff',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('photo', models.ImageField(upload_to='iiits/static/iiits/images/staff')),
('email', models.EmailField(max_length=254)),
],
),
migrations.CreateModel(
name='StaffDesignation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='staff',
name='designation',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iiits.StaffDesignation'),
),
]
| IIITS/iiits.ac.in | iiits/migrations/0026_auto_20160426_1232.py | Python | mit | 1,265 |
#!/usr/bin/env python
# based on cb-exit used in CrunchBang Linux <http://crunchbanglinux.org/>
import pygtk
pygtk.require('2.0')
import gtk
import os
import getpass
import time
class i3_exit:
def disable_buttons(self):
self.cancel.set_sensitive(False)
self.logout.set_sensitive(False)
self.suspend.set_sensitive(False)
self.reboot.set_sensitive(False)
self.shutdown.set_sensitive(False)
def cancel_action(self,btn):
self.disable_buttons()
gtk.main_quit()
def logout_action(self,btn):
self.disable_buttons()
self.status.set_label("Exiting i3, please standby...")
os.system("i3-msg exit")
def suspend_action(self,btn):
self.disable_buttons()
self.status.set_label("Suspending, please standby...")
os.system("scrot \"/home/rationalash/Pictures/lockscreen.png\"")
time.sleep(1)
os.system("i3lock -i /home/rationalash/Pictures/lockscreen.png")
time.sleep(1)
os.system("dbus-send --system --print-reply \
--dest=\"org.freedesktop.UPower\" \
/org/freedesktop/UPower \
org.freedesktop.UPower.Suspend")
gtk.main_quit()
def reboot_action(self,btn):
self.disable_buttons()
self.status.set_label("Rebooting, please standby...")
os.system("dbus-send --system --print-reply \
--dest=\"org.freedesktop.ConsoleKit\" \
/org/freedesktop/ConsoleKit/Manager \
org.freedesktop.ConsoleKit.Manager.Restart")
def shutdown_action(self,btn):
self.disable_buttons()
self.status.set_label("Shutting down, please standby...")
os.system("dbus-send --system --print-reply \
--dest=\"org.freedesktop.ConsoleKit\" \
/org/freedesktop/ConsoleKit/Manager \
org.freedesktop.ConsoleKit.Manager.Stop")
def create_window(self):
self.window = gtk.Window()
title = "Log out " + getpass.getuser() + "? Choose an option:"
self.window.set_title(title)
self.window.set_border_width(5)
self.window.set_size_request(500, 80)
self.window.set_resizable(False)
self.window.set_keep_above(True)
self.window.stick()
self.window.set_position(1)
self.window.connect("delete_event", gtk.main_quit)
windowicon = self.window.render_icon(gtk.STOCK_QUIT, gtk.ICON_SIZE_MENU)
self.window.set_icon(windowicon)
#Create HBox for buttons
self.button_box = gtk.HBox()
self.button_box.show()
#Cancel button
self.cancel = gtk.Button(stock = gtk.STOCK_CANCEL)
self.cancel.set_border_width(4)
self.cancel.connect("clicked", self.cancel_action)
self.button_box.pack_start(self.cancel)
self.cancel.show()
#Logout button
self.logout = gtk.Button("_Log out")
self.logout.set_border_width(4)
self.logout.connect("clicked", self.logout_action)
self.button_box.pack_start(self.logout)
self.logout.show()
#Suspend button
self.suspend = gtk.Button("_Suspend")
self.suspend.set_border_width(4)
self.suspend.connect("clicked", self.suspend_action)
self.button_box.pack_start(self.suspend)
self.suspend.show()
#Reboot button
self.reboot = gtk.Button("_Reboot")
self.reboot.set_border_width(4)
self.reboot.connect("clicked", self.reboot_action)
self.button_box.pack_start(self.reboot)
self.reboot.show()
#Shutdown button
self.shutdown = gtk.Button("_Power off")
self.shutdown.set_border_width(4)
self.shutdown.connect("clicked", self.shutdown_action)
self.button_box.pack_start(self.shutdown)
self.shutdown.show()
#Create HBox for status label
self.label_box = gtk.HBox()
self.label_box.show()
self.status = gtk.Label()
self.status.show()
self.label_box.pack_start(self.status)
#Create VBox and pack the above HBox's
self.vbox = gtk.VBox()
self.vbox.pack_start(self.button_box)
self.vbox.pack_start(self.label_box)
self.vbox.show()
self.window.add(self.vbox)
self.window.show()
def __init__(self):
self.create_window()
def main():
gtk.main()
if __name__ == "__main__":
go = i3_exit()
main()
| RationalAsh/configs | i3-exit.py | Python | mit | 4,584 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMDO3000 import *
class tektronixMDO3012(tektronixMDO3000):
"Tektronix MDO3012 IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MDO3012')
super(tektronixMDO3012, self).__init__(*args, **kwargs)
self._analog_channel_count = 2
self._digital_channel_count = 16
self._channel_count = self._analog_channel_count + self._digital_channel_count
self._bandwidth = 100e6
# AFG option
self._output_count = 1
self._init_channels()
self._init_outputs()
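# illustrative use with python-ivi (the resource string below is an assumption, not from this file):
#   import ivi
#   scope = ivi.tektronix.tektronixMDO3012("TCPIP0::192.168.1.100::INSTR")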
| Diti24/python-ivi | ivi/tektronix/tektronixMDO3012.py | Python | mit | 1,724 |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import graphs
def main(argv):
k, Gs = files.read_graphs(argv[0])
sinks = []
for G in Gs:
n, m = G[:2]
edges = G[2]
nodes = [v for v in xrange(1, n + 1)]
sinks.append(graphs.general_sink(nodes, edges))
print ' '.join(str(sink) for sink in sinks)
if __name__ == "__main__":
sys.setrecursionlimit(1048576)
main(sys.argv[1:])
| cowboysmall/rosalind | src/heights/rosalind_gs.py | Python | mit | 489 |
# -*- coding: utf-8 -*-
"""
@file
@brief Link to data from `Gutenberg <http://www.gutenberg.org/>`_,
provides an automated way to get the data from this website.
Some data may be replicated here to unit test notebooks.
"""
import os
import urllib.request
from urllib.error import URLError
def gutenberg_name(name="condamne", local=False, load=False):
"""
Retrieves data from `Gutenberg <http://www.gutenberg.org/>`_.
@param name name of the requested data
@param local use local version
@param load load the data
@return content or filename or url
List of available datasets:
* ``condamne``: `Le dernier jour d'un condamné <http://www.gutenberg.org/ebooks/6838>`_, Victor Hugo
"""
this = os.path.abspath(os.path.dirname(__file__))
data = os.path.join(this, "data_gutenberg")
if name == "condamne":
url = "http://www.gutenberg.org/cache/epub/6838/pg6838.txt"
loc = os.path.join(data, "pg6838.txt")
if load:
text = None
if not local:
try:
with urllib.request.urlopen(url) as u:
text = u.read()
u.close()
except URLError:
# we switch to local
text = None
if text is not None:
text = text.decode("utf8")
return text
if not os.path.exists(loc):
raise FileNotFoundError(loc)
with open(loc, "r", encoding="utf8") as f:
text = f.read()
return text
else:
if local:
if not os.path.exists(loc):
raise FileNotFoundError(loc)
return loc
else:
return url
else:
raise ValueError(
"unknown name '{0}', check the code of the function".format(name))
| sdpython/ensae_teaching_cs | src/ensae_teaching_cs/data/gutenberg.py | Python | mit | 1,949 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('backend', '0005_organization_registered'),
]
operations = [
migrations.RemoveField(
model_name='organization',
name='lobbyists_with_access',
),
migrations.AlterField(
model_name='organization',
name='explore_url',
field=models.CharField(max_length=128, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='organization',
name='lobbyists',
field=models.IntegerField(null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='organization',
name='money',
field=models.IntegerField(null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='organization',
name='name',
field=models.CharField(max_length=128, null=True, blank=True),
preserve_default=True,
),
]
| euhackathon/commission-today-api | backend/backend/migrations/0006_auto_20141203_0021.py | Python | mit | 1,218 |
from django.db import models
from linda_app.models import DatasourceDescription
# A single test over a data source
class EndpointTest(models.Model):
execution_time = models.DateTimeField(auto_now_add=True) # test timestamp
datasource = models.ForeignKey(DatasourceDescription) # tested datasource
up = models.BooleanField(default=False) # was the endpoint up? - simple select query
response_time = models.IntegerField(blank=True, null=True) # response time for a simple select query
supports_minus = models.BooleanField(default=True, blank=True) # did the endpoint support SparQL features 1.1 like MINUS? | LinDA-tools/LindaWorkbench | linda/endpoint_monitor/models.py | Python | mit | 631 |
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ether
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.lib.packet import arp, ipv4
from ryu.topology.api import get_switch, get_link, get_host
from ryu.topology import event, switches
import networkx as nx
from ryu.lib import hub
class actualSDN_switch(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(actualSDN_switch, self).__init__(*args, **kwargs)
self.vtable = {}
# default vlan table
self.vtable = {'00:00:00:00:00:01':'1',
'00:00:00:00:00:02':'1',
'00:00:00:00:00:03':'1'}
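# vtable maps a host MAC address to its VLAN id (stored as a string); only hosts that
# share a VLAN id get shortest paths installed between them (see default_path_install)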
self.mac_to_ip = {} # mac <-> ip
self.ip_to_mac = {} # ip <-> mac
self.mac_to_port = {} # host in which port
self.stable = {} #dpid<->datapath
self.default_datapath = None
self.default_ev = None
self.host_enter = 0 # host enter number
self.switch_enter = 0 # switch enter number
self.mac_to_dp = {} # mac <-> datapath
self.switches = [] #all switches' dpid
self.switches_dp = [] #all switches' datapath
# self.path_db = [] # store shortest path
# monitor init
self.datapaths={} # all datapaths
self.monitor_thread = hub.spawn(self._monitor)
self.bandwidth = {}
#networkx init
self.topology_api_app = self
self.directed_Topo = nx.DiGraph()
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
self.datapaths[datapath.id] = datapath
self.default_datapath = datapath
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
# read the mac_table(valid user) and put the information into the mac_to_ip and ip_to_mac
with open('./mac_table.txt') as f:
line = f.readlines()
line = [x.strip('\n') for x in line]
for content in line:
tmp = content.split(',')
mac = tmp[0]
ip = tmp[1]
self.mac_to_ip[mac] = ip
self.ip_to_mac[ip] = mac
#self.host_num = len(self.ip_to_mac)
self.host_num = 3
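# NOTE: hard-coded to the three hosts of the default vtable; the commented line above
# would derive the count from mac_table.txt instead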
# _monitor, _request_stats adn _port_stats_reply_handler, the three functions are used when monitor the traffic
def _monitor(self):
while True:
for dp in self.datapaths.values():
self._request_stats(dp)
hub.sleep(3)
def _request_stats(self, datapath):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
req = parser.OFPPortStatsRequest(datapath, 0 , ofproto.OFPP_ANY)
datapath.send_msg(req)
@set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
def _port_stats_reply_handler(self, ev):
body = ev.msg.body
parser = ev.msg.datapath.ofproto_parser
self.logger.info('datapath port '
'rx-pkts rx-bytes '
'tx-pkts tx-bytes bandwidth')
self.logger.info('---------------- -------- '
'-------- -------- '
'-------- -------- --------')
for stat in sorted(body):
if stat.port_no < 7:
index = str(ev.msg.datapath.id) + '-' + str(stat.port_no)
if index not in self.bandwidth:
self.bandwidth[index] = 0
transfer_bytes = stat.rx_bytes + stat.tx_bytes
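# rough throughput in bytes/s: growth of the cumulative rx+tx byte counters divided
# by the 3-second polling interval used in _monitor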
speed = (transfer_bytes - self.bandwidth[index]) / 3
self.logger.info('%016x %8x %8d %8d %8d %8d %8d\n',
ev.msg.datapath.id, stat.port_no,
stat.rx_packets, stat.rx_bytes,
stat.tx_packets, stat.tx_bytes, speed)
self.bandwidth[index] = transfer_bytes
def add_flow(self, datapath, priority, match, actions, buffer_id=None):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst)
datapath.send_msg(mod)
print('add flow!!')
# delete flow
def del_flow(self, datapath, match):
ofproto = datapath.ofproto
ofproto_parser = datapath.ofproto_parser
mod = ofproto_parser.OFPFlowMod(datapath=datapath,
command= ofproto.OFPFC_DELETE,out_port=ofproto.OFPP_ANY, out_group=ofproto.OFPG_ANY,match=match)
datapath.send_msg(mod)
print('del flow')
# when src in topo and change port, this situation will run this function to delete flows which are relative the src.
def ShortestPathDeleteFlow(self, datapath, *args):
if datapath==None:
return
ofproto = datapath.ofproto
ofproto_parser = datapath.ofproto_parser
#print('stable',self.stable)
for key, value in self.stable.items():
for arg in args:
match = ofproto_parser.OFPMatch(eth_dst=arg)
self.del_flow(value, match)
match = ofproto_parser.OFPMatch(eth_src=arg)
self.del_flow(value, match)
print('SP del flow end')
# handle arp package
def _handle_arp(self, datapath, in_port, pkt_ethernet, arp_pkt):
if arp_pkt.opcode != arp.ARP_REQUEST:
return
if self.ip_to_mac.get(arp_pkt.dst_ip) == None:
return
#Browse Target hardware adress from ip_to_mac table.
get_mac = self.ip_to_mac[arp_pkt.dst_ip]
#target_ip_addr = arp_pkt.dst_ip
pkt = packet.Packet()
#Create ethernet packet
pkt.add_protocol(ethernet.ethernet(ethertype=ether.ETH_TYPE_ARP,dst=pkt_ethernet.src,src=get_mac))
#Create ARP Reply packet
pkt.add_protocol(arp.arp(opcode=arp.ARP_REPLY,
src_mac=get_mac,
src_ip=arp_pkt.dst_ip,
dst_mac=arp_pkt.src_mac,
dst_ip=arp_pkt.src_ip))
self._send_packet(datapath, in_port, pkt)
print('arp', get_mac, pkt_ethernet.src,)
# add host in the direct topo
def AddHost(self, dpid, host, in_port):
#Add host into directed_topo
self.directed_Topo.add_node(host)
#Add edge switch's port to src host
self.directed_Topo.add_edge(dpid, host, {'port':in_port})
#Add edge host to switch
self.directed_Topo.add_edge(host, dpid)
return
@set_ev_cls(event.EventSwitchEnter)
def get_topology_data(self, ev):
#Topo information of switch
self.switch_enter += 1
#Get Switch List
switch_list = get_switch(self.topology_api_app, None)
self.switches = [switch.dp.id for switch in switch_list]
self.switches_dp = [switch.dp for switch in switch_list]
#Add switch dpid into Directed Topology
self.directed_Topo.add_nodes_from(self.switches)
#Get Link List
links_list = get_link(self.topology_api_app, None)
#When all Link enter
if self.switch_enter == len(self.switches):
links = [(link.src.dpid, link.dst.dpid, {'port':link.src.port_no}) for link in links_list ]
links.sort()
self.directed_Topo.add_edges_from(links)
print('****List Of Links****')
print(self.directed_Topo.edges(data = True))
# install direct topo.
# if the hosts in the same vlan, the function will install paths between them.
def default_path_install(self, ev):
for src in self.vtable:
for dst in self.vtable:
if src != dst:
if self.vtable[src] == self.vtable[dst]:
print('****Shortest path****')
print('vtable', self.vtable)
print(self.directed_Topo.edges(data = True))
self.ShortestPathInstall(ev, src, dst)
# Using networkx, the paths between the hosts in the same vlan are the shortest.
def ShortestPathInstall(self, ev, src, dst):
#Compute shortest path
path = nx.shortest_path(self.directed_Topo, src, dst)
#Add Flow along with the path
for k, sw in enumerate(self.switches):
if sw in path:
next = path[path.index(sw)+1]
out_port = self.directed_Topo[sw][next]['port']
actions = [self.switches_dp[k].ofproto_parser.OFPActionOutput(out_port)]
match = self.switches_dp[k].ofproto_parser.OFPMatch(eth_src=src, eth_dst=dst)
self.add_flow(self.switches_dp[k], 1, match, actions)
return
def _send_packet(self, datapath, in_port, pkt):
ofproto =datapath.ofproto
parser = datapath.ofproto_parser
pkt.serialize()
data = pkt.data
actions = [parser.OFPActionOutput(port=in_port)]
out = parser.OFPPacketOut(datapath=datapath,
buffer_id=ofproto.OFP_NO_BUFFER,
in_port=ofproto.OFPP_CONTROLLER,
actions=actions,
data=data)
datapath.send_msg(out)
# the main function
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
pkt_ethernet = pkt.get_protocols(ethernet.ethernet)[0]
if not pkt_ethernet:
return
if pkt_ethernet.ethertype == 35020:
# ignore lldp packet
return
arp_pkt = pkt.get_protocol(arp.arp)
if pkt_ethernet.ethertype== 2054:
self._handle_arp(datapath, in_port, pkt_ethernet, arp_pkt)
return
dst = pkt_ethernet.dst
src = pkt_ethernet.src
out_port = None
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.mac_to_dp.setdefault(src, datapath)
self.stable.setdefault(dpid, datapath)
self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# when the src is valid
if src in self.vtable:
# if the valid src not in the direct topo
if not self.directed_Topo.has_node(src):
print('add', src)
self.AddHost(dpid,src,in_port)
#Add information to mac_to_port
self.mac_to_port[dpid][src] = in_port
self.host_enter += 1
# if entered host > 3, it will install shortest path
if self.host_enter == self.host_num:
self.default_path_install(ev)
#change port function
else:
#change port: del relative flow and reinstall
if in_port != self.mac_to_port.get(dpid).get(src):
#Delete the wrong flow
self.ShortestPathDeleteFlow(datapath, src)
#Update mac_to_port table
for key, value in self.mac_to_port.items():
if value.has_key(src):
for mac, port in value.items():
if mac == src:
del self.mac_to_port[key][mac]
break
self.mac_to_port[dpid][src] = in_port
#Change Graph
#Remove wrong
self.directed_Topo.remove_node(src)
#Add Correct host
self.AddHost(dpid, src, in_port)
#Add new flows and path
self.default_path_install(ev)
# when the dst is in the direct topo
if dst in self.mac_to_port[dpid]:
if self.vtable[src] != None and self.vtable[src] == self.vtable[dst]:
out_port = self.mac_to_port[dpid][dst]
actions = [parser.OFPActionOutput(out_port)]
print('out_port',out_port)
else:
out_port = ofproto.OFPP_FLOOD
actions=[parser.OFPActionOutput(out_port)]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
| ray6/sdn | actualSDN.py | Python | mit | 14,660 |
class OWL:
def __init__(self):
pass | Jawoll/automatchthreads | modules/scrapers/owl.py | Python | mit | 47 |
#-------------------------------------------------------------------------------------
from bin.dazer_methods import Dazer
from bin.lib.ssp_functions.ssp_synthesis_tools import ssp_fitter
#-------------------------------------------------------------------------------------
#!/usr/bin/python
import sys
import numpy as np
from numpy import float_, absolute as abs, random as ran
import time
import ssp_Hector_Fit3D_tools as ssp
import pyfits as pyf
from pyfits import getheader as ghead, getdata as gdata, writeto as wfits
from scipy.interpolate.interpolate import interp1d
import ssp_Hector_Fit3D_my as my
import os.path as pt
import matplotlib
import os
#Example command
#
'''
cd workspace/dazer/bin/lib/ssp_functions/
Pyhon command to run original file
python ssp_Hector_Fit3D.py NGC5947.spec_5.txt ssp_lib.fits,ssp_lib.3.fits auto_ssp.NGC5947.cen.only.out mask_elines.txt auto_ssp_V500_several_Hb.config 1 -1 40 3850 6800 emission_lines.txt 0.02 0.001 0.015 0.025 2 0.5 1 9 0.5 0.1 0.0 1.6
'''
def sycall(comand):
from subprocess import call
line=comand.split(" ")
fcomand=[]
fcomand.extend(line)
linp=''
nx=len(fcomand)
for i in range(0, nx):
linp=linp+fcomand[i]+" "
print linp
call(fcomand)
sys.argv=filter(None,sys.argv)
ran.seed(None)
vel_light=299792.458
red_elines=0.0
sec_ini=ssp.print_time()
time1=ssp.get_time()
if len(sys.argv) < 7:
print "USE: auto_ssp.py SPEC1.txt SSP_SFH.fits,SSP_KIN.fits OUTFILE MASK_LIST CONFIG_FILE PLOT [min max] [wmin wmax] [redshift_elines_to_mask] [input_redshift delta_redshift min_redshift max_redshift] [input_sigma delta_sigma min_sigma max_sigma] [input_Av delta_Av min_Av max_Av]"
print "CONFIG_FILE:"
print "redshift delta_redshift min_redshift max_redshift"
print "sigma delta_sigma min_sigma max_sigma"
print "Av delta_Av min_Av max_Av [Same range for all]"
print "N_SYSTEMS"
print "(1) START_W END_W MASK_FILE CONFIG_FILE NPOLY MASK_FILE_POLY"
print "..."
print "(N) START_W END_W MASK_FILE CONFIG_FILE NPOLY MASK_FILE_POLY"
print "MIN_DELTA_CHISQ MAX_NITER CUT_MEDIAN_FLUX"
print "start_w_peak end_w_peak"
print "wavelength_to_norm width_AA new_back_templates.fits"
inline_params = ['ssp_Hector_Fit3D.py', 'NGC5947.spec_5.txt','ssp_lib.fits,ssp_lib.fits','auto_ssp.NGC5947.cen.only.out','mask_elines.txt','auto_ssp_V500_several_Hb.config' ,'1', '-1', '40', '3850', '6800', 'emission_lines.txt', '0.02', '0.001', '0.015', '0.025', '2', '0.5', '1', '9', '0.5', '0.1', '0.0', '1.6']
sys.argv = inline_params
unc_file=sys.argv[1]
clean_file="clean_"+sys.argv[1]
junk_back_list=sys.argv[2]
data=junk_back_list.split(',')
if len(data) == 2:
back_list=data[0]
back_list2=data[1]
else:
back_list=junk_back_list
back_list2=junk_back_list
outfile=sys.argv[3]
out_elines="elines_"+outfile
out_single="single_"+outfile
out_fit="fit_"+outfile
out_coeffs_file="coeffs_"+outfile
out_fit="output."+outfile+".fits"
out_ps=outfile
#######################################
# Clean previous results
#######################################
call="rm -rf "+outfile
sycall(call)
call="rm -rf "+out_elines
sycall(call)
call="rm -rf "+out_single
sycall(call)
call="rm -rf "+out_fit
sycall(call)
D_SYS_VEL=100
mask_list=sys.argv[4]
config_file=sys.argv[5]
plot=int(sys.argv[6])
if plot == 2:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
dev_plot=outfile+".pdf"
dev_plot_single="single_"+outfile+".pdf"
else:
if plot == 0:
matplotlib.use('Agg')
dev_plot="null"
dev_plot_single="null"
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
smooth=1
MIN_CHISQ=1e12
out_file="junk.junk"
factor=1
box=1
deft=0
if len(sys.argv) == 9:
min=float_(sys.argv[7])
max=float_(sys.argv[8])
deft=1
if len(sys.argv) == 11:
min=float_(sys.argv[7])
max=float_(sys.argv[8])
min_wave=sys.argv[9]
max_wave=sys.argv[10]
deft=2
if len(sys.argv) == 12:
min=float_(sys.argv[7])
max=float_(sys.argv[8])
min_wave=sys.argv[9]
max_wave=sys.argv[10]
elines_mask=sys.argv[11]
deft=2
input_redshift=0
if len(sys.argv) == 13:
min=float_(sys.argv[7])
max=float_(sys.argv[8])
min_wave=sys.argv[9]
max_wave=sys.argv[10]
elines_mask=sys.argv[11]
input_redshift=float_(sys.argv[12])
redshift=input_redshift
deft=2
f=open(config_file,'r')
line=f.readline()
data=line.split(" ")
data=filter(None,data)
redshift=float_(data[0])
d_redshift=float_(data[1])
min_redshift=float_(data[2])
max_redshift=float_(data[3])
DV=float_(data[4])
RV=float_(data[5])
DS=float_(data[6])
RS=float_(data[7])
MIN_W=float_(data[8])
MAX_W=float_(data[9])
if len(sys.argv) == 16:
min=float_(sys.argv[7])
max=float_(sys.argv[8])
min_wave=sys.argv[9]
max_wave=sys.argv[10]
elines_mask=sys.argv[11]
input_redshift=float_(sys.argv[12])
input_d_redshift=float_(sys.argv[13])
input_min_redshift=float_(sys.argv[14])
input_max_redshift=float_(sys.argv[15])
redshift=input_redshift
d_redshift=input_d_redshift
min_redshift=input_min_redshift
max_redshift=input_max_redshift
deft=2
line=f.readline()
data=line.split(" ")
data=filter(None,data)
sigma=data[0]
d_sigma=data[1]
min_sigma=data[2]
max_sigma=data[3]
if len(sys.argv) == 20:
min=float_(sys.argv[7])
max=float_(sys.argv[8])
min_wave=sys.argv[9]
max_wave=sys.argv[10]
elines_mask=sys.argv[11]
input_redshift=float_(sys.argv[12])
input_d_redshift=float_(sys.argv[13])
input_min_redshift=float_(sys.argv[14])
input_max_redshift=float_(sys.argv[15])
sigma=float_(sys.argv[16])
d_sigma=float_(sys.argv[17])
min_sigma=float_(sys.argv[18])
max_sigma=float_(sys.argv[19])
redshift=input_redshift
d_redshift=input_d_redshift
min_redshift=input_min_redshift
max_redshift=input_max_redshift
deft=2
line=f.readline()
data=line.split(" ")
data=filter(None,data)
Av_IN=data[0]
d_Av_IN=data[1]
min_Av=data[2]
max_Av=data[3]
if len(sys.argv) == 24:
min=float_(sys.argv[7])
max=float_(sys.argv[8])
min_wave=sys.argv[9]
max_wave=sys.argv[10]
elines_mask=sys.argv[11]
input_redshift=float_(sys.argv[12])
input_d_redshift=float_(sys.argv[13])
input_min_redshift=float_(sys.argv[14])
input_max_redshift=float_(sys.argv[15])
sigma=float_(sys.argv[16])
d_sigma=float_(sys.argv[17])
min_sigma=float_(sys.argv[18])
max_sigma=float_(sys.argv[19])
Av_IN=float_(sys.argv[20])
d_Av_IN=float_(sys.argv[21])
min_Av=float_(sys.argv[22])
max_Av=float_(sys.argv[23])
redshift=input_redshift
d_redshift=input_d_redshift
min_redshift=input_min_redshift
max_redshift=input_max_redshift
deft=2
data=min_wave.split(',')
if len(data) == 2:
min_wave=float_(data[0])
min_wave2=float_(data[1])
else:
min_wave=float_(min_wave)
min_wave2=min_wave
data=max_wave.split(',')
if len(data) == 2:
max_wave=float_(data[0])
max_wave2=float_(data[1])
else:
max_wave=float_(max_wave)
max_wave2=max_wave
REDSHIFT=redshift
Av_ini=Av_IN
if d_redshift !=0:
fit_redshift=1
else:
fit_redshift=0
#print "FIT_RED "+str(fit_redshift)+" "+str(d_redshift)+" "+str(len(sys.argv))
line=f.readline()
data=line.split(" ")
data=filter(None,data)
ns=int(data[0])
start_w_min=1e12
end_w_max=-1e12
start_w_E=[]
end_w_E=[]
mask_E=[]
config_E=[]
n_line_E=[]
npoly_E=[]
mask_poly_E=[]
nmin_E=[]
nmax_E=[]
config_line_E=[]
for i in range(0, ns):
line=f.readline()
data=line.split(" ")
data=filter(None,data)
start_w_e=float_(data[0])
end_w_e=float_(data[1])
mask_e=data[2]
config_e=data[3]
npoly_e=int(data[4])
mask_poly_e=data[5]
nmin_e=float_(data[6])
nmax_e=float_(data[7])
start_w_E.extend([start_w_e])
end_w_E.extend([end_w_e])
mask_E.extend([mask_e])
config_E.extend([config_e])
#
# We read all the information
#
n_line=0
linef=""
f2=open(config_e,'r')
for line in f2:
linef=linef+line+";"
n_line=n_line+1
config_line_E.extend([linef])
f2.close()
n_line_E.extend([n_line])
npoly_E.extend([npoly_e])
mask_poly_E.extend([mask_poly_e])
nmin_E.extend([nmin_e])
nmax_E.extend([nmax_e])
if start_w_e < start_w_min:
start_w_min=start_w_e
if end_w_e > end_w_max:
end_w_max=end_w_e
line=f.readline()
data=line.split(" ")
data=filter(None,data)
MIN_DELTA_CHISQ=float_(data[0])
MAX_NITER=int(data[1])
CUT_MEDIAN_FLUX=float_(data[2])
ABS_MIN=0.5*CUT_MEDIAN_FLUX
line=f.readline()
data=line.split(" ")
data=filter(None,data)
start_w_peak=float_(data[0])
end_w_peak=float_(data[1])
line=f.readline()
data=line.split(" ")
data=filter(None,data)
if len(data) == 3:
wave_norm=data[0]
w_wave_norm=data[1]
new_back_file=data[2]
else:
wave_norm=[]
w_wave_norm=[]
new_back_file=[]
f.close()
#
# SFH template
#
[pdl_flux_c_ini,hdr]=gdata(back_list, 0, header=True)
[nf,n_c]=pdl_flux_c_ini.shape
coeffs=np.zeros([nf,3])
crpix=hdr["CRPIX1"]
cdelt=hdr["CDELT1"]
crval=hdr["CRVAL1"]
n_mc=30
#
# Kinematics template
#
[pdl_flux_c_ini2,hdr2]=gdata(back_list2, 0, header=True)
[nf2,n_c2]=pdl_flux_c_ini2.shape
coeffs2=np.zeros([nf2,3])
crpix2=hdr2["CRPIX1"]
cdelt2=hdr2["CDELT1"]
crval2=hdr2["CRVAL1"]
Av=np.zeros(nf)
d_Av=np.zeros(nf)
for i in range(0, nf):
Av[i]=Av_IN
d_Av[i]=d_Av_IN
if mask_list == "none":
nmask=0
else:
f=open(mask_list,'r')
start_mask=[]
end_mask=[]
for line in f:
data=line.split(" ")
data=filter(None,data)
if len(data) != 0 and data[0] != "\n":
start_mask.extend([float_(data[0])])
end_mask.extend([float_(data[1])])
nmask=len(start_mask)
f.close()
n_mask_org=nmask
if elines_mask == "none":
nmask_e=0
nline=0
else:
f=open(elines_mask,'r')
nmask_e=0
nline=0
w_eline=[]
start_mask_e=[]
end_mask_e=[]
for line in f:
data=line.split(" ")
data=filter(None,data)
if data[0] != "#":
w_eline.extend([float_(data[0])])
start_mask_e.extend([w_eline[nline]*(1+input_redshift)-4*sigma])
end_mask_e.extend([w_eline[nline]*(1+input_redshift)+4*sigma])
nmask_e=nmask_e+1
nline=nline+1
f.close()
#
# We read the input spectrum
#
n_unc=0
y_min=1e12
y_max=-1e12
f=open(unc_file,'r')
i_scale=0
FLUX=0
have_error=0
index_unc=[]
wave_unc=[]
flux_unc=[]
flux_unc_org=[]
flux_unc_input=[]
e_flux_unc=[]
color_unc=[]
masked=[]
masked2=[]
masked_Av=[]
flux_masked=[]
flux_masked2=[]
e_flux_unc_kin=[]
wave_scale=0
for line in f:
data=line.split(' ')
data=filter(None,data)
if data[0] != "#":
index_unc.extend([float_(data[0])])
wave_unc.extend([float_(data[1])])
flux_unc.extend([float_(data[2])])
flux_unc_org.extend([float_(data[2])])
flux_unc_input.extend([float_(data[2])])
if len(data) > 2:
# Variance Column!
e_flux_unc.extend([np.sqrt(abs(float_(data[3])))])
color_unc.extend([1])#$data[4];
have_error=1
else:
e_flux_unc.extend([np.sqrt(abs(float_(data[2])))/10])
color_unc.extend([1])
if np.isnan(flux_unc[n_unc]):
flux_unc[n_unc]=flux_unc[n_unc-1]
if flux_unc[n_unc] < y_min:
y_min=flux_unc[n_unc]
if flux_unc[n_unc] > y_max:
y_max=flux_unc[n_unc]
if n_unc > 0:
if wave_unc[n_unc-1] <= wave_scale and wave_unc[n_unc] > wave_scale:
i_scale=n_unc
masked.extend([1])
masked2.extend([1])
masked_Av.extend([1])
if flux_unc[n_unc] == 0:
masked[n_unc]=0
masked2[n_unc]=0
w_test=wave_unc[n_unc-1]
for j in range(0, nmask):
if w_test > start_mask[j] and w_test < end_mask[j]:
masked[n_unc]=0
masked2[n_unc]=0
masked_Av[n_unc]=0
if deft == 2:
if w_test < min_wave:
masked[n_unc]=0
masked_Av[n_unc]=0
if w_test > max_wave:
masked[n_unc]=0
masked_Av[n_unc]=0
if w_test < min_wave2:
masked2[n_unc]=0
if w_test > max_wave2:
masked2[n_unc]=0
for j in range(0, nmask_e):
if w_test > start_mask_e[j] and w_test < end_mask_e[j]:
masked2[n_unc]=0
masked_Av[n_unc]=0
flux_masked.extend([flux_unc[n_unc]*masked[n_unc]])
flux_masked2.extend([flux_unc[n_unc]*masked2[n_unc]])
if wave_unc[n_unc] > min_wave and wave_unc[n_unc] < max_wave:
FLUX=FLUX+flux_masked[n_unc]
e_flux_unc_kin.extend([e_flux_unc[n_unc]])
n_unc=n_unc+1
f.close()
sigma_e=np.median(e_flux_unc)
#print "SIGMA_E = "+str(sigma_e)
for i in range(0, n_unc):
if e_flux_unc[i] > 1.5*sigma_e:
e_flux_unc[i]=1.5*sigma_e
e_flux_unc_kin[i]=1.5*sigma_e
if deft == 2:
y_min=min
y_max=max
else:
min_wave=np.amin(wave_unc)
max_wave=np.amax(wave_unc)
if deft == 1:
y_min=min
y_max=max
median_flux=np.median(flux_masked)
dpix_unc=wave_unc[1]-wave_unc[0]
max=3*median_flux
pdl_output=np.zeros([6,n_unc])
#
# We create a kernel
#
med_flux=np.median(flux_unc)
chi_sq_min_now=1e12
min_chi_sq=chi_sq_min_now
print '-----The redshift', redshift
print '-----The sigma', sigma
print '-----The Av', Av.shape
print '-----The crval2', crval2
print '-----The cdelt2', cdelt2
print '-----The crpix2', crpix2
print '-----back_list2', back_list2
print '-----nf2', nf2
print '-----n_c2', n_c2
print '-----pdl_flux_c_ini2', pdl_flux_c_ini2.shape
#print '-----hdr2', hdr2
print '-----wave_unc', wave_unc
print '-----masked_Av', len(masked_Av)
print '-----e_flux_unc', e_flux_unc
print '-----flux_unc', flux_unc
print '-----n_mc', n_mc
print '-----chi_sq_min_now', chi_sq_min_now
print '-----min_chi_sq', min_chi_sq
ssp_dat, mis_cosas = ssp.fit_ssp_lin_no_zero(redshift,sigma,Av,crval2,cdelt2,crpix2,nf2,n_c2,pdl_flux_c_ini2,hdr2,wave_unc,masked_Av,e_flux_unc,flux_unc,n_mc,chi_sq_min_now,min_chi_sq,plot)
min_chi_sq=ssp_dat[0]
# print "CUT = "+str(med_flux)+" "+str(CUT_MEDIAN_FLUX)
# print str(redshift)+","+str(sigma)
#--------------------------------------------------------------------
print '\n----------------------------------------------------------------------------\n'
dzp = Dazer()
dz = ssp_fitter()
#Data folder location
data_folder = '/home/vital/workspace/Fit_3D/example_files/'
defaut_conf = 'auto_ssp_V500_several_Hb.config'
#Read parameters from command line
command_fit_dict = dz.load_command_params()
#Read parameters from config file
conf_file_address = command_fit_dict['config_file_address'] if 'config_file_address' in command_fit_dict != None else data_folder + defaut_conf
config_fit_dict = dz.load_config_params(conf_file_address)
#Update the fit configuration giving preference to the values from the command line
config_fit_dict.update(command_fit_dict)
#Import input data: spectrum, masks, emision line loc, stellar bases...
dz.load_input_data(config_fit_dict)
dz.fit_conf['zero_mask'] = np.array(mis_cosas[1])
obs_fit_spectrum = dz.fit_ssp()
dzp.FigConf()
#dzp.data_plot(dz.fit_conf['obs_wave'], dz.fit_conf['obs_flux'], label='obs_flux')
dzp.data_plot(dz.fit_conf['obs_wave'], dz.fit_conf['zero_mask'], label='my mask')
dzp.data_plot(mis_cosas[0], mis_cosas[1], label='Hector mask')
dzp.data_plot(mis_cosas[0], mis_cosas[2], label='Hector fit')
dzp.data_plot(mis_cosas[0], obs_fit_spectrum, label='my fit')
dzp.FigWording('Wave', 'Flux', 'Input spectra')
dzp.display_fig()
print '\n----------------------------------------------------------------------------\n'
#--------------------------------------------------------------------
print 'Finished here'
sys.exit(0)
if med_flux < ABS_MIN:
# WHAT TO DO???
# We print all!!!
sys.exit(0)
if med_flux > CUT_MEDIAN_FLUX:
if MIN_W == 0:
MIN_W = min_wave
if MAX_W == 0:
MAX_W=max_wave
################
# REDSHIFT DETERMINATION
my_plot=2
K=0
nr=0
chi_r=[]
red_r=[]
if d_redshift > 0:
min_chi_sq=1e30
RED=min_redshift
while RED < max_redshift:
ssp_dat1=ssp.fit_ssp_lin_no_zero_no_cont(RED,sigma,Av,crval2,cdelt2,crpix2,nf2,n_c2,pdl_flux_c_ini2,hdr2,wave_unc,masked2,e_flux_unc_kin,flux_masked2,n_mc,chi_sq_min_now,min_chi_sq,my_plot)
chi_now=ssp_dat1[0]
chi_r.extend([chi_now])
red_r.extend([RED])
# print RED,chi_now,d_redshift
if nr > 1 and chi_r[nr-1] < min_chi_sq and chi_r[nr-1] > 0:
redshift=red_r[nr-1]
min_chi_sq=chi_r[nr-1]
K=nr-1
nr=nr+1
RED=RED+d_redshift
#
# TWO
#
e_redshift=d_redshift
nr=0
chi_r=[]
red_r=[]
RED=redshift-1.5*d_redshift
max_redshift=redshift+1.5*d_redshift
d_redshift=0.1*d_redshift
while RED < max_redshift:
ssp_dat2=ssp.fit_ssp_lin_no_zero_no_cont(RED,sigma,Av,crval2,cdelt2,crpix2,nf2,n_c2,pdl_flux_c_ini2,hdr2,wave_unc,masked2,e_flux_unc,flux_masked2,n_mc,chi_sq_min_now,min_chi_sq,my_plot)
chi_now=ssp_dat2[0]
chi_r.extend([chi_now])
red_r.extend([RED])
if nr > 1 and chi_r[nr-1] < chi_r[nr-2] and chi_r[nr-1] < chi_r[nr] and chi_r[nr-1] <= min_chi_sq:
a=red_r[nr-2]
b=red_r[nr-1]
c=red_r[nr]
fa=chi_r[nr-2]
fb=chi_r[nr-1]
fc=chi_r[nr]
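# parabolic refinement: fit a parabola through (a,fa), (b,fb), (c,fc) and move to its
# vertex; the formula below assumes the three samples are (roughly) evenly spaced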
den=(fc-2*fb+fa)
redshift=c-(b-a)*((fc-fb)/den+0.5)
slope=abs(0.5*(fc-fb)/(c-b))+abs(0.5*(fa-fb)/(a-b))
if slope > 0:
e_redshift=0.01*redshift/slope
else:
e_redshift=0.01*redshift
min_chi_sq=chi_r[nr-1]
K=nr-1
nr=nr+1
a_rnd=ran.rand(2)
RED=RED+d_redshift*(a_rnd[0])
fit_redshift=0
d_redshift=0
else:
fit_redshift=0
e_redshift=0
print "REDSHIFT = "+str(redshift)+" +- "+str(e_redshift)
#sys.exit()
REDSHIFT=redshift
# sigma DETERMINATION
K=0
nr=0
chi_s=[]
sigma_s=[]
print "D_SIGMA = "+str(d_sigma)
if d_sigma > 0:
min_chi_sq = 1e30
SIGMA=min_sigma
while SIGMA < max_sigma:
ssp_dat3=ssp.fit_ssp_lin_no_zero_no_cont(redshift,SIGMA,Av,crval2,cdelt2,crpix2,nf2,n_c2,pdl_flux_c_ini2,hdr2,wave_unc,masked2,e_flux_unc,flux_masked2,n_mc,chi_sq_min_now,min_chi_sq,my_plot)
chi_now=ssp_dat3[0]
chi_s.extend([chi_now])
sigma_s.extend([SIGMA])
if chi_s[nr-1] < min_chi_sq:
sigma=sigma_s[nr-1]
min_chi_sq=chi_s[nr-1]
K=nr
nr=nr+1
SIGMA=SIGMA+d_sigma
SIGMA=sigma-1.5*d_sigma
max_sigma=sigma+1.5*d_sigma
d_sigma=0.33*d_sigma
#
#TWO
#
nr=0
chi_s=[]
sigma_s=[]
e_sigma=d_sigma
while SIGMA < max_sigma:
ssp_dat4=ssp.fit_ssp_lin_no_zero_no_cont(redshift,SIGMA,Av,crval2,cdelt2,crpix2,nf2,n_c2,pdl_flux_c_ini2,hdr2,wave_unc,masked2,e_flux_unc,flux_masked2,n_mc,chi_sq_min_now,min_chi_sq,my_plot)
chi_now=ssp_dat4[0]
chi_s.extend([chi_now])
sigma_s.extend([SIGMA])
if nr > 1 and chi_s[nr-1] < chi_s[nr-2] and chi_s[nr-1] < chi_s[nr] and chi_s[nr-1] <= min_chi_sq:
a=sigma_s[nr-2]
b=sigma_s[nr-1]
c=sigma_s[nr]
fa=chi_s[nr-2]
fb=chi_s[nr-1]
fc=chi_s[nr]
den=(fc-2*fb+fa)
sigma=c-(b-a)*((fc-fb)/den+0.5)
min_chi_sq=chi_s[nr-1]
K=nr
SIGMA=max_sigma
nr=nr+1
a_rnd=ran.rand(2)
SIGMA=SIGMA+d_sigma*(a_rnd[0])
slope=(chi_s[nr-1]-min_chi_sq)/(sigma_s[nr-1]-sigma)
if slope > 0:
e_sigma=sigma/slope/10.
else:
e_sigma=sigma/10.
fit_sigma=0
d_sigma=0
else:
fit_sigma=0
e_sigma=0
sigma=abs(sigma)
e_sigma=abs(e_sigma)
print "SIGMA = "+str(sigma)+"+-"+str(e_sigma)
else:
#
# Below the cut!
#
for i in range(0, nf):
Av[i]=0
d_Av[i]=0
# Av DETERMINATION
K=0
nr=0
chi_Av=[]
Av_s=[]
Av_p_chi=[]
print "D_Av = "+str(d_Av_IN)
nr_min=0
if d_Av_IN > 0:
min_chi_sq = 1e30
Av_NOW=min_Av
while Av_NOW < max_Av:
for i in range(0, nf):
Av[i]=Av_NOW
#
# Not allow negative coeffs!!!!
#
ssp_dat5=ssp.fit_ssp_lin_no_zero(redshift,sigma,Av,crval2,cdelt2,crpix2,nf2,n_c2,pdl_flux_c_ini2,hdr2,wave_unc,masked_Av,e_flux_unc,flux_masked,n_mc,chi_sq_min_now,min_chi_sq,my_plot)
chi_now=ssp_dat5[0]
chi_Av.extend([chi_now])
Av_s.extend([Av_NOW])
if chi_now > 0:
Av_p_chi.extend([Av_NOW/(chi_now)])
if K == 0 and chi_Av[nr] < min_chi_sq:
Av_F=Av_s[nr]
nr_min=nr
min_chi_sq=chi_now
if nr > 1 and chi_Av[nr-1] < chi_Av[nr-2] and chi_Av[nr-1] < chi_Av[nr] and chi_Av[nr-1] <= min_chi_sq:
a=Av_s[nr-2]
b=Av_s[nr-1]
c=Av_s[nr]
fa=chi_Av[nr-2]
fb=chi_Av[nr-1]
fc=chi_Av[nr]
den=(fc-2*fb+fa)
Av_F=c-(b-a)*((fc-fb)/den+0.5)
min_chi_sq=chi_Av[nr-1]
K=nr
nr=nr+1
a_rnd=ran.rand(2)
Av_NOW=Av_NOW+d_Av_IN*(a_rnd[0])
if Av_s[nr-1] != Av_F:
slope=(chi_Av[nr-1]-min_chi_sq)/(Av_s[nr-1]-Av_F)
if slope > 0 :
e_Av=abs(Av_F/slope/3.)
else:
e_Av=d_Av_IN
else:
e_Av=d_Av_IN
fit_Av=0
d_Av_NOW=0
else:
fit_Av=0
if d_Av_IN == 0:
Av_F=Av_IN
if e_Av == 0:
e_Av=d_Av_IN
print "AV = "+str(Av_F)+" +- "+str(e_Av)
for i in range(0, nf):
Av[i]=Av_F
fit_redshift=0
redshift_abs=redshift
delta_chi=10
NITER=0
niter_tmp_max=10
chi_sq_min_now=1e12
min_chi_sq_limit=min_chi_sq
n_mc=10
pdl_rat_master=np.ones(n_unc+1)
[min_chi_sq,pdl_age_mod,pdl_met_mod,pdl_ml,pdl_Av,coeffs,coeffs_N,coeffs_NM,pdl_model_spec_min,pdl_res]=ssp.fit_ssp_lin_no_zero(redshift,sigma,Av,crval2,cdelt2,crpix2,nf2,n_c2,pdl_flux_c_ini2,hdr2,wave_unc,masked_Av,e_flux_unc,flux_unc,n_mc,chi_sq_min_now,min_chi_sq,plot)
#
# We substract the continuum!
#
pdl_mod_SSP=pdl_model_spec_min
pdl_res_SSP=pdl_res
nx=n_unc
i0_now=int(0.4*nx)
i1_now=int(0.6*nx)
stats_res=np.std(pdl_res[i0_now:i1_now])+np.mean(pdl_res[i0_now:i1_now])
stats_mod=np.mean(pdl_model_spec_min[i0_now:i1_now])
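# crude signal-to-noise estimate over the central 20% of the spectrum:
# mean of the model divided by (mean + std) of the residual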
SN=0
if stats_res > 0:
SN=stats_mod/stats_res
print "Signal-to-Noise = "+str(SN)
old=1
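# continuum-shape correction (only when S/N > 10): the observed/model ratio is median-filtered,
# interpolated back onto the wavelength grid and, if it passes a simple sanity check,
# divided out of the observed spectrum before the SFH fit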
if old == 1 and SN > 10:
pdl_model_spec_min[np.where(pdl_model_spec_min == 0)[0]]=1.
pdl_rat=pdl_res/pdl_model_spec_min+1
rat=pdl_rat
med_rat=my.median_filter(int(5*2.354*sigma),rat)
pdl_med_rat=np.array(med_rat)
n_unc_1=n_unc-1
pdl_wave_unc=wave_unc[0]+(wave_unc[1]-wave_unc[0])*np.arange(0,n_unc_1)
med_rat=my.median_filter(int(7*2.354*sigma),rat)
med_sigma=int(1.5*sigma)
if med_sigma < 3:
med_sigma=3
med_rat_box=my.median_box(med_sigma,med_rat)
med_wave_box=my.median_box(med_sigma,wave_unc)
y_rat = interp1d(med_wave_box, med_rat_box,bounds_error=False,fill_value=0.)(wave_unc)
if plot > 0:
out_ps_now="junk2"
title="ratio"
ssp.plot_results_min_max(2,wave_unc,[flux_unc,pdl_model_spec_min,pdl_res,pdl_rat,y_rat],out_ps_now,title,-0.2,1.5)
i0_now=int(0.4*n_unc)
i1_now=int(0.6*n_unc)
stats_rat0=np.mean(y_rat[i0_now:i1_now])
stats_rat1=np.std(y_rat[i0_now:i1_now])+stats_rat0
if stats_rat0 > 0 and stats_rat1 > 0.02:
for i in range(0, n_unc):
val=y_rat[i]
if val > 0:
flux_unc[i]=flux_unc[i]/val
print "Deriving SFH...."
#my pdl_mod_JOINT;
#my pdl_res_JOINT;#=$pdl_res_SSP; # modification of 12 March 2015 (in case the while loop is not entered)
#my pdl_no_gas;
#my age_min;
#my met_min;
#my Av_min;
#my age_min_mass;
#my met_min_mass;
#my Av_min_mass;
coeffs_cat=np.zeros([nf+1,n_mc])
while MIN_CHISQ > MIN_DELTA_CHISQ and NITER < MAX_NITER:
if NITER == 1:
MIN_CHISQ=1e12
######################################################################
# Fitting the emission lines
######################################################################
a_fixed=np.zeros([1,9])
a_type_fixed=[]
n_mod_fixed=0
if ns > 0:
ks=0
SYS_VEL=vel_light*REDSHIFT
REN=[]
e_REN=[]
sycall(call)
for ist in range(0,ns):
if red_elines > 0:
SYS_VEL=vel_light*red_elines
if ist == 0:
SYS_VEL_MAX=vel_light*red_elines+D_SYS_VEL
SYS_VEL_MIN=vel_light*red_elines-D_SYS_VEL
else:
SYS_VEL_MAX=vel_light*red_elines+D_SYS_VEL
SYS_VEL_MIN=vel_light*red_elines-D_SYS_VEL
else:
SYS_VEL=vel_light*REDSHIFT
if ist == 0:
SYS_VEL_MAX=vel_light*REDSHIFT+D_SYS_VEL
SYS_VEL_MIN=vel_light*REDSHIFT-D_SYS_VEL
else:
SYS_VEL_MAX=vel_light*REDSHIFT+D_SYS_VEL
SYS_VEL_MIN=vel_light*REDSHIFT-D_SYS_VEL
start_w_e=start_w_E[ist]
end_w_e=end_w_E[ist]
mask_e=mask_E[ist]
config_e=config_E[ist]
npoly_e=npoly_E[ist]
mask_poly_e=mask_poly_E[ist]
nmin_e=nmin_E[ist]
nmax_e=nmax_E[ist]
print "CONF="+config_e
wave_elines=[]
flux_elines=[]
flux_e_elines=[]
masked_elines=[]
n_e=0
for i in range(0, n_unc):
if wave_unc[i] > start_w_e and wave_unc[i] < end_w_e:
wave_elines.extend([wave_unc[i]])
flux_elines.extend([flux_unc_org[i]-pdl_mod_SSP[i]])
flux_e_elines.extend([abs(e_flux_unc[i])])
masked_elines.extend([1])
n_e=n_e+1
pdl_wave_elines=np.array(wave_elines)
pdl_flux_elines=np.array(flux_elines)
pdl_flux_e_elines=np.array(flux_e_elines)
pdl_masked_elines=np.array(masked_elines)
stats0=np.mean(pdl_flux_elines)
stats4=np.amax(pdl_flux_elines)
y_max=stats4-stats0
deft=1
data=filter(None, config_line_E[ist].split(';')[0].split(" "))
#print float_(filter(None, config_line_E[0].split(';')[4].split(" ")))[2]
junk=data[0]
n_mod=int(data[1])
chi_goal=float_(data[2])
d_chi_goal=float_(data[3])
n_line=n_line_E[ist]
i_mod=1
typef=[]
a=np.zeros([n_mod,9])
ia=np.zeros([n_mod,9])
ea=np.zeros([n_mod,9])
a0=np.zeros([n_mod,9])
a1=np.zeros([n_mod,9])
link=np.zeros([n_mod,9])
for ii in range(0, n_mod):
cnf=filter(None, config_line_E[ist].split(';')[i_mod].split(" "))
i_mod=i_mod+1
typef.extend(cnf)
for j in range(0, 9):
data=config_line_E[ist].split(';')[i_mod].replace('\t',' ')
data=filter(None, data.split(' '))
i_mod=i_mod+1
a[ii][j]=float_(data[0])
ia[ii][j]=float_(data[1])
ea[ii][j]=0
a0[ii][j]=float_(data[2])
a1[ii][j]=float_(data[3])
link[ii][j]=float_(data[4])
if deft == 1:
a1_max=2*y_max*(a[ii][2]*((2*3.1416)**0.5))
a0_min=0.01*1.2*y_max*(a[ii][2]*((2*3.1416)**0.5))
if a1[ii][1] > a1_max:
a1[ii][1]=a1_max
a0[ii][1]=a0_min
#
# Force vicinity in the velocity
#
a[0][3]=SYS_VEL
ia[0][3]=1
a0[0][3]=SYS_VEL_MIN
a1[0][3]=SYS_VEL_MAX
i_ter=0
n_loops=5
n_mc_e=30
breakt=0
scale_ini=0.15
deft=0
pdl_model=np.zeros(n_e)
pdl_model_cont=np.zeros(n_e)
pdl_model_tmp=np.zeros(n_e)
pdl_model_cont_tmp=np.zeros(n_e)
a_out_now=ssp.copy_a(n_mod,a)
a_out_tmp=ssp.copy_a(n_mod,a)
chi_sq_now=1e12
a_results=np.zeros([1, n_mod, 9])
nnk=0
max_time=5
time=ssp.get_seconds()
d_time=ssp.get_seconds()-time
i_loops=0
ran.seed(None)
while i_ter < n_loops and breakt == 0:
chi_iter=chi_sq_now
chi_single=0
[chi_sq_now,pdl_a,pdl_model_tmp,pdl_model_cont_tmp]=ssp.fit_elines_grad_rnd_new(pdl_wave_elines,pdl_flux_elines,pdl_flux_e_elines,n_mod,chi_goal,d_chi_goal,typef,a_out_tmp,ia,a0,a1,link,n_mc_e,pdl_masked_elines,deft,scale_ini)#,max_time)
a_out_now=ssp.copy_a_pdl(n_mod,pdl_a)
#print chi_sq_now, pdl_a[:,1],a_out_tmp[:,1]
if chi_sq_now < chi_iter:
#####################################################
# Close to a result, narrow the range
for i in range(0, n_mod):
for j in range(0, 9):
if typef[i] == "eline\n":
if ia[i][j] == 1:
if link[i][j] == -1:
delta_now=abs(a1[i][j]-a0[i][j])/(2.)
a0_tmp=a0[i][j]
a1_tmp=a1[i][j]
if j != 3:
a0_tmp=a_out_now[i][j]-delta_now
a1_tmp=a_out_now[i][j]+delta_now
else:
a0_tmp=a_out_now[i][j]-0.5*delta_now
a1_tmp=a_out_now[i][j]+0.5*delta_now
if a0_tmp < a0[i][j]:
a0_tmp=a0[i][j]
if a1_tmp > a1[i][j]:
a1_tmp=a1[i][j]
a0[i][j]=a0_tmp
a1[i][j]=a1_tmp
####################################################
a_out_tmp=ssp.copy_a(n_mod,a_out_now)
a_results=ssp.copy_a_results(n_mod,nnk,a_out_now,a_results)
pdl_model=pdl_model_tmp
pdl_model_cont=pdl_model_cont_tmp
nnk=nnk+1
i_ter=i_ter+1
else:
rnd_a=ran.rand(10);
a_out_now=ssp.copy_a(n_mod,a_out_now)
i_loops=i_loops+1
if i_loops > 5*n_loops:
breakt=1
out_ps_now="fit_"+outfile+"."+str(start_w_e)+"_"+str(end_w_e)
title="["+str(start_w_e)+","+str(end_w_e)+"]"
if pdl_model.shape[0] == len(pdl_wave_elines):
pdl_model=np.transpose(pdl_model)
ssp.plot_results(plot,pdl_wave_elines,[pdl_flux_elines,pdl_model[0,:],(pdl_flux_elines-pdl_model[0,:])],out_ps_now,title)
print "----------------------------------------";
a_final=ssp.mean_a_results_last(n_mod,nnk,a_results,ia)
#
# Background noise
#
pdl_res_now=pdl_flux_elines-pdl_model
stats_back1=np.mean(pdl_res_now)+np.std(pdl_res_now)
a_final=ssp.add_back_noise(n_mod,a_final,typef,chi_sq_now,stats_back1)
ssp.print_a_final(n_mod,a_final,typef,chi_sq_now)
out_fit_spectra=out_elines
ssp.print_a_final_file_add(n_mod,a_final,typef,chi_sq_now,out_fit_spectra)
[n_mod_fixed,junk_a_fixed,junk_a_type_fixed]=ssp.add_a_results_elines(n_mod,a_final,typef,n_mod_fixed,a_fixed,a_type_fixed)
a_fixed=junk_a_fixed
a_type_fixed=junk_a_type_fixed
nmin_e=int(0.1*n_unc)
nmax_e=int(0.9*n_unc)
###############################
# Low order polynomical!
out_fit_now=out_fit+"."+str(start_w_e)+"_"+str(end_w_e)+".pdf"
box=int(sigma*6)
print "DONE FIT ELINES CONFIG "+str(ist)
#
# We create a FIXED model of the emission lines
#
pdl_model_elines=np.zeros(n_unc)
pdl_model_cont=np.zeros(n_unc)
pdl_wave_elines=np.array(wave_unc)
NN=len(pdl_wave_elines)
NN1=len(pdl_model_elines)
for i in range(0, n_mod_fixed):
pdl_tmp=ssp.create_single_model(pdl_wave_elines,i,a_type_fixed,a_fixed)
NN2=len(pdl_tmp[0,:])
pdl_model_elines=pdl_model_elines+pdl_tmp[0,:]
#
# We remove the gas before a new iteration
#
for i in range(0, n_unc):
flux_unc[i]=flux_unc_org[i]-pdl_model_elines[i]
pdl_mod_JOINT=pdl_mod_SSP+pdl_model_elines
pdl_res_JOINT=pdl_res_SSP-pdl_model_elines
pdl_no_gas=np.array(flux_unc)
#############################################################
# We rescale!
##############################################################
y_rat=np.ones(nx+1)
jy=0
if SN > 10:
pdl_mod_JOINT[np.where(pdl_mod_JOINT == 0)[0]]=1.
pdl_rat=pdl_res_JOINT/pdl_mod_JOINT+1
rat=pdl_rat
n_unc_1=n_unc-1
pdl_wave_unc=wave_unc[0]+(wave_unc[1]-wave_unc[0])*np.arange(0, n_unc_1)
med_rat=my.median_filter(int(5*2.354*sigma),rat);
med_sigma=int(1.5*sigma)
if med_sigma < 3:
med_sigma=3
med_rat_box=my.median_box(med_sigma,med_rat)
med_wave_box=my.median_box(med_sigma,wave_unc)
y_rat = interp1d(med_wave_box, med_rat_box,bounds_error=False,fill_value=0.)(wave_unc)
i0_now=int(0.4*nx)
i1_now=int(0.6*nx)
stats_rat0=np.mean(y_rat[i0_now:i1_now])
stats_rat1=np.std(y_rat[i0_now:i1_now])+stats_rat0
if plot > 1:
out_ps_now="junk3"
title="ratio = "+str(stats_rat0)+", rms="+str(stats_rat1)
print title
ssp.plot_results_min_max(2,wave_unc,[flux_unc,pdl_model_spec_min,pdl_res,pdl_rat,y_rat],out_ps_now,title,-0.2,1.5)
if stats_rat0 > 0 and stats_rat1 > 0.02:
if jy == 0:
# Continuum shape correction on/off
pdl_rat_master=y_rat
pdl_rat_master[np.where(pdl_rat_master == 0)[0]]=1.
y_rat=pdl_rat_master
else:
y_rat=pdl_rat_master
for i in range(0, n_unc):
val=y_rat[i]
if val > 0:
flux_unc[i]=flux_unc[i]/val
flux_unc_org[i]=flux_unc_org[i]/val
##############################################################
# End re-scale
##############################################################
ML=0
if med_flux > CUT_MEDIAN_FLUX:
n_mc=20
[min_chi_sq,pdl_age_mod,pdl_met_mod,pdl_ml,pdl_Av,coeffs,coeffs_N,coeffs_NM,pdl_mod_SSP,pdl_res_SSP,coeffs_N_input,e_coeffs_N_input]=ssp.fit_ssp_lin_MC(redshift,sigma,Av,crval,cdelt,crpix,nf,n_c,pdl_flux_c_ini,hdr,wave_unc,masked,e_flux_unc,flux_unc,n_mc,chi_sq_min_now,MIN_CHISQ,plot)
smooth_ratiot=ssp.smooth_ratio(flux_unc,pdl_mod_SSP,int(sigma))
pdl_mod_SSP_no_cor=np.copy(pdl_mod_SSP)
pdl_mod_SSP=pdl_mod_SSP*smooth_ratiot
f1=open(out_coeffs_file, "w")
f1.write("# ID AGE MET COEFF Min.Coeff log(M/L) AV N.Coeff Err.Coeff\n")
print "------------------------------------------------------------------------------"
print "ID AGE MET COEFF Min.Coeff log(M/L) AV N.Coeff Err.Coeff"
print "------------------------------------------------------------------------------"
age_mod=pdl_age_mod
met_mod=pdl_met_mod
Av_mod=pdl_Av
ml=pdl_ml
a_coeffs=coeffs[:,0]
a_e_coeffs=coeffs[:,1]
a_min_coeffs=coeffs[:,2]
a_coeffs_N=coeffs_N
a_e_coeffs_N=a_e_coeffs
l_age_min=0
l_met_min=0
l_Av_min=0
l_age_min_mass=0
l_met_min_mass=0
l_Av_min_mass=0
e_l_age_min=0
e_l_met_min=0
e_l_Av_min=0
e_l_age_min_mass=0
e_l_met_min_mass=0
e_l_Av_min_mass=0
for k in range(0, nf):
if a_coeffs[k] > 0:
a_e_coeffs_N[k]=a_e_coeffs[k]*(a_coeffs_N[k]/a_coeffs[k])
else:
a_e_coeffs_N[k]=0
l_age_min=l_age_min+a_coeffs[k]*np.log10(age_mod[k])
l_met_min=l_met_min+a_coeffs[k]*np.log10(met_mod[k])
l_Av_min=l_Av_min+a_coeffs[k]*np.log10(Av_mod[k])
l_age_min_mass=l_age_min_mass+ml[k]*a_coeffs_N[k]*np.log10(age_mod[k])
l_met_min_mass=l_met_min_mass+ml[k]*a_coeffs_N[k]*np.log10(met_mod[k])
l_Av_min_mass=l_Av_min_mass+ml[k]*a_coeffs_N[k]*np.log10(Av_mod[k])
e_l_age_min=e_l_age_min+a_e_coeffs[k]*np.log10(age_mod[k])
e_l_met_min=e_l_met_min+a_e_coeffs[k]*np.log10(met_mod[k])
e_l_Av_min=e_l_Av_min+a_e_coeffs[k]*np.log10(Av_mod[k])
e_l_age_min_mass=e_l_age_min_mass+ml[k]*a_e_coeffs_N[k]*np.log10(age_mod[k])
e_l_met_min_mass=e_l_met_min_mass+ml[k]*a_e_coeffs_N[k]*np.log10(met_mod[k])
e_l_Av_min_mass=e_l_Av_min_mass+ml[k]*a_e_coeffs_N[k]*np.log10(Av_mod[k])
ML=ML+ml[k]*a_coeffs_N[k]
C_ini=coeffs_N_input[k]
e_C_ini=e_coeffs_N_input[k]
f1.write(("%2d" % k)+" "+("%7.4f" % age_mod[k])+" "+("%7.4f" % met_mod[k])+" "+("%7.4f" % a_coeffs_N[k])+" "+("%7.4f" % a_min_coeffs[k])+" "+("%4.4f" % np.log10(ml[k]))+" "+("%4.2f" % Av_mod[k])+" "+("%7.4f" % a_coeffs[k])+" "+("%7.4f" % a_e_coeffs[k])+"\n")
if a_coeffs[k] > 1e-5:
print ("%2d" % k)+" "+("%7.4f" % age_mod[k])+" "+("%7.4f" % met_mod[k])+" "+("%7.4f" % a_coeffs_N[k])+" "+("%7.4f" % a_min_coeffs[k])+" "+("%4.4f" % np.log10(ml[k]))+" "+("%4.2f" % Av_mod[k])+" "+("%7.4f" % a_coeffs[k])+" "+("%7.4f" % a_e_coeffs[k])+" "+("%7.4f" % C_ini)+" "+("%7.4f" % e_C_ini)
print "------------------------------------------------------------------------------"
f1.close()
age_min=10**(l_age_min)
met_min=10**(l_met_min)
Av_min=10**(l_Av_min)
age_min_mass=10**(l_age_min_mass/ML)
met_min_mass=10**(l_met_min_mass/ML)
Av_min_mass=10**(l_Av_min_mass/ML)
e_age_min=abs(0.43*e_l_age_min*age_min)
e_met_min=abs(0.43*e_l_met_min*met_min)
e_Av_min=abs(0.43*e_l_Av_min*Av_min)
e_age_min_mass=abs(0.43*e_l_age_min*age_min_mass)
e_met_min_mass=abs(0.43*e_l_met_min*met_min_mass)
e_Av_min_mass=abs(0.43*e_l_Av_min*Av_min_mass)
if min_chi_sq > 0:
delta_chi=abs((chi_sq_min_now-min_chi_sq)/min_chi_sq)
wpeak=6562
Fpeak=-1e12
pdl_mod_JOINT=pdl_mod_SSP+pdl_model_elines
pdl_res_JOINT=pdl_res_SSP-pdl_model_elines
pdl_no_gas=np.array(flux_unc)
# Copy output!
pdl_output[0,:]=np.array(flux_unc_org)
pdl_output[1,:]=pdl_mod_SSP
pdl_output[2,:]=pdl_mod_JOINT
pdl_res_SSP=np.array(flux_unc_org)-pdl_mod_SSP
pdl_res_SSP_no_cor=np.array(flux_unc_input)-pdl_mod_SSP_no_cor
pdl_output[3,:]=pdl_res_SSP_no_cor
pdl_tmp=np.array(flux_unc_org)
nx_1=n_unc#-1
if len(pdl_rat_master)-len(pdl_mod_JOINT)==1:
pdl_res_JOINT=pdl_tmp/(pdl_rat_master[0:nx_1])-pdl_mod_JOINT
else:
pdl_res_JOINT=pdl_tmp/(pdl_rat_master)-pdl_mod_JOINT
pdl_output[4,:]=pdl_res_JOINT
pdl_output[5,:]=np.array(flux_unc_org)-(pdl_res_SSP-pdl_res_JOINT)
title="X="+str(chi_sq_now)+" T="+str(age_min)+" ("+str(age_min_mass)+") Z="+str(met_min)+" ("+str(met_min_mass)+") Av="+str(Av_min)+" z="+str(redshift)+" sigma="+str(sigma)
ssp.plot_results(plot,pdl_wave_elines,pdl_output,out_ps,title)
print "I.Iter = "+str(NITER)+" DONE"
NITER=NITER+1
# Write output file
h=pyf.PrimaryHDU().header
h["NAXIS"]=2
h["NAXIS1"]=n_unc
h["NAXIS2"]=6
h["COMMENT"]="OUTPUT auto_ssp_elines_rnd.pl FITs"
h["CRVAL1"]=wave_unc[0]
h["CDELT1"]=wave_unc[1]-wave_unc[0];
h["CRPIX1"]=1
if pt.exists(out_fit) == False:
wfits(out_fit,pdl_output,h)
else:
sycall("rm "+out_fit)
wfits(out_fit,pdl_output,h)
################################
print "--------------------------------------------------------------"
pdl_masked=np.array(masked)
pdl_chi_now=((pdl_masked*pdl_res_JOINT)**2)/((np.array(e_flux_unc))**2)
pdl_chi_now[np.isnan(pdl_chi_now)]=0
chi_joint=np.sum(pdl_chi_now)
chi_joint=(chi_joint/(n_unc-n_mod_fixed-nf-1))**0.5
rms=np.std(pdl_masked*pdl_res_JOINT)
j1=int(0.4*n_unc)
j2=int(0.6*n_unc)
rms=np.std(pdl_res_JOINT[j1:j2])
pdl_flux_unc_now=np.array(flux_unc)
med_flux=np.median(pdl_flux_unc_now[j1:j2])
title="X="+str(chi_joint)+" T="+str(age_min)+" ("+str(age_min_mass)+") Z="+str(met_min)+" ("+str(met_min_mass)+") Av="+str(Av_min)+" z="+str(redshift)+" sigma="+str(sigma)
ssp.plot_results(plot,wave_unc,pdl_output,out_ps,title)
MASS=ML*med_flux
lML=np.log10(ML)
print "MSP CHISQ="+str(chi_joint)+" AGE="+str(age_min)+"+-"+str(e_age_min)+" MET="+str(met_min)+"+-"+str(e_met_min)+" AV="+str(Av_min)+"+-"+str(e_Av_min)+" REDSHIFT="+str(redshift)+"+-"+str(e_redshift)+" SIGMA_DISP="+str(sigma)+"+-"+str(e_sigma)+" RMS="+str(rms)+" MED_FLUX="+str(med_flux)+" AGE_mass="+str(age_min_mass)+"+-"+str(e_age_min_mass)+" MET_mass="+str(met_min_mass)+"+-"+str(e_met_min_mass)+" MASS="+str(MASS)+" log_M/L="+str(lML)
j1=int(0.4*n_unc)
j2=int(0.6*n_unc)
wave_norm=(wave_unc[j1]+wave_unc[j2])/2.
f=open(outfile, "w")
f.write("# (1) MIN_CHISQ\n")
f.write("# (2) LW Age (Gyr)\n")
f.write("# (3) LW Age error\n")
f.write("# (4) LW metallicity\n")
f.write("# (5) LW metallicity error\n")
f.write("# (6) Av\n")
f.write("# (7) AV error\n")
f.write("# (8) redshift \n")
f.write("# (9) redshift error\n")
f.write("# (10) velocity dispersion sigma, in AA\n")
f.write("# (11) velocity dispersion error\n")
f.write("# (12) median_FLUX\n")
f.write("# (13) redshift_ssp\n")
f.write("# (14) med_flux \n")
f.write("# (15) StdDev_residual \n")
f.write("# (16) MW Age (Gyr)\n")
f.write("# (17) MW Age error\n")
f.write("# (18) MW metallicity\n")
f.write("# (19) MW metallicity error\n")
f.write("# (20) Systemic Velocity km/s \n")
f.write("# (21) Log10 Average Mass-to-Light Ratio \n")
f.write("# SSP_SFH $back_list \n")
f.write("# SSP_KIN $back_list2 \n")
f.write("# WAVE_NORM $wave_norm AA\n")
if chi_joint == 0:
chi_joint=1
f.write(str(chi_joint)+","+str(age_min)+","+str(e_age_min)+","+str(met_min)+","+str(e_met_min)+","+str(Av_min)+","+str(e_Av_min)+","+str(redshift)+","+str(e_redshift)+","+str(sigma)+","+str(e_sigma)+","+str(FLUX)+","+str(redshift_abs)+","+str(med_flux)+","+str(rms)+","+str(age_min_mass)+","+str(e_age_min_mass)+","+str(met_min_mass)+","+str(e_met_min_mass)+","+str(SYS_VEL)+","+str(lML)+"\n")
f.close()
sec_end=ssp.print_time()
sec_total=sec_end-sec_ini
print "# SECONDS = "+str(sec_total)
#
# Write the output!
#
#
| Delosari/dazer | bin/lib/ssp_functions/ssp_Hector_Fit3D_mix.py | Python | mit | 45,180 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, getopt
from datetime import datetime
import math
from gann import *
def print_usage():
print """
classic Gann square: gann.py -o <output file name> -s <square size>
Gann square based on date: gann.py -o <output file name> -a <base date> -b <final date> -m <path to list of dates to mark>
Gann sub square based on date: gann.py -o <output file name> -a <base date> -b <final date> -m <path to list of dates to mark> -r "<left>;<bottom>;<right>;<up>"
input date format: "dd/MM/yyyy"
"""
def main(argv):
cell_size = 30
date_format = "%d/%m/%Y"
# --------------------------------------
output_file_name = ''
marks_file_name = ''
square_size = -1
date_a = None
date_b = None
left, bot, right, up = 0, 0, 0, 0
# --------------------------------------
try:
opts, args = getopt.getopt(argv, "ho:s:a:b:m:r:", ["ofile=", "size=", "a_date=", "b_date=", "mfile=", "rect="])
except getopt.GetoptError:
print_usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_usage()
sys.exit()
elif opt in ("-o", "--ofile"):
output_file_name = arg
elif opt in ("-s", "--size"):
square_size = int(arg)
elif opt in ("-a", "--a_date"):
date_a = datetime.strptime(arg, date_format)
elif opt in ("-b", "--b_date"):
date_b = datetime.strptime(arg, date_format)
elif opt in ("-m", "--mfile"):
marks_file_name = arg
elif opt in ("-r", "--rect"):
rect = arg.split(';')
try:
left, bot, right, up = int(rect[0]), int(rect[1]), int(rect[2]), int(rect[3])
except ValueError as e:
print 'Failed to parse range!'
if output_file_name == '':
print_usage()
sys.exit(2)
if square_size != -1:
# classic Gann square
# Info
print "Cells: %i" % (square_size * square_size)
print "Square size: %i" % square_size
print "Cell size: %i" % cell_size
print "Building..."
stream = open(output_file_name, 'w')
create_gann_square_classic(square_size, cell_size, stream)
stream.close()
elif date_a and date_b:
# date based Gann square
delta = date_b - date_a
square_size = int(math.ceil(math.sqrt(delta.days)))
if square_size % 2 == 0:
square_size += 1
# Info
print "Cells: %i" % (square_size * square_size)
print "Square size: %i" % square_size
print "Cell size: %i" % cell_size
# Process
print "Loading data..."
marks = load_marks(marks_file_name)
print "Building..."
stream = open(output_file_name, 'w')
if (left != 0 or bot != 0 or right != 0 or up != 0) and left < right and bot < up:
create_gann_sub_square_dates((left, bot, right+1, up+1), cell_size, date_a, marks, stream)
else:
create_gann_square_dates(square_size, cell_size, date_a, marks, stream)
stream.close()
else:
print_usage()
sys.exit(2)
print "Done. See {0}".format(output_file_name)
if __name__ == "__main__":
main(sys.argv[1:]) | Galarius/gann-square | gann.py | Python | mit | 3,346 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
from bpy.types import Operator
class SCENE_OT_jewelcraft_scene_units_set(Operator):
bl_label = "Set Units"
bl_description = "Set optimal unit settings for jewelry modelling"
bl_idname = "scene.jewelcraft_scene_units_set"
bl_options = {"REGISTER", "UNDO", "INTERNAL"}
def execute(self, context):
unit_settings = context.scene.unit_settings
unit_settings.system = "METRIC"
unit_settings.length_unit = "MILLIMETERS"
unit_settings.scale_length = 0.001
context.space_data.overlay.grid_scale = 0.001
self.report({"INFO"}, "Optimal unit settings are in use")
return {"FINISHED"}
| mrachinskiy/blender-addon-jewelcraft | ops_utils/scene_ops.py | Python | mit | 1,477 |
from datetime import datetime, timedelta
from django.db import models
from django.db.models import Max, Min
from tinymce.models import HTMLField
class Company(models.Model):
name = models.CharField(max_length=75, blank=True, null=True)
symbol = models.CharField(max_length=10, blank=True, null=True)
description = HTMLField(blank=True, null=True, default='')
listing_date = models.DateField(blank=True, null=True)
renamed_to = models.ForeignKey('self', blank=True, null=True, default=None, related_name='renamed_from')
order = models.IntegerField(blank=True, default=0)
is_index = models.BooleanField(blank=True, default=False)
is_currently_listed = models.BooleanField(blank=True, default=True)
is_suspended = models.BooleanField(blank=True, default=False)
created_datetime = models.DateTimeField(auto_now_add=True)
updated_datetime = models.DateTimeField(auto_now=True)
class Meta:
ordering = ('symbol',)
verbose_name = 'Company'
verbose_name_plural = 'Companies'
def __unicode__(self):
return self.symbol if self.symbol is not None else self.name
def __str__(self):
return self.symbol if self.symbol is not None else self.name
@property
def readable_name(self):
if self.is_index:
return self.name[1:]
else:
return self.name
@property
def year_high(self):
today = datetime.now()
one_year = timedelta(days=52*7)
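        # markets are closed on weekends, so roll Saturday/Sunday back to the preceding Friday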
if today.isoweekday() == 6:
today = today - timedelta(days=1)
elif today.isoweekday() == 7:
today = today - timedelta(days=2)
last_year = today - one_year
quotes = self.quote_set.filter(quote_date__gt=last_year)
if quotes.count() == 0:
return 0.0
year_high = quotes.aggregate(Max('price_high'))
return ('%f' % year_high['price_high__max']).rstrip('0').rstrip('.')
@property
def year_low(self):
today = datetime.now()
one_year = timedelta(days=52*7)
if today.isoweekday() == 6:
today = today - timedelta(days=1)
elif today.isoweekday() == 7:
today = today - timedelta(days=2)
last_year = today - one_year
quotes = self.quote_set.filter(quote_date__gt=last_year)
if quotes.count() == 0:
return 0.0
year_low = quotes.aggregate(Min('price_low'))
return ('%f' % year_low['price_low__min']).rstrip('0').rstrip('.')
@property
def last_thirty_quotes(self):
quotes = self.quote_set.order_by('-quote_date')[:30]
return quotes
| rodxavier/open-pse-initiative | django_project/companies/models.py | Python | mit | 2,705 |
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.template.defaultfilters import slugify
from mptt.models import MPTTModel, TreeForeignKey
class ForumCategory(MPTTModel):
parent = TreeForeignKey(
'self', blank=True, null=True, related_name='children'
)
name = models.CharField(max_length=255)
slug = models.SlugField(max_length=255)
description = models.CharField(max_length=255, blank=True)
order = models.PositiveIntegerField(blank=True, null=True)
def __unicode__(self):
return self.name
@property
def last_post(self):
if self.parent is None:
return None
response = None
for thread in self.forumthread_set.all():
if response is None:
response = thread.last_post
else:
if thread.last_post.created > response.created:
response = thread.last_post
return response
@property
def post_count(self):
count = 0
for thread in self.forumthread_set.all():
count += thread.forumpost_set.count()
return count
class Meta:
verbose_name_plural = 'Forum categories'
class ForumThread(models.Model):
category = models.ForeignKey(ForumCategory)
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=255)
author = models.ForeignKey(settings.AUTH_USER_MODEL)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('thread_home', kwargs={'slug': self.slug})
@property
def last_post(self):
return self.forumpost_set.order_by('-created').first()
@property
def num_replies(self):
return self.forumpost_set.filter(is_thread_starter=False).count()
@property
def thread_starter(self):
return self.forumpost_set.get(thread=self, is_thread_starter=True)
def save(self, *args, **kwargs):
if self.slug == '':
self.slug = slugify(self.title)
return super(ForumThread, self).save(*args, **kwargs)
class ForumPost(models.Model):
thread = models.ForeignKey(ForumThread)
post = models.TextField()
author = models.ForeignKey(settings.AUTH_USER_MODEL)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
reply_to = models.ForeignKey('self', blank=True, null=True)
is_thread_starter = models.BooleanField(default=False)
def __unicode__(self):
return '%(thread)s - %(pk)s' % {
'thread': self.thread.title,
'pk': self.pk
}
def get_breadcrumb(self):
breadcrumb = [
(
self.thread.title,
reverse(
'thread_home',
kwargs={'slug': self.thread.slug}
)
),
]
category = self.thread.category
while True:
breadcrumb_item = (
category.name,
reverse(
'category_home',
kwargs={'slug': category.slug}
),
)
breadcrumb.insert(0, breadcrumb_item)
if category.parent is None:
break
category = category.parent
return breadcrumb
| hellsgate1001/thatforum_django | thatforum/models.py | Python | mit | 3,446 |
from app.schema_validation.definitions import https_url, uuid
create_service_callback_api_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST service callback/inbound api schema",
"type": "object",
"title": "Create service callback/inbound api",
"properties": {
"url": https_url,
"bearer_token": {"type": "string", "minLength": 10},
"updated_by_id": uuid
},
"required": ["url", "bearer_token", "updated_by_id"]
}
update_service_callback_api_schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "POST service callback/inbound api schema",
"type": "object",
"title": "Create service callback/inbound api",
"properties": {
"url": https_url,
"bearer_token": {"type": "string", "minLength": 10},
"updated_by_id": uuid
},
"required": ["updated_by_id"]
}
| alphagov/notifications-api | app/service/service_callback_api_schema.py | Python | mit | 915 |
# encoding: UTF-8
from eventEngine import *
from ctpGateway import CtpGateway
########################################################################
class MainEngine(object):
    """Main engine"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
        # Create the event engine
self.eventEngine = EventEngine()
self.eventEngine.start()
        # Dictionary for storing gateway objects
self.gatewayDict = {}
        # Create the gateway objects we want to connect to
self.addGateway(CtpGateway, 'CTP')
#----------------------------------------------------------------------
    def addGateway(self, gateway, gatewayName=None):
        """Create a gateway"""
self.gatewayDict[gatewayName] = gateway(self.eventEngine, gatewayName)
#----------------------------------------------------------------------
    def connect(self, gatewayName):
        """Connect the gateway with the given name"""
gateway = self.gatewayDict[gatewayName]
gateway.connect()
#----------------------------------------------------------------------
    def subscribe(self, subscribeReq, gatewayName):
        """Subscribe to market data via the named gateway"""
gateway = self.gatewayDict[gatewayName]
gateway.subscribe(subscribeReq)
#----------------------------------------------------------------------
    def sendOrder(self, orderReq, gatewayName):
        """Send an order via the named gateway"""
gateway = self.gatewayDict[gatewayName]
return gateway.sendOrder(orderReq)
#----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq, gatewayName):
        """Cancel an order via the named gateway"""
gateway = self.gatewayDict[gatewayName]
gateway.cancelOrder(cancelOrderReq)
#----------------------------------------------------------------------
    def getAccont(self, gatewayName):
        """Query the account via the named gateway"""
gateway = self.gatewayDict[gatewayName]
gateway.getAccount()
#----------------------------------------------------------------------
    def getPosition(self, gatewayName):
        """Query positions via the named gateway"""
gateway = self.gatewayDict[gatewayName]
gateway.getPosition()
#----------------------------------------------------------------------
    def exit(self):
        """Called before the program exits to ensure a clean shutdown"""
        # Stop the event engine
self.eventEngine.stop()
        # Safely close all gateways
for gateway in self.gatewayDict.values():
gateway.close() | golden1232004/vnpy | vn.trader/vtEngine.py | Python | mit | 2,720 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 <>
#
# Distributed under terms of the MIT license.
# Third-party modules
import pytest
# Projects modules
from fnapy.fnapy_manager import FnapyManager
def test_manager_raises_TypeError_with_invalid_connection():
"""FnapyManager should raise a TypeError when the connection is not a FnapyConnection"""
with pytest.raises(TypeError):
connection = {'partner_id': 'XXX', 'shop_id': 'XXX', 'key': 'XXX'}
manager = FnapyManager(connection)
| alexandriagroup/fnapy | tests/offline/test_manager.py | Python | mit | 549 |
import math
import string
lookup_map = {}
def memcache_read(n):
global lookup_map
if lookup_map.has_key(n):
return lookup_map[n]
else:
return None
def memcache_write(n, value):
global lookup_map
lookup_map[n] = value
def get_chain_length(n):
# check cache
cache = memcache_read(n)
if cache != None:
return cache
# no cache, so caculate
if n <= 1:
memcache_write(1, 1)
return 1
if n % 2 == 0:
n = n / 2
else:
n = 3*n + 1
return get_chain_length(n) + 1
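# e.g. the Collatz chain for 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
# so get_chain_length(13) returns 10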
def find_longest_chain_under_N(n):
max_chain_num = -1
max_chain_length = 0
for i in xrange(1, n, 1):
chain_length = get_chain_length(i)
memcache_write(i, chain_length)
if chain_length > max_chain_length:
max_chain_length = chain_length
max_chain_num = i
#print max_chain_num
#print max_chain_length
return max_chain_num
if __name__ == '__main__':
#print find_longest_chain_under_N(3)
print find_longest_chain_under_N(1000000)
| birdchan/project_euler | problems/014/run.py | Python | mit | 979 |
#!/usr/bin/python
# Copyright (C) 2014-2016 Miquel Sabaté Solà <mikisabate@gmail.com>
# This file is licensed under the MIT license.
# See the LICENSE file.
def insertsort(lst):
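    # in-place insertion sort: grow a sorted prefix, shifting larger items one slot
    # right until the correct position for each value is found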
for i in range(1, len(lst)):
value = lst[i]
j = i
while j > 0 and value < lst[j - 1]:
lst[j] = lst[j - 1]
j -= 1
lst[j] = value
ary = [4, 65, 2, -31, 0, 99, 2, 83, 782, 1]
print(ary)
insertsort(ary)
print(ary)
| mssola/programs | algorithms/sorting/insertsort/insertsort.py | Python | mit | 454 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-25 01:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pdfapp', '0010_auto_20170225_0034'),
]
operations = [
migrations.AddField(
model_name='document',
name='html_text_string',
field=models.TextField(blank=True, null=True),
),
]
| NumberZeroSoftware/PDFINVEST | pdfapp/migrations/0011_document_html_text_string.py | Python | mit | 468 |
#!/usr/bin/env python
"""
@author : 'Muhammad Arslan <rslnrkmt2552@gmail.com>'
"""
import re
import zlib
import cv2
from scapy.all import *
pics = "pictures"
faces_dir = "faces"
pcap_file = "bhp.pcap"
def get_http_headers(http_payload):
    try:
        headers_raw = http_payload[:http_payload.index("\r\n\r\n")+2]
        headers = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\r\n", headers_raw))
    except:
        return None
    return headers
def extract_images(headers, http_payload):
image = None
image_type = None
try:
if "image" in headers['Content-Type']:
image_type = headers['Content-Type'].split('/')[1]
image = http_payload[http_payload.index('\r\n\r\n') + 4:]
try:
if "Content-Encoding" in headers.keys():
if headers['Content-Encoding'] == 'gzip':
image = zlib.decompress(image, 16+zlib.MAX_WBITS)
elif headers['Content-Encoding'] == "deflate":
image = zlib.decompress(image)
except:
pass
except:
return None, None
return image, image_type
def face_detect(path, filename):
img = cv2.imread(path)
cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
rects = cascade.detectMultiScale(img, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20, 20))
if len(rects) == 0:
return False
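    # the cascade returns (x, y, w, h) boxes; the next line converts them to
    # (x1, y1, x2, y2) corner coordinates before drawing the rectangles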
rects[:, 2:] += rects[:, :2]
for x1, y1, x2, y2 in rects:
cv2.rectangle(img, (x1, y1), (x2, y2), (127, 255, 0), 2)
        cv2.imwrite("%s/%s-%s" % (faces_dir, pcap_file, filename), img)
return True
def http_assembler(pcap_file):
carved_images = 0
faces_detected = 0
a = rdpcap(pcap_file)
sessions = a.sessions()
for session in sessions:
http_payload = ""
for packet in sessions[session]:
try:
if packet[TCP].dport == 80 or packet[TCP].sport == 80:
http_payload += str(packet[TCP].payload)
except:
pass
headers = get_http_headers(http_payload)
if headers is None:
continue
        image, image_type = extract_images(headers, http_payload)
if image is not None and image_type is not None:
file_name = "%s-pic_carver_%d.%s" % (pcap_file, carved_images, image_type)
with open("%s/%s" % (pics, file_name), "wb") as fd:
fd.write(image)
carved_images += 1
try:
result = face_detect("%s/%s" % (pics, file_name), file_name)
if result is True:
faces_detected += 1
except:
pass
return carved_images, faces_detected
carved_images, faces_detected = http_assembler(pcap_file)
print "Extracted: %d images" % carved_images
print "Detected: %d faces" % faces_detected
| volf52/black_hat_python | pic_carver.py | Python | mit | 2,905 |
import json
import logging
import boto3
import hashlib
import jsonpatch
from dart.context.locator import injectable
from dart.model.trigger import TriggerType, TriggerState
from dart.message.call import TriggerCall
from dart.trigger.base import TriggerProcessor, execute_trigger
from dart.model.exception import DartValidationException
_logger = logging.getLogger(__name__)
scheduled_trigger = TriggerType(
name='scheduled',
description='Triggering from a scheduler',
params_json_schema={
'type': 'object',
'properties': {
'cron_pattern': {
'type': 'string',
'description': 'The CRON pattern for the schedule. See <a target="_blank" href=' + \
'"http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/ScheduledEvents.html"' + \
'>here</a> for correct syntax.'
},
},
'additionalProperties': False,
'required': ['cron_pattern'],
}
)
@injectable
class ScheduledTriggerProcessor(TriggerProcessor):
def __init__(self, workflow_service, dart_config):
self._workflow_service = workflow_service
self._trigger_type = scheduled_trigger
self._dart_config = dart_config
def trigger_type(self):
return self._trigger_type
def initialize_trigger(self, trigger, trigger_service):
""" :type trigger: dart.model.trigger.Trigger
:type trigger_service: dart.service.trigger.TriggerService """
self._validate_aws_cron_expression(trigger.data.args['cron_pattern'])
# http://boto3.readthedocs.org/en/latest/reference/services/events.html#CloudWatchEvents.Client.put_rule
client = boto3.client('events')
rule_name = self._create_rule_if_needed(client, trigger)
user_id = 'anonymous'
if trigger.data.user_id:
user_id = trigger.data.user_id
        workflow_id = None
        if len(trigger.data.tags) > 0:
            workflow_id = trigger.data.tags[0]
# When a trigger is created in Dart, we should only create a corresponding rule + target if the state is set to
# ACTIVE.
if trigger.data.state == TriggerState.ACTIVE:
target = {
'Id': trigger.id,
'Arn': self._dart_config['triggers']['scheduled']['cloudwatch_scheduled_events_sns_arn'],
'Input': json.dumps({
'call': TriggerCall.PROCESS_TRIGGER,
'trigger_type_name': self._trigger_type.name,
'message': {
'trigger_id': trigger.id,
'user_id': user_id, # This info is for tracking WF when viewed in cloudwatch rules
# logging workflow_id will be auto generated in '/workflow/<workflow>/do-manual-trigger', this one is for future needs.
'workflow_id': workflow_id
},
}),
}
self._add_target_to_rule(client, rule_name, target)
def update_trigger(self, unmodified_trigger, modified_trigger):
""" :type unmodified_trigger: dart.model.trigger.Trigger
:type modified_trigger: dart.model.trigger.Trigger """
client = boto3.client('events')
patch_list = jsonpatch.make_patch(unmodified_trigger.to_dict(), modified_trigger.to_dict())
target = {
'Id': modified_trigger.id,
'Arn': self._dart_config['triggers']['scheduled']['cloudwatch_scheduled_events_sns_arn'],
'Input': json.dumps({
'call': TriggerCall.PROCESS_TRIGGER,
'trigger_type_name': self._trigger_type.name,
'message': {
'trigger_id': modified_trigger.id,
'user_id': modified_trigger.data.user_id,
'workflow_id': modified_trigger.data.workflow_ids[0]
},
}),
}
for patch in patch_list:
if patch['path'] == '/data/state':
if modified_trigger.data.state == TriggerState.ACTIVE:
rule_name = self._create_rule_if_needed(client, modified_trigger)
self._add_target_to_rule(client, rule_name, target)
elif modified_trigger.data.state == TriggerState.INACTIVE:
self._remove_target_from_prefix(client, unmodified_trigger)
else:
raise Exception('unrecognized trigger state "%s"' % modified_trigger.data.state)
elif patch['path'] == '/data/args/cron_pattern' and patch['op'] == 'replace':
self._remove_target_from_prefix(client, unmodified_trigger)
rule_name = self._create_rule_if_needed(client, modified_trigger)
self._add_target_to_rule(client, rule_name, target)
return modified_trigger
def evaluate_message(self, message, trigger_service):
""" :type message: dict
:type trigger_service: dart.service.trigger.TriggerService """
trigger_id = message['trigger_id']
trigger = trigger_service.get_trigger(trigger_id, raise_when_missing=False)
if not trigger:
_logger.info('trigger (id=%s) not found' % trigger_id)
return []
if trigger.data.state != TriggerState.ACTIVE:
_logger.info('expected trigger (id=%s) to be in ACTIVE state' % trigger.id)
return []
execute_trigger(trigger, self._trigger_type, self._workflow_service, _logger)
return [trigger_id]
def teardown_trigger(self, trigger, trigger_service):
""" :type trigger: dart.model.trigger.Trigger
:type trigger_service: dart.service.trigger.TriggerService """
client = boto3.client('events')
self._remove_target_from_prefix(client, trigger)
def _create_rule_if_needed(self, client, trigger):
"""
:param client: boto3.session.Session.client
:param trigger: dart.model.trigger.Trigger
:return: str
"""
rule_name = self._next_rule_name(client, trigger)
try:
client.describe_rule(Name=rule_name)
except Exception as e:
if 'ResourceNotFoundException' in e.message:
response = client.put_rule(
Name=rule_name,
ScheduleExpression='cron(%s)' % trigger.data.args['cron_pattern'],
State='ENABLED',
Description='scheduled trigger for dart'
)
_logger.info('Created cloudwatch rule (arn=%s) for trigger (id=%s, cron=%s)' % (response['RuleArn'], trigger.id, trigger.data.args['cron_pattern']))
else:
_logger.info('Failed to create cloudwatch rule for trigger (id=%s, cron=%s)' % (trigger.id, trigger.data.args['cron_pattern']))
raise e
return rule_name
def _add_target_to_rule(self, client, rule_name, target):
"""
:param client: boto3.session.Session.client
:param rule_name: str
:param target: str
"""
response = client.put_targets(
Rule=rule_name,
Targets=[target]
)
self._check_response(response)
_logger.info('Created target for trigger (id=%s) on cloudwatch rule (name=%s)' % (target['Id'], rule_name))
def _next_rule_name(self, client, trigger):
"""
This method determines what the next rule name should be for new triggers e.g. iff there is a certain cron
expression that resolves to 'dart-ABCDEF' after hashing and it already has 5 targets, then we create a new
cloudwatch rule with the name 'dart-ABCDEF-1'.
:param client: boto3.session.Session.client
:param trigger: dart.model.trigger.Trigger
:return: str
"""
rule_prefix = self._get_cloudwatch_events_rule_prefix(trigger.data.args['cron_pattern'])
rules = client.list_rules(NamePrefix=rule_prefix)['Rules']
if not rules:
return rule_prefix
for _rule in rules:
response = client.list_targets_by_rule(Rule=_rule['Name'], Limit=5)
if len(response['Targets']) < 5:
return _rule['Name']
        return '%s-%d' % (rule_prefix, len(rules) + 1)
def _remove_target_from_prefix(self, client, trigger):
"""
This method goes through all rules with the determined rule prefix to remove the target from the appropriate
rule. The reason we have to iterate through all rules that match the prefix and can't do a direct removal by
rule name is because we don't store that anywhere on Dart side on creation.
:param client: boto3.session.Session.client
:param trigger: dart.model.trigger.Trigger
"""
rule_prefix = self._get_cloudwatch_events_rule_prefix(trigger.data.args['cron_pattern'])
rules = client.list_rules(NamePrefix=rule_prefix)['Rules']
for _rule in rules:
response = client.list_targets_by_rule(Rule=_rule['Name'], Limit=5)
for _target in response['Targets']:
if _target['Id'] == trigger.id:
r = client.remove_targets(Rule=_rule['Name'], Ids=[_target['Id']])
self._check_response(r)
_logger.info('Deleted target for trigger (id=%s) from cloudwatch rule (name=%s)' % (_target['Id'], _rule['Name']))
if len(response['Targets']) == 1:
client.delete_rule(Name=_rule['Name'])
_logger.info('Deleted cloudwatch rule (name=%s)' % _rule['Name'])
return
@staticmethod
def _get_cloudwatch_events_rule_name(trigger):
return 'dart-trigger-%s' % trigger.id
@staticmethod
def _get_cloudwatch_events_rule_prefix(cron_expression, hash_size=20):
"""
This method returns the new naming system for dart triggers. It hashes the cron pattern with sha1 to create new
cloudwatch rule name. We take only the first 20 chars because the max length allowed for cloudwatch rule name is
64.
:param cron_expression: dart.model.trigger.Trigger
:return: str
"""
return 'dart-%s' % hashlib.sha1(cron_expression).hexdigest()[:hash_size]
@staticmethod
def _check_response(response):
if response['FailedEntryCount'] > 0:
error_msg = ''
for failure in response['FailedEntries']:
msg = 'Failed on -- Target Id %s, ErrorCode %s, ErrorMessage: %s\n'
error_msg += msg % (failure['TargetId'], failure['ErrorCode'], failure['ErrorMessage'])
raise Exception(error_msg)
@staticmethod
def _validate_aws_cron_expression(cron_expression):
# See the Note on: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/ScheduledEvents.html
cron_pattern_split = cron_expression.split()
if '?' not in [cron_pattern_split[2], cron_pattern_split[4]]:
raise DartValidationException('CRON Validation Error: Support for specifying both a day-of-week and a '
'day-of-month value is not complete (you must currently use the "?"'
'character in one of these fields).')
| RetailMeNotSandbox/dart | src/python/dart/trigger/scheduled.py | Python | mit | 11,491 |
import os
from locust import HttpUser, TaskSet, task, between
from locust.clients import HttpSession
class MultipleHostsUser(HttpUser):
abstract = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.api_client = HttpSession(base_url=os.environ["API_HOST"])
class UserTasks(TaskSet):
    # tasks could be listed in the tasks attribute, but it might be convenient to use the @task decorator
@task
def index(self):
self.user.client.get("/")
@task
def index_other_host(self):
self.user.api_client.get("/stats/requests")
class WebsiteUser(MultipleHostsUser):
"""
User class that does requests to the locust web server running on localhost
"""
host = "http://127.0.0.1:8089"
wait_time = between(2, 5)
tasks = [UserTasks]
| mbeacom/locust | examples/multiple_hosts.py | Python | mit | 788 |
#!/usr/bin/env python2.7
import cgi
import json
import os
import re
import SimpleHTTPServer
import SocketServer
import subprocess
import sys
import threading
VERSION = 'v0.14.6'
hostname = ''
try:
command = "bash -c '[[ $(dig +short $HOSTNAME) ]] && echo $HOSTNAME || wget -q -O - icanhazip.com'"
hostname = subprocess.check_output(command, shell=True)
if ':' in hostname:
hostname = ''
except subprocess.CalledProcessError:
pass
key_file = os.getenv('KEY_FILE', None)
if os.path.isfile('/home/ec2-user/.ssh/authorized_keys'):
key_file = '/home/ec2-user/.ssh/authorized_keys'
elif os.path.isfile('/home/ubuntu/.ssh/authorized_keys'):
key_file = '/home/ubuntu/.ssh/authorized_keys'
else:
key_file = '/root/.ssh/authorized_keys'
admin_keys = []
if os.path.isfile(key_file):
try:
command = "cat {0}".format(key_file)
admin_keys = subprocess.check_output(command, shell=True).strip().split("\n")
except subprocess.CalledProcessError:
pass
def check_boot():
if 'onboot' not in sys.argv:
return
init_dir = os.getenv('INIT_DIR', '/etc/init')
systemd_dir = os.getenv('SYSTEMD_DIR', '/etc/systemd/system')
nginx_dir = os.getenv('NGINX_DIR', '/etc/nginx/conf.d')
if os.path.exists(init_dir):
with open('{0}/dokku-installer.conf'.format(init_dir), 'w') as f:
f.write("start on runlevel [2345]\n")
f.write("exec {0} selfdestruct\n".format(os.path.abspath(__file__)))
if os.path.exists(systemd_dir):
with open('{0}/dokku-installer.service'.format(systemd_dir), 'w') as f:
f.write("[Unit]\n")
f.write("Description=Dokku web-installer\n")
f.write("\n")
f.write("[Service]\n")
f.write("ExecStart={0} selfdestruct\n".format(os.path.abspath(__file__)))
f.write("\n")
f.write("[Install]\n")
f.write("WantedBy=multi-user.target\n")
f.write("WantedBy=graphical.target\n")
if os.path.exists(nginx_dir):
with open('{0}/dokku-installer.conf'.format(nginx_dir), 'w') as f:
f.write("upstream dokku-installer { server 127.0.0.1:2000; }\n")
f.write("server {\n")
f.write(" listen 80;\n")
f.write(" location / {\n")
f.write(" proxy_pass http://dokku-installer;\n")
f.write(" }\n")
f.write("}\n")
subprocess.call('rm -f /etc/nginx/sites-enabled/*', shell=True)
sys.exit(0)
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
content = PAGE.replace('{VERSION}', VERSION)
content = content.replace('{HOSTNAME}', hostname)
content = content.replace('{AUTHORIZED_KEYS_LOCATION}', key_file)
content = content.replace('{ADMIN_KEYS}', "\n".join(admin_keys))
self.send_response(200)
self.end_headers()
self.wfile.write(content)
def do_POST(self):
if self.path not in ['/setup', '/setup/']:
return
params = cgi.FieldStorage(fp=self.rfile,
headers=self.headers,
environ={
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type']})
vhost_enable = 'false'
dokku_root = os.getenv('DOKKU_ROOT', '/home/dokku')
if 'vhost' in params and params['vhost'].value == 'true':
vhost_enable = 'true'
with open('{0}/VHOST'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
else:
try:
os.remove('{0}/VHOST'.format(dokku_root))
except OSError:
pass
with open('{0}/HOSTNAME'.format(dokku_root), 'w') as f:
f.write(params['hostname'].value)
for (index, key) in enumerate(params['keys'].value.splitlines(), 1):
user = 'admin'
if self.admin_user_exists() is not None:
user = 'web-admin'
if self.web_admin_user_exists() is not None:
index = int(self.web_admin_user_exists()) + 1
elif self.web_admin_user_exists() is None:
index = 1
elif self.admin_user_exists() is None:
pass
else:
index = int(self.admin_user_exists()) + 1
user = user + str(index)
command = ['sshcommand', 'acl-add', 'dokku', user]
proc = subprocess.Popen(command, stdin=subprocess.PIPE)
proc.stdin.write(key)
proc.stdin.close()
proc.wait()
set_debconf_selection('boolean', 'nginx_enable', 'true')
set_debconf_selection('boolean', 'skip_key_file', 'true')
set_debconf_selection('boolean', 'vhost_enable', vhost_enable)
set_debconf_selection('boolean', 'web_config', 'false')
set_debconf_selection('string', 'hostname', params['hostname'].value)
if 'selfdestruct' in sys.argv:
DeleteInstallerThread()
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps({'status': 'ok'}))
def web_admin_user_exists(self):
return self.user_exists('web-admin(\d+)')
def admin_user_exists(self):
return self.user_exists('admin(\d+)')
def user_exists(self, name):
command = 'dokku ssh-keys:list'
pattern = re.compile(r'NAME="' + name + '"')
proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
max_num = 0
exists = False
for line in proc.stdout:
m = pattern.search(line)
if m:
# User of the form `user` or `user#` exists
exists = True
                max_num = max(max_num, int(m.group(1)))
if exists:
return max_num
else:
return None
def set_debconf_selection(debconf_type, key, value):
found = False
with open('/etc/os-release', 'r') as f:
for line in f:
if 'debian' in line:
found = True
if not found:
return
ps = subprocess.Popen(['echo', 'dokku dokku/{0} {1} {2}'.format(
key, debconf_type, value
)], stdout=subprocess.PIPE)
try:
subprocess.check_output(['debconf-set-selections'], stdin=ps.stdout)
except subprocess.CalledProcessError:
pass
ps.wait()
class DeleteInstallerThread(object):
def __init__(self, interval=1):
thread = threading.Thread(target=self.run, args=())
thread.daemon = True
thread.start()
def run(self):
command = "rm /etc/nginx/conf.d/dokku-installer.conf && /etc/init.d/nginx stop && /etc/init.d/nginx start"
try:
subprocess.call(command, shell=True)
except:
pass
command = "rm -f /etc/init/dokku-installer.conf /etc/systemd/system/dokku-installer.service && (stop dokku-installer || systemctl stop dokku-installer.service)"
try:
subprocess.call(command, shell=True)
except:
pass
def main():
check_boot()
port = int(os.getenv('PORT', 2000))
httpd = SocketServer.TCPServer(("", port), GetHandler)
print "Listening on 0.0.0.0:{0}, CTRL+C to stop".format(port)
httpd.serve_forever()
PAGE = """
<html>
<head>
<meta charset="utf-8" />
<title>Dokku Setup</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<style>
.bd-callout {
padding: 1.25rem;
margin-top: 1.25rem;
margin-bottom: 1.25rem;
border: 1px solid #eee;
border-left-width: .25rem;
border-radius: .25rem;
}
.bd-callout p:last-child {
margin-bottom: 0;
}
.bd-callout-info {
border-left-color: #5bc0de;
}
pre {
font-size: 80%;
margin-bottom: 0;
}
h1 small {
font-size: 50%;
}
h5 {
font-size: 1rem;
}
.container {
width: 640px;
}
.result {
padding-left: 20px;
}
input.form-control, textarea.form-control {
background-color: #fafbfc;
font-size: 14px;
}
input.form-control::placeholder, textarea.form-control::placeholder {
color: #adb2b8
}
</style>
</head>
<body>
<div class="container">
<form id="form" role="form">
<h1 class="pt-3">Dokku Setup <small class="text-muted">{VERSION}</small></h1>
<div class="alert alert-warning small" role="alert">
<strong>Warning:</strong> The SSH key filled out here can grant root access to the server. Please complete the setup as soon as possible.
</div>
<div class="row">
<div class="col">
<h3>Admin Access</h3>
<div class="form-group">
<label for="key">Public SSH Keys</label><br />
<textarea class="form-control" name="keys" rows="5" id="key" placeholder="Begins with 'ssh-rsa', 'ssh-dss', 'ssh-ed25519', 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384', or 'ecdsa-sha2-nistp521'">{ADMIN_KEYS}</textarea>
<small class="form-text text-muted">Public keys allow users to ssh onto the server as the <code>dokku</code> user, as well as remotely execute Dokku commands. They are currently auto-populated from: <code>{AUTHORIZED_KEYS_LOCATION}</code>, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/deployment/user-management/" target="_blank"><code>dokku ssh-keys</code></a> plugin.</small>
</div>
</div>
</div>
<div class="row">
<div class="col">
<h3>Hostname Configuration</h3>
<div class="form-group">
<label for="hostname">Hostname</label>
<input class="form-control" type="text" id="hostname" name="hostname" value="{HOSTNAME}" placeholder="A hostname or ip address such as {HOSTNAME}" />
<small class="form-text text-muted">This will be used as the default host for all applications, and can be changed later via the <a href="http://dokku.viewdocs.io/dokku/configuration/domains/" target="_blank"><code>dokku domains:set-global</code></a> command.</small>
</div>
<div class="form-check">
<input class="form-check-input" type="checkbox" id="vhost" name="vhost" value="true">
<label class="form-check-label" for="vhost">Use virtualhost naming for apps</label>
<small class="form-text text-muted">When enabled, Nginx will be run on port 80 and proxy requests to apps based on hostname.</small>
<small class="form-text text-muted">When disabled, a specific port will be setup for each application on first deploy, and requests to that port will be proxied to the relevant app.</small>
</div>
<div class="bd-callout bd-callout-info">
<h5>What will app URLs look like?</h5>
<pre><code id="example">http://hostname:port</code></pre>
</div>
</div>
</div>
<button type="button" onclick="setup()" class="btn btn-primary">Finish Setup</button> <span class="result"></span>
</form>
</div>
<div id="error-output"></div>
<script>
var $ = document.querySelector.bind(document)
function setup() {
if ($("#key").value.trim() == "") {
alert("Your admin public key cannot be blank.")
return
}
if ($("#hostname").value.trim() == "") {
alert("Your hostname cannot be blank.")
return
}
var data = new FormData($("#form"))
var inputs = [].slice.call(document.querySelectorAll("input, textarea, button"))
inputs.forEach(function (input) {
input.disabled = true
})
var result = $(".result")
fetch("/setup", {method: "POST", body: data})
.then(function(response) {
if (response.ok) {
return response.json()
} else {
throw new Error('Server returned error')
}
})
.then(function(response) {
result.classList.add("text-success");
result.textContent = "Success! Redirecting in 3 seconds. .."
setTimeout(function() {
window.location.href = "http://dokku.viewdocs.io/dokku~{VERSION}/deployment/application-deployment/";
}, 3000);
})
.catch(function (error) {
result.classList.add("text-danger");
result.textContent = "Could not send the request"
})
}
function update() {
if ($("#vhost").matches(":checked") && $("#hostname").value.match(/^(\d{1,3}\.){3}\d{1,3}$/)) {
alert("In order to use virtualhost naming, the hostname must not be an IP but a valid domain name.")
$("#vhost").checked = false;
}
if ($("#vhost").matches(':checked')) {
$("#example").textContent = "http://<app-name>."+$("#hostname").value
} else {
$("#example").textContent = "http://"+$("#hostname").value+":<app-port>"
}
}
$("#vhost").addEventListener("change", update);
$("#hostname").addEventListener("input", update);
update();
</script>
</body>
</html>
"""
if __name__ == "__main__":
main()
| alexquick/dokku | contrib/dokku-installer.py | Python | mit | 13,390 |
# Copyright (C) 2016 Deloitte Argentina.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
from PlugIns.PlugIn import PlugIn
class CypherPlug(PlugIn):
def __init__(self, sample=None):
PlugIn.__init__(self, sample)
def getPath(self):
return "particular_header.cypher"
def getName(self):
return "cypher"
def getVersion(self):
return 1
def process(self):
return "Not_implemented"
| codexgigassys/codex-backend | src/PlugIns/PE/CypherPlug.py | Python | mit | 513 |
# vi: ts=8 sts=4 sw=4 et
#
# uri.py: various URI related utilties
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
import os
import os.path
import re
import stat
# URL/Form encoding
safe_chars = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789' '_.-')
def quote_hex(s, safe=''):
"""
Replace potentially unsafe characters in 's' with their %XX hexadecimal
counterparts. You can pass additional safe characters in `safe'.
"""
res = list(s)
safe += safe_chars
for i in range(len(s)):
c = res[i]
if c not in safe:
res[i] = '%%%02X' % ord(c)
return ''.join(res)
def unquote_hex(s):
"""
Change %XX occurences in `s' with their character value.
Does the opposite of quote_url().
"""
lst = s.split('%')
res = [lst[0]]
for s in lst[1:]:
if len(s) >= 2:
try:
c = chr(int(s[:2], 16))
res.append(c + s[2:])
except ValueError:
res.append('%' + s)
else:
res.append('%' + s)
return ''.join(res)
def quote_url(s):
"""URL encode a string."""
if isinstance(s, unicode):
s = s.encode('utf-8')
return quote_hex(s, '/')
def unquote_url(s):
"""Decode an URL encoded string."""
s = unquote_hex(s)
s = s.decode('utf-8')
return s
def quote_form(s):
"""Form encode a string."""
if isinstance(s, unicode):
s = s.encode('utf-8')
s = quote_hex(s, ' ')
s = s.replace(' ', '+')
return s
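# Example (hypothetical input): quote_form(u'a b&c') returns 'a+b%26c';
# unquote_form() reverses the encoding.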
def unquote_form(s):
"""Decode a form encoded string."""
s = s.replace('+', ' ')
s = unquote_hex(s)
s = s.decode('utf-8')
return s
# URI parsing
re_uri = re.compile('(?:([^:/?]*):)?(?://([^?/]*))?(?:/?([^?]*))(?:\?(.*))?')
def parse_uri(uri):
"""Parse an URI into its components.
The result is a 4-tuple (scheme, host, path, query).
Note: This function only supports the "hier_part" URL format as
defined in RFC2396 section 3. The "opaque_part" format is not
supported.
"""
mobj = re_uri.match(uri)
assert mobj
result = list(mobj.groups())
for i,value in enumerate(result):
if result[i] is None:
result[i] = ''
return tuple(result)
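# Example (hypothetical input): parse_uri('http://example.com/a/b?x=1')
# gives ('http', 'example.com', 'a/b', 'x=1'); missing components come back as ''.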
def create_uri(scheme=None, host=None, path=None, query=None):
"""Create an URI from its components."""
uri = ''
if scheme:
uri += '%s:' % scheme
if host:
uri += '//%s' % host
if path:
uri += '/%s' % path
if query:
uri += '?%s' % query
return uri
def parse_path(path):
"""Parse the "path" component of an URI.
The result is a list of path components.
"""
parts = [ unquote_url(pa) for pa in path.split('/') if pa ]
return parts
def create_path(parts):
"""Create a "path" component of an URI.
This function is the reverse of parse_path().
"""
parts = [ quote_url(pa) for pa in parts ]
path = '/'.join(parts)
return path
def parse_query(query):
"""Parse the "query" component of an URI.
The result is a dictionary that maps a string key to a list with
one or more string values.
"""
args = {}
parts = query.split('&')
for pa in parts:
try:
name, value = pa.split('=')
except ValueError:
continue
name = unquote_form(name)
value = unquote_form(value)
try:
args[name].append(value)
except KeyError:
args[name] = [value]
return args
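# Example (hypothetical input): parse_query('a=1&a=2&b=x+y')
# gives {u'a': [u'1', u'2'], u'b': [u'x y']}; repeated keys accumulate in the list.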
def create_query(args):
"""Create the "query" component of an URI.
This function is the reverse of parse_query().
"""
args = [ '%s=%s' % (quote_form(key), quote_form(value))
for key,value in args.items() ]
query = '&'.join(args)
return query
# URL path resolution
class ResolutionError(Exception):
pass
def resolve_path_uri(path, docroot):
"""Resolves the path part of an URI.
The URI is resolved to the 3-tuple: directory, filename, pathinfo.
The filename component is either empty or a single path component,
and may or may not exist as a physical file. The pathinfo component
consists of zero or more path components.
"""
try:
st = os.stat(docroot)
except OSError:
st = None
if st is None or not stat.S_ISDIR(st.st_mode):
raise ResolutionError, 'Document root does not exist.'
directory = []
subdir = docroot
parts = [ unquote_url(part) for part in path.split('/') if part ]
for i in range(len(parts)):
part = parts[i]
if part in ('.', '..'):
raise ResolutionError, \
'Current or parent directory not allowed in URI.'
subdir = os.path.join(subdir, part)
try:
st = os.stat(subdir)
except OSError:
st = None
if st is None or not stat.S_ISDIR(st.st_mode):
filename = parts[i]
pathinfo = '/'.join(parts[i+1:])
break
directory.append(part)
else:
filename = ''
pathinfo = ''
directory = '/'.join(directory)
return (directory, filename, pathinfo)
def create_path_uri(directory, filename, pathinfo):
"""Create a path URI from a 3-tuple (directory, filename, pathinfo)."""
parts = []
if directory:
parts.append(directory)
if filename:
parts.append(filename)
if pathinfo:
parts += [ part for part in pathinfo.split('/') if part ]
parts = [ quote_url(part) for part in parts ]
path = '/'.join(parts)
return path
| geertj/draco2 | draco2/util/uri.py | Python | mit | 5,926 |
from datetime import datetime
import rfGengou
from . import PluginBase
__all__ = ['Gengo']
class Gengo(PluginBase):
def execute(self, args):
if len(args) == 0:
target = datetime.now()
elif len(args) == 1:
target = datetime.strptime(args[0], '%Y/%m/%d')
else:
raise ValueError('wrong number of arguments are given')
return '{:s}{:d}年{:d}月{:d}日'.format(*rfGengou.s2g(target))
def help(self):
return """[yyyy/mm/dd]
Convert from string to Japanese Gengo.
If string is not given, use current time.
ex)
> gengo
平成28年12月2日
> gengo 2000/01/01
平成12年1月1日
""" | mikoim/funstuff | codecheck/codecheck-3608/app/plugins/gengo.py | Python | mit | 676 |
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Copyright (c) 2014 Dyffy, Inc.
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class SidecoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
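# e.g. bytereverse(0x12345678) == 0x78563412 (swaps the byte order of a 32-bit word)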
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = SidecoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| joeykrug/sidecoin | contrib/pyminer/pyminer.py | Python | mit | 6,470 |
#!/usr/bin/env python
###############################################################################
# Copyright (C) 1994 - 2013, Performance Dynamics Company #
# #
# This software is licensed as described in the file COPYING, which #
# you should have received as part of this distribution. The terms #
# are also available at http://www.perfdynamics.com/Tools/copyright.html. #
# #
# You may opt to use, copy, modify, merge, publish, distribute and/or sell #
# copies of the Software, and permit persons to whom the Software is #
# furnished to do so, under the terms of the COPYING file. #
# #
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY #
# KIND, either express or implied. #
###############################################################################
# $Id: spamcan1.py,v 1.3 2012/11/13 03:12:04 earl-lang Exp $
# Created by NJG on Wed, Apr 18, 2007
#
# Queueing model of an email-spam analyzer system comprising a
# battery of SMP servers essentially running in batch mode.
# Each node was a 4-way SMP server.
# The performance metric of interest was the mean queue length.
#
# This simple M/M/4 model gave results that were in surprisingly
# good agreement with monitored queue lengths.
import pdq
# Measured performance parameters
cpusPerServer = 4
emailThruput = 2376 # emails per hour
scannerTime = 6.0 # seconds per email
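# Note: offered load is (2376/3600) emails/sec * 6 sec ~= 3.96 erlangs spread over
# 4 CPUs, i.e. ~99% utilization per CPU, which is why queue length is the metric of interest.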
pdq.Init("Spam Farm Model")
# Timebase is SECONDS ...
nstreams = pdq.CreateOpen("Email", float(emailThruput)/3600)
nnodes = pdq.CreateNode("spamCan", int(cpusPerServer), pdq.MSQ)
pdq.SetDemand("spamCan", "Email", scannerTime)
pdq.Solve(pdq.CANON)
pdq.Report()
| evelynmitchell/pdq | examples/Linux Magazine/spamcan1.py | Python | mit | 1,994 |
#coding=utf-8
import pymongo
def delete_repeat_data():
client = pymongo.MongoClient('localhost', 27017)
db = client.admin
collection = db.taplist
    for url in collection.distinct('game_id'):  # use distinct() to get each unique value
        num = collection.count({"game_id": url})  # count how many documents share this value
print num, "===== aawa =====", url
        # for i in range(1, num):  # delete duplicates based on the count; once only one copy remains, stop deleting
# print 'delete %s %d times ' % (url, i)
        # # note the second argument: oddly, in the mongo shell a value of 1 removes a single document, but here 0 removes one
# collection.remove({"game_id": url}, 0)
        # for i in collection.find({"game_id": url}):  # print all remaining documents
# print i
    # print collection.distinct('game_id')  # print the deduplicated values once more
delete_repeat_data()
| andyrenpanlong/Taptap | tabUnique.py | Python | mit | 987 |
import argparse
from osgb import osgb_to_lonlat
from osgb.convert import eastnorth_to_osgb
from utils.database import insert_into_db, empty_table, execute_sql
# Loads data from here: https://data.gov.uk/dataset/naptan
def read_stations(filename):
with open(filename, 'r') as input_file:
for line in input_file.readlines()[1:]:
splits = line.strip().split(",")
yield {
"crs": splits[2],
"name": splits[3],
"easting": long(splits[6]),
"northing": long(splits[7])
}
def convert(row):
e = row["easting"]
n = row["northing"]
lon, lat = osgb_to_lonlat(eastnorth_to_osgb(e, n, digits=4))
row["latitude"] = lat
row["longitude"] = lon
return row
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='National Rail Data Collector')
parser.add_argument('--filename', help='Input CSV file', default="data/RailReferences.csv")
parser.add_argument('--db', help='SQLite DB Name', default="data/trains.db")
args = parser.parse_args()
execute_sql(args.db, "create table if not exists stations (crs TEXT, name TEXT, easting INT, northing INT, latitude DOUBLE, longitude DOUBLE);")
rows = read_stations(args.filename)
stations = map(convert, rows)
empty_table(args.db, "stations")
insert_into_db(args.db, "stations", stations)
| DanteLore/national-rail | loadstations.py | Python | mit | 1,412 |
"""
Tests for install.py for SUSE based Linux distributions
"""
import os
import shutil
from unittest import mock
import pytest
from install import Cmd, CmdError, RemoteFileNotFoundError
pytestmark = pytest.mark.skipif(
not pytest.helpers.helper_is_suse(),
reason="Tests for openSUSE/SUSE"
)
def test_rpm_download_raise_not_found_error(sys_rpm):
with mock.patch.object(Cmd, 'sh_e') as mock_sh_e:
ce = CmdError('test.')
ce.stderr = 'Package \'dummy\' not found.\n'
mock_sh_e.side_effect = ce
with pytest.raises(RemoteFileNotFoundError) as exc:
sys_rpm.download('dummy')
assert mock_sh_e.called
assert str(exc.value) == 'Package dummy not found on remote'
def test_rpm_extract_is_ok(sys_rpm, rpm_files, monkeypatch):
# mocking arch object for multi arch test cases.
sys_rpm.arch = 'x86_64'
with pytest.helpers.work_dir():
for rpm_file in rpm_files:
shutil.copy(rpm_file, '.')
sys_rpm.extract('rpm-build-libs')
files = os.listdir('./usr/lib64')
files.sort()
assert files == [
'librpmbuild.so.7',
'librpmbuild.so.7.0.1',
'librpmsign.so.7',
'librpmsign.so.7.0.1',
]
@pytest.mark.network
def test_app_verify_system_status_is_ok_on_sys_rpm_and_missing_pkgs(app):
app.linux.rpm.is_system_rpm = mock.MagicMock(return_value=True)
app.linux.verify_system_status()
| junaruga/rpm-py-installer | tests/test_install_suse.py | Python | mit | 1,463 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DscpConfigurationOperations(object):
"""DscpConfigurationOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
dscp_configuration_name, # type: str
parameters, # type: "_models.DscpConfiguration"
**kwargs # type: Any
):
# type: (...) -> "_models.DscpConfiguration"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DscpConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DscpConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DscpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
dscp_configuration_name, # type: str
parameters, # type: "_models.DscpConfiguration"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.DscpConfiguration"]
"""Creates or updates a DSCP Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dscp_configuration_name: The name of the resource.
:type dscp_configuration_name: str
:param parameters: Parameters supplied to the create or update dscp configuration operation.
:type parameters: ~azure.mgmt.network.v2020_06_01.models.DscpConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DscpConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_06_01.models.DscpConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
dscp_configuration_name=dscp_configuration_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DscpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
dscp_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
dscp_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a DSCP Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dscp_configuration_name: The name of the resource.
:type dscp_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
dscp_configuration_name=dscp_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
dscp_configuration_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DscpConfiguration"
"""Gets a DSCP Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dscp_configuration_name: The name of the resource.
:type dscp_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DscpConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_06_01.models.DscpConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'dscpConfigurationName': self._serialize.url("dscp_configuration_name", dscp_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DscpConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations/{dscpConfigurationName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DscpConfigurationListResult"]
"""Gets a DSCP Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DscpConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.DscpConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DscpConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/dscpConfigurations'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DscpConfigurationListResult"]
"""Gets all dscp configurations in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DscpConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_06_01.models.DscpConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DscpConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DscpConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/dscpConfigurations'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_06_01/operations/_dscp_configuration_operations.py | Python | mit | 23,827 |
import rdflib
from rdflib.term import URIRef, Variable
from PyOpenWorm.dataObject import DataObject, InverseProperty
from PyOpenWorm.context import Context
from PyOpenWorm.context_store import ContextStore
from .DataTestTemplate import _DataTest
try:
from unittest.mock import MagicMock, Mock
except ImportError:
from mock import MagicMock, Mock
class ContextTest(_DataTest):
def test_inverse_property_context(self):
class A(DataObject):
def __init__(self, **kwargs):
super(A, self).__init__(**kwargs)
self.a = A.ObjectProperty(value_type=B)
class B(DataObject):
def __init__(self, **kwargs):
super(B, self).__init__(**kwargs)
self.b = B.ObjectProperty(value_type=A)
InverseProperty(B, 'b', A, 'a')
ctx1 = Context(ident='http://example.org/context_1')
ctx2 = Context(ident='http://example.org/context_2')
a = ctx1(A)(ident='a')
b = ctx2(B)(ident='b')
a.a(b)
expected = (URIRef('b'), URIRef('http://openworm.org/entities/B/b'), URIRef('a'))
self.assertIn(expected, list(ctx1.contents_triples()))
def test_defined(self):
class A(DataObject):
def __init__(self, **kwargs):
super(A, self).__init__(**kwargs)
self.a = A.ObjectProperty(value_type=B)
def defined_augment(self):
return self.a.has_defined_value()
def identifier_augment(self):
return self.make_identifier(self.a.onedef().identifier.n3())
class B(DataObject):
def __init__(self, **kwargs):
super(B, self).__init__(**kwargs)
self.b = B.ObjectProperty(value_type=A)
InverseProperty(B, 'b', A, 'a')
ctx1 = Context(ident='http://example.org/context_1')
ctx2 = Context(ident='http://example.org/context_2')
a = ctx1(A)()
b = ctx2(B)(ident='b')
a.a(b)
self.assertTrue(a.defined)
def test_save_context_no_graph(self):
ctx = Context()
del ctx.conf['rdf.graph']
with self.assertRaisesRegexp(Exception, r'graph'):
ctx.save_context()
def test_context_store(self):
class A(DataObject):
pass
ctx = Context(ident='http://example.com/context_1')
ctx(A)(ident='anA')
self.assertIn(URIRef('anA'),
tuple(x.identifier for x in ctx.query(A)().load()))
def test_decontextualize(self):
class A(DataObject):
pass
ctx = Context(ident='http://example.com/context_1')
ctxda = ctx(A)(ident='anA')
self.assertIsNone(ctxda.decontextualize().context)
def test_init_imports(self):
ctx = Context(ident='http://example.com/context_1')
self.assertEqual(len(list(ctx.imports)), 0)
def test_zero_imports(self):
ctx0 = Context(ident='http://example.com/context_0')
ctx = Context(ident='http://example.com/context_1')
ctx.save_imports(ctx0)
self.assertEqual(len(ctx0), 0)
def test_save_import(self):
ctx0 = Context(ident='http://example.com/context_0')
ctx = Context(ident='http://example.com/context_1')
new_ctx = Context(ident='http://example.com/context_1')
ctx.add_import(new_ctx)
ctx.save_imports(ctx0)
self.assertEqual(len(ctx0), 1)
def test_add_import(self):
ctx0 = Context(ident='http://example.com/context_0')
ctx = Context(ident='http://example.com/context_1')
ctx2 = Context(ident='http://example.com/context_2')
ctx2_1 = Context(ident='http://example.com/context_2_1')
ctx.add_import(ctx2)
ctx.add_import(ctx2_1)
ctx3 = Context(ident='http://example.com/context_3')
ctx3.add_import(ctx)
final_ctx = Context(ident='http://example.com/context_1', imported=(ctx3,))
final_ctx.save_imports(ctx0)
self.assertEqual(len(ctx0), 4)
def test_init_len(self):
ctx = Context(ident='http://example.com/context_1')
self.assertEqual(len(ctx), 0)
def test_len(self):
ident_uri = 'http://example.com/context_1'
ctx = Context(ident=ident_uri)
for i in range(5):
ctx.add_statement(create_mock_statement(ident_uri, i))
self.assertEqual(len(ctx), 5)
def test_add_remove_statement(self):
ident_uri = 'http://example.com/context_1'
ctx = Context(ident=ident_uri)
stmt_to_remove = create_mock_statement(ident_uri, 42)
for i in range(5):
ctx.add_statement(create_mock_statement(ident_uri, i))
ctx.add_statement(stmt_to_remove)
ctx.remove_statement(stmt_to_remove)
self.assertEqual(len(ctx), 5)
def test_add_statement_with_different_context(self):
ctx = Context(ident='http://example.com/context_1')
stmt1 = create_mock_statement('http://example.com/context_2', 1)
with self.assertRaises(ValueError):
ctx.add_statement(stmt1)
def test_contents_triples(self):
res_wanted = []
ident_uri = 'http://example.com/context_1'
ctx = Context(ident=ident_uri)
for i in range(5):
stmt = create_mock_statement(ident_uri, i)
ctx.add_statement(stmt)
res_wanted.append(stmt.to_triple())
for triples in ctx.contents_triples():
self.assertTrue(triples in res_wanted)
def test_clear(self):
ident_uri = 'http://example.com/context_1'
ctx = Context(ident=ident_uri)
for i in range(5):
ctx.add_statement(create_mock_statement(ident_uri, i))
ctx.clear()
self.assertEqual(len(ctx), 0)
def test_save_context(self):
graph = set()
ident_uri = 'http://example.com/context_1'
ctx = Context(ident=ident_uri)
for i in range(5):
ctx.add_statement(create_mock_statement(ident_uri, i))
ctx.save_context(graph)
self.assertEqual(len(graph), 5)
def test_save_context_with_inline_imports(self):
graph = set()
ident_uri = 'http://example.com/context_1'
ident_uri2 = 'http://example.com/context_2'
ident_uri2_1 = 'http://example.com/context_2_1'
ident_uri3 = 'http://example.com/context_3'
ident_uri4 = 'http://example.com/context_4'
ctx = Context(ident=ident_uri)
ctx2 = Context(ident=ident_uri2)
ctx2_1 = Context(ident=ident_uri2_1)
ctx.add_import(ctx2)
ctx.add_import(ctx2_1)
ctx3 = Context(ident=ident_uri3)
ctx3.add_import(ctx)
last_ctx = Context(ident=ident_uri4)
last_ctx.add_import(ctx3)
ctx.add_statement(create_mock_statement(ident_uri, 1))
ctx2.add_statement(create_mock_statement(ident_uri2, 2))
ctx2_1.add_statement(create_mock_statement(ident_uri2_1, 2.1))
ctx3.add_statement(create_mock_statement(ident_uri3, 3))
last_ctx.add_statement(create_mock_statement(ident_uri4, 4))
last_ctx.save_context(graph, True)
self.assertEqual(len(graph), 5)
def test_triples_saved(self):
graph = set()
ident_uri = 'http://example.com/context_1'
ident_uri2 = 'http://example.com/context_2'
ident_uri2_1 = 'http://example.com/context_2_1'
ident_uri3 = 'http://example.com/context_3'
ident_uri4 = 'http://example.com/context_4'
ctx = Context(ident=ident_uri)
ctx2 = Context(ident=ident_uri2)
ctx2_1 = Context(ident=ident_uri2_1)
ctx.add_import(ctx2)
ctx.add_import(ctx2_1)
ctx3 = Context(ident=ident_uri3)
ctx3.add_import(ctx)
last_ctx = Context(ident=ident_uri4)
last_ctx.add_import(ctx3)
ctx.add_statement(create_mock_statement(ident_uri, 1))
ctx2.add_statement(create_mock_statement(ident_uri2, 2))
ctx2_1.add_statement(create_mock_statement(ident_uri2_1, 2.1))
ctx3.add_statement(create_mock_statement(ident_uri3, 3))
last_ctx.add_statement(create_mock_statement(ident_uri4, 4))
last_ctx.save_context(graph, True)
self.assertEqual(last_ctx.triples_saved, 5)
def test_triples_saved_noundef_triples_counted(self):
graph = set()
ident_uri = 'http://example.com/context_1'
ctx = Context(ident=ident_uri)
statement = MagicMock()
statement.context.identifier = rdflib.term.URIRef(ident_uri)
statement.to_triple.return_value = (Variable('var'), 1, 2)
ctx.add_statement(statement)
ctx.save_context(graph)
self.assertEqual(ctx.triples_saved, 0)
def test_triples_saved_multi(self):
graph = set()
ident_uri = 'http://example.com/context_1'
ident_uri1 = 'http://example.com/context_11'
ident_uri2 = 'http://example.com/context_12'
ctx = Context(ident=ident_uri)
ctx1 = Context(ident=ident_uri1)
ctx2 = Context(ident=ident_uri2)
ctx2.add_import(ctx)
ctx1.add_import(ctx2)
ctx1.add_import(ctx)
ctx.add_statement(create_mock_statement(ident_uri, 1))
ctx1.add_statement(create_mock_statement(ident_uri1, 3))
ctx2.add_statement(create_mock_statement(ident_uri2, 2))
ctx1.save_context(graph, inline_imports=True)
self.assertEqual(ctx1.triples_saved, 3)
def test_context_getter(self):
ctx = Context(ident='http://example.com/context_1')
self.assertIsNone(ctx.context)
def test_context_setter(self):
ctx = Context(ident='http://example.com/context_1')
ctx.context = 42
self.assertEqual(ctx.context, 42)
class ContextStoreTest(_DataTest):
def test_query(self):
rdf_type = 'http://example.org/A'
ctxid = URIRef('http://example.com/context_1')
ctx = Mock()
graph = Mock()
graph.store.triples.side_effect = ([], [((URIRef('anA0'), rdflib.RDF.type, rdf_type), (ctxid,))],)
ctx.conf = {'rdf.graph': graph}
ctx.contents_triples.return_value = [(URIRef('anA'), rdflib.RDF.type, rdf_type)]
ctx.identifier = ctxid
ctx.imports = []
store = ContextStore(ctx, include_stored=True)
self.assertEqual(set([URIRef('anA'), URIRef('anA0')]),
set(x[0][0] for x in store.triples((None, rdflib.RDF.type, rdf_type))))
def test_contexts_staged_ignores_stored(self):
ctxid0 = URIRef('http://example.com/context_0')
ctxid1 = URIRef('http://example.com/context_1')
ctx = Mock()
graph = Mock()
graph.store.triples.side_effect = [[((None, None, ctxid0), ())], []]
ctx.conf = {'rdf.graph': graph}
ctx.contents_triples.return_value = ()
ctx.identifier = ctxid1
ctx.imports = []
store = ContextStore(ctx)
self.assertNotIn(ctxid0, set(store.contexts()))
def test_contexts_combined(self):
ctxid0 = URIRef('http://example.com/context_0')
ctxid1 = URIRef('http://example.com/context_1')
ctx = Mock()
graph = Mock()
graph.store.triples.side_effect = [[((None, None, ctxid0), ())], []]
ctx.conf = {'rdf.graph': graph}
ctx.contents_triples.return_value = ()
ctx.identifier = ctxid1
ctx.imports = []
store = ContextStore(ctx, include_stored=True)
self.assertEqual(set([ctxid0, ctxid1]),
set(store.contexts()))
def test_len_fail(self):
ctx = Mock()
graph = Mock()
ctx.conf = {'rdf.graph': graph}
ctx.contents_triples.return_value = ()
ctx.imports = []
store = ContextStore(ctx, include_stored=True)
with self.assertRaises(NotImplementedError):
len(store)
def create_mock_statement(ident_uri, stmt_id):
statement = MagicMock()
statement.context.identifier = rdflib.term.URIRef(ident_uri)
statement.to_triple.return_value = (True, stmt_id, -stmt_id)
return statement
| gsarma/PyOpenWorm | tests/ContextTest.py | Python | mit | 12,077 |
# -*- coding: utf-8 -*-
#
# Wokkel documentation build configuration file, created by
# sphinx-quickstart on Mon May 7 11:15:38 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['apilinks_sphinxext']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Wokkel'
copyright = u'2003-2012, Ralph Meijer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '18.0.0'
# The full version, including alpha/beta/rc tags.
release = '18.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'listings']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# pydoctor API base URL
apilinks_base_url = 'api/'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['localtoc.html', 'indexsidebar.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Wokkeldoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Wokkel.tex', u'Wokkel Documentation',
u'Ralph Meijer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wokkel', u'Wokkel Documentation',
[u'Ralph Meijer'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Wokkel', u'Wokkel Documentation',
u'Ralph Meijer', 'Wokkel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| ralphm/wokkel | doc/conf.py | Python | mit | 7,874 |
'''
A drop-in replacement for optparse ( "import optparse_gui as optparse" )
Provides an identical interface to optparse.OptionParser,
but displays an automatically generated wx dialog for entering the
options/args instead of parsing command line arguments
'''
import sys, os, os.path, fnmatch, types, time
import re, copy, StringIO, csv, glob
import math, optparse
from optparse import OptionGroup
from datetime import timedelta
__version__ = 0.1
__revision__ = '$Id: $'
def check_multichoice(option, opt, value):
if not value:
return value
for v in value.split(','):
if v not in option.multichoices:
choices = ", ".join(map(repr, option.multichoices))
raise optparse.OptionValueError(
"option %s: invalid choice: %r (choose one or more from %s)"
% (opt, value, choices))
return value
def check_file(option, opt, value):
value = value.strip('"')
if not value:
return value
value = os.path.expanduser(value)
value = os.path.expandvars(value)
value1 = glob.glob(value)
# value1 += glob.glob(value+'.gz')
# value1 += glob.glob(value+'.bz2')
if len(value1) > 1:
raise optparse.OptionValueError(
"option %s: Too many files selected: %s" % (opt, value))
if len(value1) == 0:
raise optparse.OptionValueError(
"option %s: File does not exist: %s" % (opt, value))
value = value1[0]
if option.filetypes:
match = False
for name,globlst in option.filetypes:
for gl in globlst.split(';'):
for cmp in ('','.gz','.bz2'):
if fnmatch.fnmatch(os.path.split(value)[1],gl+cmp):
match = True
break
if match:
break
if match:
break
if not match:
raise optparse.OptionValueError(
"option %s: File %s does not match required filetypes: %s" % (opt, value, ', '.join([ "%s (%s)"%(nm,ft) for nm,ft in option.filetypes])))
return value
def check_files(option, opt, ssv):
s = StringIO.StringIO(ssv)
rd = csv.reader(s,delimiter=' ',quotechar='"')
try:
files = iter(rd).next()
except StopIteration:
files = []
s.close()
files1 = []
for value in files:
value = os.path.expanduser(value)
value = os.path.expandvars(value)
gv = glob.glob(value)
# gv += glob.glob(value+'.gz')
# gv += glob.glob(value+'.bz2')
if len(gv) == 0 and '*' not in value and '?' not in value:
raise optparse.OptionValueError(
"option %s: File does not exist: %s" % (opt, value))
files1.extend(gv)
if len(files1) == 0 and ssv.strip():
raise optparse.OptionValueError(
"option %s: No files match pattern(s): %s" % (opt, ssv))
for value in files1:
if not os.path.isfile(value):
raise optparse.OptionValueError(
"option %s: File does not exist: %s" % (opt, value))
if option.filetypes:
match = False
for name,glb in option.filetypes:
for glbi in glb.split(';'):
for cmp in ('','.gz','.bz2'):
if fnmatch.fnmatch(os.path.split(value)[1],glbi+cmp):
match = True
break
if match:
break
if match:
break
if not match:
raise optparse.OptionValueError(
"option %s: File %s does not match required filetypes: %s" % (opt, value, ', '.join([ "%s (%s)"%(nm,ft) for nm,ft in option.filetypes])))
return files1
def check_savefile(option, opt, value):
value = value.strip('"')
if not option.notNone and not value:
return value
if os.path.exists(value) and not os.path.isfile(value):
raise optparse.OptionValueError(
"option %s: Can't overwrite path: %s" % (opt, value))
if option.filetypes:
match = False
for name,glb in option.filetypes:
for glbi in glb.split(';'):
if fnmatch.fnmatch(os.path.split(value)[1],glbi):
match = True
break
if match:
break
if not match:
raise optparse.OptionValueError(
"option %s: File %s does not match required filetypes: %s" % (opt, value, ', '.join([ "%s (%s)"%(nm,ft) for nm,ft in option.filetypes])))
return value
def check_savedir(option, opt, value):
value = value.strip('"')
if not option.notNone and not value:
return value
if os.path.exists(value) and not os.path.isdir(value):
raise optparse.OptionValueError(
"option %s: Can't remove path %s" % (opt, value))
return value
def check_dir(option, opt, value):
value = value.strip('"')
if not option.notNone and not value:
return value
if not os.path.exists(value):
raise optparse.OptionValueError(
"option %s: Does not exist %s" % (opt, value))
if not os.path.isdir(value):
raise optparse.OptionValueError(
"option %s: Not a directory %s" % (opt, value))
return value
class Option(optparse.Option):
ATTRS = optparse.Option.ATTRS + ['notNone','filetypes','name','text','multichoices','remember']
TYPES = optparse.Option.TYPES + ("password","file","savefile", "dir", "savedir", "files","multichoice")
TYPE_CHECKER = copy.copy(optparse.Option.TYPE_CHECKER)
TYPE_CHECKER["file"] = check_file
TYPE_CHECKER["files"] = check_files
TYPE_CHECKER["savefile"] = check_savefile
TYPE_CHECKER["savedir"] = check_savedir
TYPE_CHECKER["dir"] = check_dir
TYPE_CHECKER["multichoice"] = check_multichoice
class OptionParser( optparse.OptionParser ):
def __init__(self, *args, **kwargs ):
kwargs['option_class'] = Option
if 'dotfilename' in kwargs:
self.dotfilename = kwargs['dotfilename']
del kwargs['dotfilename']
optparse.OptionParser.__init__( self, *args, **kwargs )
def check_values (self, values, args):
for option in self.option_list:
if (isinstance(option, Option) and
option.notNone and
(getattr(values,option.dest) == "" or
getattr(values,option.dest) == None)):
self.error("%s is empty" % option)
return (values, args)
def get_defaults(self):
values = {}
for (g,o) in self.iteropts():
if o.dest != None:
if o.default == optparse.NO_DEFAULT or \
o.default == None:
values[o.dest] = ''
else:
values[o.dest] = o.default
values['-args-'] = ''
return values
def iteropts(self):
for o in self.option_list:
yield (None,o)
for og in self.option_groups:
for o in og.option_list:
yield (og,o)
def grpopts(self):
from collections import defaultdict
d = defaultdict(list)
for (g,o) in self.iteropts():
d[g].append(o)
return d
class UserCancelledError( Exception ):
pass
class Progress(object):
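    # Quiet levels as used by the methods below: 0 shows everything, 1 keeps stage
    # and message output but suppresses per-item updates and completion marks,
    # 2 suppresses all output.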
def __init__(self,quiet=0):
self._quiet = 0
self.quiet(quiet)
def quiet(self,q):
oldq = self._quiet
if isinstance(q,bool):
self._quiet = 2*q;
else:
assert isinstance(q,int)
self._quiet = q
return oldq
def message(self,message):
if self._quiet >= 2:
return
self.initbar(message,nl=True)
def stage(self,message,max=None,min=None,elapsed=True):
self.elapsed = elapsed
self.max = None
self.min = 0
if max != None:
self.max = float(max)
if min != None:
self.min = float(min)
self.value = 0
if self._quiet >= 2:
return
self.start = time.time()
if self.max:
self.initprogressbar(message)
else:
self.initbar(message)
def update(self,increment=1,newvalue=None):
if self._quiet >= 1:
return
if self.max != None:
if newvalue != None:
self.value = newvalue
else:
self.value += increment
self.updateprogressbar(math.floor(1000*(self.value-self.min)/(self.max-self.min)))
else:
self.updatebar()
def done(self):
if self._quiet >= 1:
return
if self.max != None:
self.doneprogressbar()
else:
self.donebar()
class ProgressText(Progress):
def __init__(self,*args,**kwargs):
super(ProgressText,self).__init__(*args,**kwargs)
self.handle = sys.stdout
self.barwidth = 10
self.maxwidth = 60
self.symbol = "*"
self.bs = chr(8)
self.neednl = False
def initbar(self,message,nl=False):
if self.neednl:
self.handle.write('\n')
self.neednl = False
print >>self.handle, message,
self.handle.flush()
self.barpos = 0
self.toright = True
if nl:
self.handle.write('\n')
else:
self.neednl = True
@staticmethod
def deltaformat(delta):
sd = map(float,str(delta).split(',',1)[-1].split(':'))
hours,minutes,seconds = sd
days = delta.days
if days > 0:
return "%d days, %d:%02d"%(days,hours,minutes)
if hours > 0:
return "%d:%02d:%02d hrs"%(hours,minutes,int(seconds))
if minutes > 0:
return "%d:%02d min"%(minutes,int(seconds))
return "%.2f sec"%(seconds,)
def donebar(self):
if self.elapsed:
d = timedelta(seconds=(time.time()-self.start))
print >>self.handle, "(%s)"%(self.deltaformat(d),)
else:
print >>self.handle, ""
self.neednl = False
self.handle.flush()
def updatebar(self):
if self.neednl:
self.handle.write('\n')
self.neednl = False
extrabs = False
if self.barpos + self.barwidth >= self.maxwidth:
self.toright = False
extrabs = True
elif self.barpos == 0:
self.toright = True
extrabs = True
if self.toright:
self.barpos += 1
self.handle.write("%s%s%s"%(self.bs*(self.barwidth+1*extrabs)," " if not extrabs else "",self.symbol*self.barwidth))
else:
self.barpos -= 1
self.handle.write("%s%s%s"%(self.bs*(self.barwidth+2),self.symbol*self.barwidth," " if extrabs else " "))
self.handle.flush()
def initprogressbar(self,message):
if self.neednl:
self.handle.write('\n')
self.neednl = False
# print >>self.handle, message
print >>self.handle, "%-*s->|"%(self.maxwidth-3,
message[:self.maxwidth-3])
self.handle.flush()
self.barpos = 0
def doneprogressbar(self):
print >>self.handle, (self.maxwidth-self.barpos)*self.symbol,
if self.elapsed:
d = timedelta(seconds=(time.time()-self.start))
print >>self.handle, "(%s)"%(self.deltaformat(d),)
else:
print >>self.handle, ""
self.handle.flush()
def updateprogressbar(self,value):
newpos = int(round(self.maxwidth*float(value)/1000))
if newpos > self.barpos:
self.handle.write("%s"%self.symbol*(newpos-self.barpos))
self.handle.flush()
self.barpos = newpos
try:
from needswx import *
__gui__ = True
except ImportError:
__gui__ = False
def GUI():
return __gui__
################################################################################
def sample_parse_args():
usage = "usage: %prog [options] args"
if 1 == len( sys.argv ):
option_parser_class = OptionParserGUI
else:
option_parser_class = OptionParser
parser = option_parser_class( usage = usage, version='0.1' )
parser.add_option("-f", "--file", dest="filename", default = r'c:\1.txt',
help="read data from FILENAME")
parser.add_option("-a", "--action", dest="action",
choices = ['delete', 'copy', 'move'],
help="Which action do you wish to take?!")
parser.add_option("-n", "--number", dest="number", default = 23,
type = 'int',
help="Just a number")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
(options, args) = parser.parse_args()
return options, args
def main():
options, args = sample_parse_args()
print 'args: %s' % repr( args )
print 'options: %s' % repr( options )
if '__main__' == __name__:
main()
| HorvathLab/NGS | attic/readCounts/src/optparse_gui/__init__.py | Python | mit | 12,249 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-13 16:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('posts', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='tags',
),
migrations.DeleteModel(
name='Tag',
),
]
| nagracks/dj-hackernews-clone | hn_clone/posts/migrations/0002_auto_20170413_1633.py | Python | mit | 441 |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import os
from figure3 import select, ket, exp
from matrix import ops
from measures import local_entropies_from_rhos, local_exp_vals_from_rhos
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib import rc
rc("text", usetex=True)
font = {"size": 11, "weight": "normal"}
mpl.rc(*("font",), **font)
mpl.rcParams["pdf.fonttype"] = 42
mpl.rcParams["text.latex.preamble"] = [
r"\usepackage{amsmath}",
r"\usepackage{sansmath}", # sanserif math
r"\sansmath",
]
if __name__ == "__main__":
names = {
"c1_f0": {"name": ket("010"), "ls": "-", "c": "C5", "m": "v"},
"exp-z": {"name": exp("\hat{\sigma_j}^z"), "ls": "-", "c": "C5", "m": "v"},
"exp-x": {"name": exp("\hat{\sigma_j}^x"), "ls": "-", "c": "C5", "m": "v"},
"s-2": {"name": " $s^{(2)}_j$", "ls": "-", "c": "C5", "m": "v"},
}
cmaps = ["inferno_r", "inferno"]
plot_fname = "figures/figure2/figure2_V5.pdf"
fig = plt.figure(figsize=(4.75, 3.7))
Skey = ["3.6", "3.13", "3.14", "5.4", "5.2"]
measures = ["exp-z", "s-2"]
IC = "c1_f0"
L = 18
T = (L - 1) * 3 + 1 # plot ylim
letts1 = [
r"$\mathrm{A}$",
r"$\mathrm{C}$",
r"$\mathrm{E}$",
r"$\mathrm{G}$",
r"$\mathrm{I}$",
]
letts2 = [
r"$\mathrm{B}$",
r"$\mathrm{D}$",
r"$\mathrm{F}$",
r"$\mathrm{H}$",
r"$\mathrm{J}$",
]
clett1 = ["w", "w", "w", "w", "w"]
clett2 = ["k", "k", "k", "w", "k"]
letts = [letts1, letts2]
cletts = [clett1, clett2]
for row, (meas, letti, cli) in enumerate(zip(measures, letts, cletts)):
grid = ImageGrid(
fig,
int("21" + str(1 + row)),
nrows_ncols=(1, 5),
direction="row",
axes_pad=0.1,
add_all=True,
cbar_mode="single",
cbar_location="right",
cbar_size="20%",
cbar_pad=0.05,
)
for col, (S, lett, cl) in enumerate(zip(Skey, letti, cli)):
N, S = map(int, S.split("."))
ax = grid[col]
if N == 3:
sim = select(L=L, S=S, IC=IC, V="H", BC="0")
if sim is None:
print("No sim!")
continue
S = sim["S"]
L = sim["L"]
IC = sim["IC"]
h5file = sim["h5file"]
if meas[0] == "e":
ticks = [-1, 1]
ticklabels = ["↑", "↓"]
else:
ticks = [0, 1]
ticklabels = ["$0$","$1$"]
vmin, vmax = ticks
d = h5file[meas]
elif N == 5:
der = "/home/lhillber/documents/research/cellular_automata/qeca/qops"
der = os.path.join(der, f"qca_output/hamiltonian/rule{S}/rho_i.npy")
one_site = np.load(der)
one_site = one_site.reshape(2000, 22, 2, 2)
one_site = one_site[::, 2:-2, :, :]
T5, L5, *_ = one_site.shape
d = np.zeros((T5, L5))
ti = 0
for t, rhoi in enumerate(one_site):
if t % 10 == 0:
if meas == "exp-z":
d[ti, :] = local_exp_vals_from_rhos(rhoi, ops["Z"])
elif meas == "s-2":
d[ti, :] = local_entropies_from_rhos(rhoi, order=2)
ti += 1
I = ax.imshow(
d[0:T],
origin="lower",
interpolation=None,
cmap=cmaps[row],
vmin=vmin,
vmax=vmax,
)
ax.cax.colorbar(I)
ax.cax.set_yticks(ticks)
ax.cax.set_yticklabels(ticklabels)
ax.set_xticks([0, 8, 17])
ax.set_yticks([i * (L - 1) for i in range(4)])
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.text(0.5, 46, lett, color=cl, family="sans-serif", weight="bold")
if col == len(Skey) - 1:
ax.cax.text(
1.6,
0.5,
names[meas]["name"],
rotation=0,
transform=ax.transAxes,
ha="left",
va="center",
)
if row == 0 and col < 3:
ax.set_title(r"$T_{%d}$" % S)
elif row == 0 and col > 2:
ax.set_title(r"${F_{%d}}$" % S)
ax.tick_params(direction="out")
grid[0].set_yticklabels(["$"+str(i * (L - 1))+"$" for i in range(4)])
grid[0].set_xticklabels(["$0$", "$8$", "$17$"])
grid[0].set_xlabel("$j$", labelpad=0)
grid[0].set_ylabel("$t$", labelpad=0)
fig.subplots_adjust(hspace=0.1, left=0.05, top=0.93)
plt.savefig(plot_fname, dpi=300)
print("plot saved to ", plot_fname)
| lhillber/qops | figure2.py | Python | mit | 5,048 |
"""
This file requires Python 3.6 or newer.
Its purpose is to test a correct installation of Python 3.
"""
from random import randint
print("Generating one thousand random numbers...")
for i in range(1000):
random_number = randint(0, 100000)
print(f"Number {i} was: {random_number}")
| PhantomAppDevelopment/python-getting-started | step-1/myscript.py | Python | mit | 308 |
"""
TAGME implementation
@author: Faegheh Hasibi (faegheh.hasibi@idi.ntnu.no)
"""
import argparse
import math
from nordlys.config import OUTPUT_DIR
from nordlys.tagme import config
from nordlys.tagme import test_coll
from nordlys.tagme.query import Query
from nordlys.tagme.mention import Mention
from nordlys.tagme.lucene_tools import Lucene
ENTITY_INDEX = Lucene(config.INDEX_PATH)
ANNOT_INDEX = Lucene(config.INDEX_ANNOT_PATH, use_ram=True)
# ENTITY_INDEX = IndexCache("/data/wikipedia-indices/20120502-index1")
# ANNOT_INDEX = IndexCache("/data/wikipedia-indices/20120502-index1-annot/", use_ram=True)
ENTITY_INDEX.open_searcher()
ANNOT_INDEX.open_searcher()
class Tagme(object):
DEBUG = 0
def __init__(self, query, rho_th, sf_source="wiki"):
self.query = query
self.rho_th = rho_th
self.sf_source = sf_source
        # TAGME params
self.link_prob_th = 0.001
self.cmn_th = 0.02
self.k_th = 0.3
self.link_probs = {}
self.in_links = {}
self.rel_scores = {} # dictionary {men: {en: rel_score, ...}, ...}
self.disamb_ens = {}
def parse(self):
"""
Parses the query and returns all candidate mention-entity pairs.
:return: candidate entities {men:{en:cmn, ...}, ...}
"""
ens = {}
for ngram in self.query.get_ngrams():
mention = Mention(ngram)
# performs mention filtering (based on the paper)
if (len(ngram) == 1) or (ngram.isdigit()) or (mention.wiki_occurrences < 2) or (len(ngram.split()) > 6):
continue
link_prob = self.__get_link_prob(mention)
if link_prob < self.link_prob_th:
continue
# These mentions will be kept
self.link_probs[ngram] = link_prob
# Filters entities by cmn threshold 0.001; this was only in TAGME source code and speeds up the process.
# TAGME source code: it.acubelab.tagme.anchor (lines 279-284)
ens[ngram] = mention.get_men_candidate_ens(0.001)
# filters containment mentions (based on paper)
candidate_entities = {}
sorted_mentions = sorted(ens.keys(), key=lambda item: len(item.split())) # sorts by mention length
for i in range(0, len(sorted_mentions)):
m_i = sorted_mentions[i]
ignore_m_i = False
for j in range(i+1, len(sorted_mentions)):
m_j = sorted_mentions[j]
if (m_i in m_j) and (self.link_probs[m_i] < self.link_probs[m_j]):
ignore_m_i = True
break
if not ignore_m_i:
candidate_entities[m_i] = ens[m_i]
return candidate_entities
def disambiguate(self, candidate_entities):
"""
        Performs disambiguation and links each mention to a single entity.
:param candidate_entities: {men:{en:cmn, ...}, ...}
:return: disambiguated entities {men:en, ...}
"""
# Gets the relevance score
rel_scores = {}
for m_i in candidate_entities.keys():
if self.DEBUG:
print "********************", m_i, "********************"
rel_scores[m_i] = {}
for e_m_i in candidate_entities[m_i].keys():
if self.DEBUG:
print "-- ", e_m_i
rel_scores[m_i][e_m_i] = 0
for m_j in candidate_entities.keys(): # all other mentions
if (m_i == m_j) or (len(candidate_entities[m_j].keys()) == 0):
continue
vote_e_m_j = self.__get_vote(e_m_i, candidate_entities[m_j])
rel_scores[m_i][e_m_i] += vote_e_m_j
if self.DEBUG:
print m_j, vote_e_m_j
# pruning uncommon entities (based on the paper)
self.rel_scores = {}
for m_i in rel_scores:
for e_m_i in rel_scores[m_i]:
cmn = candidate_entities[m_i][e_m_i]
if cmn >= self.cmn_th:
if m_i not in self.rel_scores:
self.rel_scores[m_i] = {}
self.rel_scores[m_i][e_m_i] = rel_scores[m_i][e_m_i]
# DT pruning
disamb_ens = {}
for m_i in self.rel_scores:
if len(self.rel_scores[m_i].keys()) == 0:
continue
top_k_ens = self.__get_top_k(m_i)
best_cmn = 0
best_en = None
for en in top_k_ens:
cmn = candidate_entities[m_i][en]
if cmn >= best_cmn:
best_en = en
best_cmn = cmn
disamb_ens[m_i] = best_en
return disamb_ens
def prune(self, dismab_ens):
"""
Performs AVG pruning.
:param dismab_ens: {men: en, ... }
:return: {men: (en, score), ...}
"""
linked_ens = {}
for men, en in dismab_ens.iteritems():
coh_score = self.__get_coherence_score(men, en, dismab_ens)
rho_score = (self.link_probs[men] + coh_score) / 2.0
if rho_score >= self.rho_th:
linked_ens[men] = (en, rho_score)
return linked_ens
def __get_link_prob(self, mention):
"""
Gets link probability for the given mention.
Here, in fact, we are computing key-phraseness.
"""
pq = ENTITY_INDEX.get_phrase_query(mention.text, Lucene.FIELDNAME_CONTENTS)
mention_freq = ENTITY_INDEX.searcher.search(pq, 1).totalHits
if mention_freq == 0:
return 0
if self.sf_source == "wiki":
link_prob = mention.wiki_occurrences / float(mention_freq)
# This is TAGME implementation, from source code:
# link_prob = float(mention.wiki_occurrences) / max(mention_freq, mention.wiki_occurrences)
elif self.sf_source == "facc":
link_prob = mention.facc_occurrences / float(mention_freq)
return link_prob
def __get_vote(self, entity, men_cand_ens):
"""
        vote_e = sum_e_i(mw_rel(e, e_i) * cmn(e_i)) / |men_cand_ens|
:param entity: en
:param men_cand_ens: {en: cmn, ...}
:return: voting score
"""
entity = entity if self.sf_source == "wiki" else entity[0]
vote = 0
for e_i, cmn in men_cand_ens.iteritems():
e_i = e_i if self.sf_source == "wiki" else e_i[0]
mw_rel = self.__get_mw_rel(entity, e_i)
# print "\t", e_i, "cmn:", cmn, "mw_rel:", mw_rel
vote += cmn * mw_rel
vote /= float(len(men_cand_ens))
return vote
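    # Worked example with hypothetical numbers: if the other mention has candidates
    # {e_1: cmn=0.6, e_2: cmn=0.4} and mw_rel(e, e_1)=0.5, mw_rel(e, e_2)=0.1,
    # then vote = (0.6*0.5 + 0.4*0.1) / 2 = 0.17.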
def __get_mw_rel(self, e1, e2):
"""
Calculates Milne & Witten relatedness for two entities.
This implementation is based on Dexter implementation (which is similar to TAGME implementation).
- Dexter implementation: https://github.com/dexter/dexter/blob/master/dexter-core/src/main/java/it/cnr/isti/hpc/dexter/relatedness/MilneRelatedness.java
- TAGME: it.acubelab.tagme.preprocessing.graphs.OnTheFlyArrayMeasure
"""
if e1 == e2: # to speed-up
return 1.0
en_uris = tuple(sorted({e1, e2}))
ens_in_links = [self.__get_in_links([en_uri]) for en_uri in en_uris]
if min(ens_in_links) == 0:
return 0
conj = self.__get_in_links(en_uris)
if conj == 0:
return 0
numerator = math.log(max(ens_in_links)) - math.log(conj)
denominator = math.log(ANNOT_INDEX.num_docs()) - math.log(min(ens_in_links))
rel = 1 - (numerator / denominator)
if rel < 0:
return 0
return rel
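    # Numeric sketch with hypothetical counts: with 1e6 annotated documents,
    # in(e1)=2000, in(e2)=500 and 100 documents mentioning both,
    # rel = 1 - (log(2000) - log(100)) / (log(1e6) - log(500)) ~= 0.61.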
def __get_in_links(self, en_uris):
"""
        Returns the number of documents in which all the given entities occur (conjunctive "AND" query).
:param en_uris: list of dbp_uris
"""
en_uris = tuple(sorted(set(en_uris)))
if en_uris in self.in_links:
return self.in_links[en_uris]
term_queries = []
for en_uri in en_uris:
term_queries.append(ANNOT_INDEX.get_id_lookup_query(en_uri, Lucene.FIELDNAME_CONTENTS))
and_query = ANNOT_INDEX.get_and_query(term_queries)
self.in_links[en_uris] = ANNOT_INDEX.searcher.search(and_query, 1).totalHits
return self.in_links[en_uris]
def __get_coherence_score(self, men, en, dismab_ens):
"""
        coherence_score = sum_e_i(rel(e_i, en)) / (len(ens) - 1)
:param en: entity
:param dismab_ens: {men: (dbp_uri, fb_id), ....}
"""
coh_score = 0
for m_i, e_i in dismab_ens.iteritems():
if m_i == men:
continue
coh_score += self.__get_mw_rel(e_i, en)
coh_score = coh_score / float(len(dismab_ens.keys()) - 1) if len(dismab_ens.keys()) - 1 != 0 else 0
return coh_score
def __get_top_k(self, mention):
"""Returns top-k percent of the entities based on rel score."""
k = int(round(len(self.rel_scores[mention].keys()) * self.k_th))
k = 1 if k == 0 else k
sorted_rel_scores = sorted(self.rel_scores[mention].items(), key=lambda item: item[1], reverse=True)
top_k_ens = []
count = 1
prev_rel_score = sorted_rel_scores[0][1]
for en, rel_score in sorted_rel_scores:
if rel_score != prev_rel_score:
count += 1
if count > k:
break
top_k_ens.append(en)
prev_rel_score = rel_score
return top_k_ens
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-th", "--threshold", help="score threshold", type=float, default=0)
parser.add_argument("-data", help="Data set name", choices=['y-erd', 'erd-dev', 'wiki-annot30', 'wiki-disamb30'])
args = parser.parse_args()
if args.data == "erd-dev":
queries = test_coll.read_erd_queries()
elif args.data == "y-erd":
queries = test_coll.read_yerd_queries()
elif args.data == "wiki-annot30":
queries = test_coll.read_tagme_queries(config.WIKI_ANNOT30_SNIPPET)
elif args.data == "wiki-disamb30":
queries = test_coll.read_tagme_queries(config.WIKI_DISAMB30_SNIPPET)
out_file_name = OUTPUT_DIR + "/" + args.data + "_tagme_wiki10.txt"
open(out_file_name, "w").close()
out_file = open(out_file_name, "a")
# process the queries
for qid, query in sorted(queries.items(), key=lambda item: int(item[0]) if item[0].isdigit() else item[0]):
print "[" + qid + "]", query
tagme = Tagme(Query(qid, query), args.threshold)
print " parsing ..."
cand_ens = tagme.parse()
print " disambiguation ..."
disamb_ens = tagme.disambiguate(cand_ens)
print " pruning ..."
linked_ens = tagme.prune(disamb_ens)
out_str = ""
for men, (en, score) in linked_ens.iteritems():
out_str += str(qid) + "\t" + str(score) + "\t" + en + "\t" + men + "\tpage-id" + "\n"
print out_str, "-----------\n"
out_file.write(out_str)
print "output:", out_file_name
if __name__ == "__main__":
main() | hasibi/TAGME-Reproducibility | nordlys/tagme/tagme.py | Python | mit | 11,198 |
from django.shortcuts import render
from enfermeriaapp.models import Cola_Consulta, Cola_Enfermeria
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils import timezone
import time
from django.contrib import messages
from django.contrib.auth.decorators import login_required
import datetime
from django.db import connection
import json
from datospersonalesapp.models import Paciente
from nuevoingresoapp.models import Expediente_Provisional
from enfermeriaapp.forms import ColaEnfermeriaForm
# View to add a new patient to the queue for vital-signs measurement
@login_required(login_url='logins')
def cola_enfermeria_nuevo(request,pk):
info = ""
pacientes=Paciente.objects.filter(estadoExpediente='A').order_by('facultadE')
cursor = connection.cursor()
cursor.execute('SELECT distinct(p.facultadE_id), f.nombreFacultad FROM datospersonalesapp_paciente as p, datospersonalesapp_facultad as f WHERE p.facultadE_id = f.codigoFacultad ORDER BY f.nombreFacultad')
auxL = cursor.fetchall()
if request.method == "GET":
data = {'idPaciente':Paciente.objects.filter(codigoPaciente = pk)
}
form = ColaEnfermeriaForm(data)
existe = Cola_Enfermeria.objects.filter(idPaciente = pk)
if existe:
info="El paciente ya existe en la cola"
else:
if form.is_valid():
expediente = form.save(commit=False)
expediente.hora = time.strftime("%H:%M:%S") #Formato de 24 horas
expediente.save()
                info = "Datos Guardados Exitosamente"
return render(request,"datospersonales/paciente_list.html",{'personalpaciente':pacientes,'datoFacult':auxL,'informacion':info})
else:
form=ColaEnfermeriaForm()
info = "Ocurrio un error los datos no se guardaron"
return render(request,"datospersonales/paciente_list.html",{'personalpaciente':pacientes,'datoFacult':auxL,'informacion':info})
# Shows the list of patients queued for vital-signs measurement
@login_required(login_url='logins')
def cola_enfermeria_list(request):
cola=Cola_Enfermeria.objects.order_by('hora')
return render(request,"enfermeriaapp/cola_enfermeria_list.html",{'cola':cola})
# View to manually remove a patient from the vital-signs queue
@login_required(login_url='logins')
def cola_enfermeria_borrar(request,pk):
cola=Cola_Enfermeria.objects.order_by('hora')
info = ""
if request.method == "GET":
data = {'idPaciente':Paciente.objects.filter(codigoPaciente = pk)
}
form = ColaEnfermeriaForm(data)
existe = Cola_Enfermeria.objects.filter(idPaciente = pk)
if existe:
if form.is_valid():
existe.delete()
info = "Datos eliminados exitosamente"
return render(request,"enfermeriaapp/cola_enfermeria_list.html",{'cola':cola})
else:
form=ColaEnfermeriaForm()
info = "Ocurrio un error no se pudo eliminar el paciente de la cola"
else:
info="El paciente no existe en la cola"
return render(request,"enfermeriaapp/cola_enfermeria_list.html",{'cola':cola})
# Shows the list of patients queued for consultation
@login_required(login_url='logins')
def cola_consulta_list(request):
cursor = connection.cursor()
cursor.execute('SELECT distinct(p.nit) as codigo, p.nombrePrimero as nombre,p.nombreSegundo as nombreSegundo, p.apellidoPrimero as apellido,c.hora,c.idDoctor_id as doctor FROM datospersonalesapp_paciente as p, enfermeriaapp_cola_consulta as c WHERE p.nit = c.nit')
cursor2 = connection.cursor()
cursor2.execute('SELECT distinct(p.nit) as codigo, p.nombrePrimero as nombre,p.nombreSegundo as nombreSegundo, p.apellidoPrimero as apellido,c.hora,c.idDoctor_id as doctor FROM nuevoingresoapp_expediente_provisional as p, enfermeriaapp_cola_consulta as c WHERE p.nit = c.nit')
cola = cursor.fetchall()
cola += cursor2.fetchall()
#cola=Cola_Consulta.objects.order_by('hora')
return render(request,"enfermeriaapp/cola_consulta_list.html",{'cola':cola})
| anderson7ru/bienestarues | enfermeriaapp/views.py | Python | mit | 4,242 |
class InvalidAPIUsage(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
| devcenter-square/states-cities | app/mod_endpoints/exceptions.py | Python | mit | 413 |
import os, sys, multiprocessing, subprocess
from build_util import *
if __name__ == "__main__":
cfg = cfg_from_argv(sys.argv)
bi = build_info(cfg.compiler, cfg.archs, cfg.cfg)
print("Starting build project: " + build_cfg.project_name + " ...")
additional_options = "-DCFG_PROJECT_NAME:STRING=\"%s\"" % build_cfg.project_name
additional_options += " -DCFG_BINARY_PATH:STRING=\"%s\"" % build_cfg.binary_path
additional_options += " -DCFG_BUILD_PATH:STRING=\"%s\"" % build_cfg.build_path
additional_options += " -DCFG_DEPENDENT_PATH:STRING=\"%s\"" % build_cfg.dependent_path
additional_options += " -DCFG_DOCUMENT_PATH:STRING=\"%s\"" % build_cfg.document_path
additional_options += " -DCFG_EXTERNAL_PATH:STRING=\"%s\"" % build_cfg.external_path
additional_options += " -DCFG_INCLUDE_PATH:STRING=\"%s\"" % build_cfg.include_path
additional_options += " -DCFG_SOURCE_PATH:STRING=\"%s\"" % build_cfg.source_path
additional_options += " -DCFG_TEST_PATH:STRING=\"%s\"" % build_cfg.test_path
additional_options += " -DCFG_INSTALL_PATH:STRING=\"%s\"" % build_cfg.install_path
additional_options += " -DCFG_INTRINSICS_LEVEL:STRING=\"%d\"" % build_cfg.intrinsics_level
print("Generating %s..." % (build_cfg.project_name))
for info in bi.compilers:
build_project(build_cfg.project_name, build_cfg.build_path, bi, "../cmake", info, False, False, additional_options)
| Napoleon314/Venus3D | generate_projects.py | Python | mit | 1,373 |
# -*- coding: utf-8 -*-
import logging
import types
import dateutil.parser
import feedparser
import pytz
import http
from html_sanitizer import HTMLSanitizer
def getFeed(url):
current_feed = []
content = http.get(url)
feed = feedparser.parse(content) # even if content is None feedparser returns object with empty entries list
for item in feed.entries:
parsed = FeedParser.parse(item)
current_feed.append(parsed)
logging.info("Downloaded %d posts." % len(current_feed))
return current_feed
def filterExistingFeeds(feeds, latest_feed):
filtered = []
if feeds is not None and len(feeds) > 0:
if latest_feed is not None:
for feed in feeds:
logging.info("Comparing downloaded and latest feed date - (%s, %s)" % (feed["published"], latest_feed))
if feed["published"] is not None and feed["published"] > latest_feed:
filtered.append(feed)
else:
filtered = feeds
logging.info("After filtering there is %d posts to store." % len(filtered))
return filtered
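# Typical flow (sketch; the URL and the stored timestamp below are illustrative):
#   items = getFeed("http://example.com/rss")
#   new_items = filterExistingFeeds(items, latest_stored_published_datetime)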
class FeedParser():
@staticmethod
def parse(item):
link = FeedParser._getFirstOf(item, ["link", "id"])
title = FeedParser._getFirstOf(item, ["title"])
summary = FeedParser._getFirstOf(item, ["summary"])
published = FeedParser._getFirstOf(item, ["published", "updated"])
categories = FeedParser._getFirstOf(item, ["tags"])
# for everyone using BlogEngine.NET (this item contains last betag:tag item for single feed item)
betag = FeedParser._getFirstOf(item, ["betag"])
categories_names = FeedParser._getNames(categories)
categories_names.append(FeedParser._encode(betag))
datetime_published = DateParser.parse(published)
sanitized_summary = HTMLSanitizer.sanitize_and_parse(summary)
return {
"link": link,
"published": datetime_published,
"title": FeedParser._encode(title),
"summary": sanitized_summary,
"categories": categories_names
}
@staticmethod
def _getFirstOf(feed_entry, attributes):
if attributes is not None:
for attr in attributes:
if hasattr(feed_entry, attr):
return feed_entry[attr]
@staticmethod
def _encode(value_to_encode):
if type(value_to_encode) is types.UnicodeType:
return value_to_encode.encode("UTF-8")
return value_to_encode
@staticmethod
def _getNames(categories):
result = []
if categories is not None:
for category in categories:
if "term" in category.keys():
result.append(FeedParser._encode(category["term"]))
return result
class DateParser():
@staticmethod
def parse(date):
try:
result = dateutil.parser.parse(date).astimezone(tz=pytz.UTC).replace(tzinfo=None)
except ValueError:
try:
result = dateutil.parser.parse(date, parserinfo=DateParser.PolishParserInfo()) \
.astimezone(tz=pytz.UTC) \
.replace(tzinfo=None)
except ValueError as e:
logging.error("Unknown date string format. Provided date: %s" % date.encode("utf-8"))
raise
return result
class PolishParserInfo(dateutil.parser.parserinfo):
MONTHS = [(u'Sty', u'Styczeń'), (u'Lut', u'Luty'), (u'Mar', u'Marzec'), (u'Kwi', u'Kwiecień'), (u'Maj', u'Maj'),
(u'Cze', u'Czerwiec'), (u'Lip', u'Lipiec'), (u'Sie', u'Sierpień'), (u'Wrz', u'Wrzesień'),
(u'Paź', u'Październik'), (u'Lis', u'Listopad'), (u'Gru', u'Grudzień')]
WEEKDAYS = [(u'Pn', u'Pon', u'Poniedziałek'), (u'Wt', u'Wto', u'Wtorek'), (u'Śr', u'Śro', u'Środa'),
(u'Cz', u'Czw', u'Czwartek'), (u'Pt', u'Pią', u'Piątek'), (u'So', u'Sob', u'Sobota'),
(u'N', u'Nd', u'Nie', u'Niedziela')]
        # By default, parserinfo only accepts names with length >= 3; this override is needed
        # because Polish weekday abbreviations may be a single letter, e.g. 'N' (Sunday).
def weekday(self, name):
if len(name) >= 1:
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None | macborowy/dajsiepoznac-feed | DajSiePoznacFeed-Server/crawler/src/scrapper/feed.py | Python | mit | 4,500 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Wrapper for netCDF readers."""
from __future__ import unicode_literals, division, print_function
import os.path
import warnings
import numpy as np
from collections import OrderedDict
from monty.dev import requires
from monty.collections import AttrDict
from monty.functools import lazy_property
from monty.string import marquee
from pymatgen.core.units import ArrayWithUnit
from pymatgen.core.xcfunc import XcFunc
from pymatgen.core.structure import Structure
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
__status__ = "Development"
__date__ = "$Feb 21, 2013M$"
__all__ = [
"as_ncreader",
"as_etsfreader",
"NetcdfReader",
"NetcdfReaderError",
"ETSF_Reader",
"NO_DEFAULT",
"structure_from_ncdata",
]
try:
import netCDF4
except ImportError as exc:
netCDF4 = None
warnings.warn("""\
`import netCDF4` failed with the following error:
%s
Please install netcdf4 with `conda install netcdf4`
If the conda version does not work, uninstall it with `conda uninstall hdf4 hdf5 netcdf4`
and use `pip install netcdf4`""" % str(exc))
def _asreader(file, cls):
closeit = False
if not isinstance(file, cls):
file, closeit = cls(file), True
return file, closeit
def as_ncreader(file):
"""
Convert file into a NetcdfReader instance.
Returns reader, closeit where closeit is set to True
if we have to close the file before leaving the procedure.
"""
return _asreader(file, NetcdfReader)
def as_etsfreader(file):
return _asreader(file, ETSF_Reader)
class NetcdfReaderError(Exception):
"""Base error class for NetcdfReader"""
class NO_DEFAULT(object):
"""Signal that read_value should raise an Error"""
class NetcdfReader(object):
"""
Wraps and extends netCDF4.Dataset. Read only mode. Supports with statements.
Additional documentation available at:
http://netcdf4-python.googlecode.com/svn/trunk/docs/netCDF4-module.html
"""
Error = NetcdfReaderError
@requires(netCDF4 is not None, "netCDF4 must be installed to use this class")
def __init__(self, path):
"""Open the Netcdf file specified by path (read mode)."""
self.path = os.path.abspath(path)
try:
self.rootgrp = netCDF4.Dataset(self.path, mode="r")
except Exception as exc:
raise self.Error("In file %s: %s" % (self.path, str(exc)))
self.ngroups = len(list(self.walk_tree()))
#self.path2group = OrderedDict()
#for children in self.walk_tree():
# for child in children:
# #print(child.group, child.path)
# self.path2group[child.path] = child.group
def __enter__(self):
"""Activated when used in the with statement."""
return self
def __exit__(self, type, value, traceback):
"""Activated at the end of the with statement. It automatically closes the file."""
self.rootgrp.close()
def close(self):
try:
self.rootgrp.close()
except Exception as exc:
logger.warning("Exception %s while trying to close %s" % (exc, self.path))
def walk_tree(self, top=None):
"""
Navigate all the groups in the file starting from top.
If top is None, the root group is used.
"""
if top is None:
top = self.rootgrp
values = top.groups.values()
yield values
for value in top.groups.values():
for children in self.walk_tree(value):
yield children
def print_tree(self):
for children in self.walk_tree():
for child in children:
print(child)
def read_dimvalue(self, dimname, path="/", default=NO_DEFAULT):
"""
Returns the value of a dimension.
Args:
dimname: Name of the variable
path: path to the group.
default: return `default` if `dimname` is not present and
`default` is not `NO_DEFAULT` else raise self.Error.
"""
try:
dim = self._read_dimensions(dimname, path=path)[0]
return len(dim)
except self.Error:
if default is NO_DEFAULT: raise
return default
def read_varnames(self, path="/"):
"""List of variable names stored in the group specified by path."""
if path == "/":
return self.rootgrp.variables.keys()
else:
group = self.path2group[path]
return group.variables.keys()
def read_value(self, varname, path="/", cmode=None, default=NO_DEFAULT):
"""
Returns the values of variable with name varname in the group specified by path.
Args:
varname: Name of the variable
path: path to the group.
            cmode: if cmode=="c", a complex ndarray is constructed and returned
                (netcdf does not provide native support for complex datatypes).
default: returns default if varname is not present.
                self.Error is raised if default is NO_DEFAULT.
Returns:
numpy array if varname represents an array, scalar otherwise.
"""
try:
var = self.read_variable(varname, path=path)
except self.Error:
if default is NO_DEFAULT: raise
return default
if cmode is None:
# scalar or array
# getValue is not portable!
try:
return var.getValue()[0] if not var.shape else var[:]
except IndexError:
return var.getValue() if not var.shape else var[:]
else:
assert var.shape[-1] == 2
if cmode == "c":
return var[...,0] + 1j*var[...,1]
else:
raise ValueError("Wrong value for cmode %s" % cmode)
def read_variable(self, varname, path="/"):
"""Returns the variable with name varname in the group specified by path."""
return self._read_variables(varname, path=path)[0]
def _read_dimensions(self, *dimnames, **kwargs):
path = kwargs.get("path", "/")
try:
if path == "/":
return [self.rootgrp.dimensions[dname] for dname in dimnames]
else:
group = self.path2group[path]
return [group.dimensions[dname] for dname in dimnames]
except KeyError:
raise self.Error("In file %s:\nError while reading dimensions: `%s` with kwargs: `%s`" %
(self.path, dimnames, kwargs))
def _read_variables(self, *varnames, **kwargs):
path = kwargs.get("path", "/")
try:
if path == "/":
return [self.rootgrp.variables[vname] for vname in varnames]
else:
group = self.path2group[path]
return [group.variables[vname] for vname in varnames]
except KeyError:
raise self.Error("In file %s:\nError while reading variables: `%s` with kwargs `%s`." %
(self.path, varnames, kwargs))
def read_keys(self, keys, dict_cls=AttrDict, path="/"):
"""
Read a list of variables/dimensions from file. If a key is not present the corresponding
entry in the output dictionary is set to None.
"""
od = dict_cls()
for k in keys:
try:
# Try to read a variable.
od[k] = self.read_value(k, path=path)
except self.Error:
try:
# Try to read a dimension.
od[k] = self.read_dimvalue(k, path=path)
except self.Error:
od[k] = None
return od
class ETSF_Reader(NetcdfReader):
"""
This object reads data from a file written according to the ETSF-IO specifications.
We assume that the netcdf file contains at least the crystallographic section.
"""
@lazy_property
def chemical_symbols(self):
"""Chemical symbols char [number of atom species][symbol length]."""
charr = self.read_value("chemical_symbols")
symbols = []
for v in charr:
symbols.append("".join(c.decode("utf-8") for c in v))
return symbols
def typeidx_from_symbol(self, symbol):
"""Returns the type index from the chemical symbol. Note python convention."""
return self.chemical_symbols.index(symbol)
def read_structure(self, cls=Structure):
"""Returns the crystalline structure."""
if self.ngroups != 1:
raise NotImplementedError("In file %s: ngroups != 1" % self.path)
return structure_from_ncdata(self, cls=cls)
def read_abinit_xcfunc(self):
"""
Read ixc from an Abinit file. Return :class:`XcFunc` object.
"""
ixc = int(self.read_value("ixc"))
return XcFunc.from_abinit_ixc(ixc)
def read_abinit_hdr(self):
"""
Read the variables associated to the Abinit header.
Return :class:`AbinitHeader`
"""
d = {}
for hvar in _HDR_VARIABLES.values():
ncname = hvar.etsf_name if hvar.etsf_name is not None else hvar.name
if ncname in self.rootgrp.variables:
d[hvar.name] = self.read_value(ncname)
elif ncname in self.rootgrp.dimensions:
d[hvar.name] = self.read_dimvalue(ncname)
else:
raise ValueError("Cannot find `%s` in `%s`" % (ncname, self.path))
# Convert scalars to (well) scalars.
if hasattr(d[hvar.name], "shape") and not d[hvar.name].shape:
d[hvar.name] = np.asscalar(d[hvar.name])
if hvar.name in ("title", "md5_pseudos", "codvsn"):
# Convert array of numpy bytes to list of strings
if hvar.name == "codvsn":
d[hvar.name] = "".join(bs.decode("utf-8").strip() for bs in d[hvar.name])
else:
d[hvar.name] = ["".join(bs.decode("utf-8") for bs in astr).strip()
for astr in d[hvar.name]]
return AbinitHeader(d)
def structure_from_ncdata(ncdata, site_properties=None, cls=Structure):
"""
Reads and returns a pymatgen structure from a NetCDF file
containing crystallographic data in the ETSF-IO format.
Args:
ncdata: filename or NetcdfReader instance.
site_properties: Dictionary with site properties.
        cls: The Structure class to instantiate.
"""
ncdata, closeit = as_ncreader(ncdata)
# TODO check whether atomic units are used
lattice = ArrayWithUnit(ncdata.read_value("primitive_vectors"), "bohr").to("ang")
red_coords = ncdata.read_value("reduced_atom_positions")
natom = len(red_coords)
znucl_type = ncdata.read_value("atomic_numbers")
# type_atom[0:natom] --> index Between 1 and number of atom species
type_atom = ncdata.read_value("atom_species")
# Fortran to C index and float --> int conversion.
species = natom * [None]
for atom in range(natom):
type_idx = type_atom[atom] - 1
species[atom] = int(znucl_type[type_idx])
d = {}
if site_properties is not None:
for prop in site_properties:
            d[prop] = ncdata.read_value(prop)
structure = cls(lattice, species, red_coords, site_properties=d)
# Quick and dirty hack.
# I need an abipy structure since I need to_abivars and other methods.
try:
from abipy.core.structure import Structure as AbipyStructure
structure.__class__ = AbipyStructure
except ImportError:
pass
if closeit:
ncdata.close()
return structure
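# Minimal usage sketch (the file name below is illustrative; it assumes an ETSF/Abinit
# netCDF file is available on disk):
#
#   with ETSF_Reader("out_GSR.nc") as reader:
#       structure = reader.read_structure()
#       hdr = reader.read_abinit_hdr()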
class _H(object):
__slots__ = ["name", "doc", "etsf_name"]
def __init__(self, name, doc, etsf_name=None):
self.name, self.doc, self.etsf_name = name, doc, etsf_name
_HDR_VARIABLES = (
# Scalars
_H("bantot", "total number of bands (sum of nband on all kpts and spins)"),
_H("date", "starting date"),
_H("headform", "format of the header"),
_H("intxc", "input variable"),
_H("ixc", "input variable"),
_H("mband", "maxval(hdr%nband)", etsf_name="max_number_of_states"),
_H("natom", "input variable", etsf_name="number_of_atoms"),
_H("nkpt", "input variable", etsf_name="number_of_kpoints"),
_H("npsp", "input variable"),
_H("nspden", "input variable", etsf_name="number_of_components"),
_H("nspinor", "input variable", etsf_name="number_of_spinor_components"),
_H("nsppol", "input variable", etsf_name="number_of_spins"),
_H("nsym", "input variable", etsf_name="number_of_symmetry_operations"),
_H("ntypat", "input variable", etsf_name="number_of_atom_species"),
_H("occopt", "input variable"),
_H("pertcase", "the index of the perturbation, 0 if GS calculation"),
_H("usepaw", "input variable (0=norm-conserving psps, 1=paw)"),
_H("usewvl", "input variable (0=plane-waves, 1=wavelets)"),
_H("kptopt", "input variable (defines symmetries used for k-point sampling)"),
_H("pawcpxocc", "input variable"),
_H("nshiftk_orig", "original number of shifts given in input (changed in inkpts, the actual value is nshiftk)"),
_H("nshiftk", "number of shifts after inkpts."),
_H("icoulomb", "input variable."),
_H("ecut", "input variable", etsf_name="kinetic_energy_cutoff"),
_H("ecutdg", "input variable (ecut for NC psps, pawecutdg for paw)"),
_H("ecutsm", "input variable"),
_H("ecut_eff", "ecut*dilatmx**2 (dilatmx is an input variable)"),
_H("etot", "EVOLVING variable"),
_H("fermie", "EVOLVING variable", etsf_name="fermi_energy"),
_H("residm", "EVOLVING variable"),
_H("stmbias", "input variable"),
_H("tphysel", "input variable"),
_H("tsmear", "input variable"),
_H("nelect", "number of electrons (computed from pseudos and charge)"),
_H("charge", "input variable"),
# Arrays
_H("qptn", "qptn(3) the wavevector, in case of a perturbation"),
#_H("rprimd", "rprimd(3,3) EVOLVING variables", etsf_name="primitive_vectors"),
#_H(ngfft, "ngfft(3) input variable", number_of_grid_points_vector1"
#_H("nwvlarr", "nwvlarr(2) the number of wavelets for each resolution.", etsf_name="number_of_wavelets"),
_H("kptrlatt_orig", "kptrlatt_orig(3,3) Original kptrlatt"),
_H("kptrlatt", "kptrlatt(3,3) kptrlatt after inkpts."),
_H("istwfk", "input variable istwfk(nkpt)"),
_H("lmn_size", "lmn_size(npsp) from psps"),
_H("nband", "input variable nband(nkpt*nsppol)", etsf_name="number_of_states"),
_H("npwarr", "npwarr(nkpt) array holding npw for each k point", etsf_name="number_of_coefficients"),
_H("pspcod", "pscod(npsp) from psps"),
_H("pspdat", "psdat(npsp) from psps"),
_H("pspso", "pspso(npsp) from psps"),
_H("pspxc", "pspxc(npsp) from psps"),
_H("so_psp", "input variable so_psp(npsp)"),
_H("symafm", "input variable symafm(nsym)"),
#_H(symrel="input variable symrel(3,3,nsym)", etsf_name="reduced_symmetry_matrices"),
_H("typat", "input variable typat(natom)", etsf_name="atom_species"),
_H("kptns", "input variable kptns(nkpt, 3)", etsf_name="reduced_coordinates_of_kpoints"),
_H("occ", "EVOLVING variable occ(mband, nkpt, nsppol)", etsf_name="occupations"),
_H("tnons", "input variable tnons(nsym, 3)", etsf_name="reduced_symmetry_translations"),
_H("wtk", "weight of kpoints wtk(nkpt)", etsf_name="kpoint_weights"),
_H("shiftk_orig", "original shifts given in input (changed in inkpts)."),
_H("shiftk", "shiftk(3,nshiftk), shiftks after inkpts"),
_H("amu", "amu(ntypat) ! EVOLVING variable"),
#_H("xred", "EVOLVING variable xred(3,natom)", etsf_name="reduced_atom_positions"),
_H("zionpsp", "zionpsp(npsp) from psps"),
_H("znuclpsp", "znuclpsp(npsp) from psps. Note the difference between (znucl|znucltypat) and znuclpsp"),
_H("znucltypat", "znucltypat(ntypat) from alchemy", etsf_name="atomic_numbers"),
_H("codvsn", "version of the code"),
_H("title", "title(npsp) from psps"),
_H("md5_pseudos", "md5pseudos(npsp), md5 checksums associated to pseudos (read from file)"),
#_H(type(pawrhoij_type), allocatable :: pawrhoij(:) ! EVOLVING variable, only for paw
)
_HDR_VARIABLES = OrderedDict([(h.name, h) for h in _HDR_VARIABLES])
class AbinitHeader(AttrDict):
"""Stores the values reported in the Abinit header."""
#def __init__(self, *args, **kwargs):
# super(AbinitHeader, self).__init__(*args, **kwargs)
# for k, v in self.items():
# v.__doc__ = _HDR_VARIABLES[k].doc
def __str__(self):
return self.to_string()
def to_string(self, verbose=0, title=None, **kwargs):
"""
String representation. kwargs are passed to `pprint.pformat`.
Args:
verbose: Verbosity level
title: Title string.
"""
from pprint import pformat
s = pformat(self, **kwargs)
if title is not None:
return "\n".join([marquee(title, mark="="), s])
return s
| setten/pymatgen | pymatgen/io/abinit/netcdf.py | Python | mit | 17,296 |
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
        # A non-negative integer is a palindrome if its decimal representation reads
        # the same forwards and backwards; negatives never qualify ('-' ends up last).
        s = str(x)
        return s == s[::-1] | thydeyx/LeetCode-Python | Palindrome Number.py | Python | mit | 302 |
__author__ = 'http://www.python-course.eu/python3_inheritance.php'
class Person:
def __init__(self, first, last):
self.firstname = first
self.lastname = last
def Name(self):
return self.firstname + " " + self.lastname
class Employee(Person):
def __init__(self, first, last, staffnum):
Person.__init__(self,first, last)
self.staffnumber = staffnum
def GetEmployee(self):
return self.Name() + ", " + self.staffnumber
x = Person("Marge", "Simpson")
y = Employee("Homer", "Simpson", "1007")
print(x.Name())
print(y.GetEmployee())
| MarketShareData/Internal | code/test1/inheritanceTest.py | Python | mit | 600 |
from build import evaluate_callables
class WhenEvaluatingADictWithNoCallables:
def when_i_evaluate_the_dict(self):
self.result = evaluate_callables({"abc": 123, "def": 456, "xyz": 789})
def it_should_return_the_same_dict(self):
assert self.result == {"abc": 123, "def": 456, "xyz": 789}
class WhenEvaluatingADictWithCallables:
def given_input_containing_lambdas(self):
self.input = {"abc": lambda: 123, "def": lambda: 456, "xyz": 789}
self.input_copy = self.input.copy()
def when_i_evaluate_the_dict(self):
self.result = evaluate_callables(self.input)
def it_should_return_the_dict_having_called_the_functions(self):
assert self.result == {"abc": 123, "def": 456, "xyz": 789}
def it_should_not_change_the_original_dict(self):
assert self.input == self.input_copy
class MyDict(dict):
def __eq__(self, other):
if not isinstance(other, MyDict):
return False
return super().__eq__(other)
def copy(self):
return MyDict({k: v for k, v in self.items()})
class WhenEvaluatingACustomDictWithNoCallables:
def when_i_evaluate_the_dict(self):
self.result = evaluate_callables(MyDict({"abc": 123, "def": 456, "xyz": 789}))
def it_should_return_an_instance_of_the_same_class(self):
assert self.result == MyDict({"abc": 123, "def": 456, "xyz": 789})
class WhenEvaluatingACustomDictWithCallables:
def given_input_containing_lambdas(self):
self.input = MyDict({"abc": lambda: 123, "def": lambda: 456, "xyz": 789})
self.input_copy = self.input.copy()
def when_i_evaluate_the_dict(self):
self.result = evaluate_callables(self.input)
def it_should_return_an_instance_of_the_same_class_having_called_the_functions(self):
assert self.result == MyDict({"abc": 123, "def": 456, "xyz": 789})
def it_should_not_change_the_original_dict(self):
assert self.input == self.input_copy
# todo: make it work for other sequences
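# Behaviour inferred from the specs above (sketch): evaluate_callables({"a": lambda: 1, "b": 2})
# should return {"a": 1, "b": 2}, preserve the mapping's class, and leave its argument unmodified.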
| benjamin-hodgson/build | test/evaluate_callables_tests.py | Python | mit | 2,020 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class FrontendIPConfiguration(SubResource):
"""Frontend IP address of the load balancer.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar inbound_nat_rules: Read only. Inbound rules URIs that use this
frontend IP.
:vartype inbound_nat_rules:
list[~azure.mgmt.network.v2018_01_01.models.SubResource]
:ivar inbound_nat_pools: Read only. Inbound pools URIs that use this
frontend IP.
:vartype inbound_nat_pools:
list[~azure.mgmt.network.v2018_01_01.models.SubResource]
:ivar outbound_nat_rules: Read only. Outbound rules URIs that use this
frontend IP.
:vartype outbound_nat_rules:
list[~azure.mgmt.network.v2018_01_01.models.SubResource]
:ivar load_balancing_rules: Gets load balancing rules URIs that use this
frontend IP.
:vartype load_balancing_rules:
list[~azure.mgmt.network.v2018_01_01.models.SubResource]
:param private_ip_address: The private IP address of the IP configuration.
:type private_ip_address: str
:param private_ip_allocation_method: The Private IP allocation method.
Possible values are: 'Static' and 'Dynamic'. Possible values include:
'Static', 'Dynamic'
:type private_ip_allocation_method: str or
~azure.mgmt.network.v2018_01_01.models.IPAllocationMethod
:param subnet: The reference of the subnet resource.
:type subnet: ~azure.mgmt.network.v2018_01_01.models.Subnet
:param public_ip_address: The reference of the Public IP resource.
:type public_ip_address:
~azure.mgmt.network.v2018_01_01.models.PublicIPAddress
:param provisioning_state: Gets the provisioning state of the public IP
resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
:param zones: A list of availability zones denoting the IP allocated for
the resource needs to come from.
:type zones: list[str]
"""
_validation = {
'inbound_nat_rules': {'readonly': True},
'inbound_nat_pools': {'readonly': True},
'outbound_nat_rules': {'readonly': True},
'load_balancing_rules': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'inbound_nat_rules': {'key': 'properties.inboundNatRules', 'type': '[SubResource]'},
'inbound_nat_pools': {'key': 'properties.inboundNatPools', 'type': '[SubResource]'},
'outbound_nat_rules': {'key': 'properties.outboundNatRules', 'type': '[SubResource]'},
'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'properties.privateIPAllocationMethod', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'Subnet'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'PublicIPAddress'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(self, *, id: str=None, private_ip_address: str=None, private_ip_allocation_method=None, subnet=None, public_ip_address=None, provisioning_state: str=None, name: str=None, etag: str=None, zones=None, **kwargs) -> None:
super(FrontendIPConfiguration, self).__init__(id=id, **kwargs)
self.inbound_nat_rules = None
self.inbound_nat_pools = None
self.outbound_nat_rules = None
self.load_balancing_rules = None
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.subnet = subnet
self.public_ip_address = public_ip_address
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
self.zones = zones
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2018_01_01/models/frontend_ip_configuration_py3.py | Python | mit | 4,846 |
# -*- coding: utf8 -*-
from decode import decode | memorycoin/asm2mmc | asm2mmc/__init__.py | Python | mit | 49 |
import sys, re
if sys.version_info < (3, 0):
import testcase
import modules.cssfuncs as funcs
else:
from . import testcase
from ..modules import cssfuncs as funcs
class TestFunctions(testcase.TestCase):
title = "CSS Functions"
def test_functions(self):
self.set_text( self.input() )
self.text_equals( self.input() )
self.compile()
self.find( re.escape(self.result()) )
self.decompile()
self.text_equals( self.input() )
def vars(self):
return """
/*
* @box-shadow = box-shadow(0 0 4px #ff0)
* @transition = transition(all 0.3s ease)
* @transform = transform(rotate(7.deg))
* @gradient1 = linear-gradient(#fff, #f00)
* @gradient2 = linear-gradient(to top, #fff, #f00)
* @gradient3 = linear-gradient(to bottom , #fff, #f00)
*/
"""
def input(self):
return self.vars()+"""
h1 {
@box-shadow;
@transform;
@transition;
@gradient1;
@gradient2;
@gradient3;
}
"""
def result(self):
return self.vars()+"""
h1 {
-webkit-box-shadow: 0 0 4px #ff0;
box-shadow: 0 0 4px #ff0;
-webkit-transform: rotate(7.deg);
-ms-transform: rotate(7.deg);
transform: rotate(7.deg);
-webkit-transition: all 0.3s ease;
transition: all 0.3s ease;
background-image: -webkit-linear-gradient(bottom, #fff, #f00);
background-image: linear-gradient(to top, #fff, #f00);
background-image: -webkit-linear-gradient(bottom, #fff, #f00);
background-image: linear-gradient(to top, #fff, #f00);
background-image: -webkit-linear-gradient(top, #fff, #f00);
background-image: linear-gradient(to bottom , #fff, #f00);
}
"""
| kizza/CSS-Less-ish | tests/testfuncs.py | Python | mit | 1,589 |
"""Special exceptions for the ``command_interface`` app."""
class CommandError(Exception):
pass
| bitmazk/django-command-interface | command_interface/exceptions.py | Python | mit | 102 |
from django.apps import AppConfig
class MailinglistsConfig(AppConfig):
name = 'apps.mailinglists'
verbose_name = 'Mailinglists'
| dotKom/onlineweb4 | apps/mailinglists/appconfig.py | Python | mit | 138 |
import os
import pandas as pd
import seaborn as sns
dataDir = '..\\Test_Data\\'
pilotMarkerDataFile = 'Pilot.csv'
df = pd.read_csv( dataDir + '\\' + pilotMarkerDataFile,sep='\t', engine='python')
repr(df.head())
# TODO times per position
# plotting a heatmap http://stanford.edu/~mwaskom/software/seaborn/examples/many_pairwise_correlations.html
## Generate a custom diverging colormap
#cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
#sns.heatmap(timesAtPositions, mask=mask, cmap=cmap, vmax=.3,
# square=True, xticklabels=5, yticklabels=5,
# linewidths=.5, cbar_kws={"shrink": .5}, ax=ax) | xfleckx/BeMoBI_Tools | analytics/BeMoBI_PyAnalytics/BeMoBI_PyAnalytics.py | Python | mit | 682 |
from django.http import Http404
from rest_framework import generics, permissions
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.views import APIView
from core.utils import filter_files_by_n_slashes
from .serializers import (FileBrowserPathListSerializer, FileBrowserPathSerializer,
FileBrowserPathFileSerializer)
from .services import get_path_folders, get_path_file_queryset, get_path_file_model_class
class FileBrowserPathList(generics.ListAPIView):
"""
A view for the initial page of the collection of file browser paths. The returned
collection only has a single element.
"""
http_method_names = ['get']
serializer_class = FileBrowserPathListSerializer
permission_classes = (permissions.IsAuthenticated,)
def list(self, request, *args, **kwargs):
"""
        Overridden to append a query list to the response.
"""
response = super(FileBrowserPathList, self).list(request, *args, **kwargs)
# append query list
query_url = reverse('filebrowserpath-list-query-search', request=request)
data = [{'name': 'path', 'value': ''}]
queries = [{'href': query_url, 'rel': 'search', 'data': data}]
response.data['queries'] = queries
return response
def get_queryset(self):
"""
Overriden to return a custom queryset that is only comprised by the initial
path (empty path).
"""
username = self.request.user.username
objects = [{'path': '', 'subfolders': f'SERVICES,{username}'}]
return self.filter_queryset(objects)
class FileBrowserPathListQuerySearch(generics.ListAPIView):
"""
A view for the collection of file browser paths resulting from a query search.
The returned collection only has at most one element.
"""
http_method_names = ['get']
permission_classes = (permissions.IsAuthenticated,)
def get_queryset(self):
"""
Overriden to return a custom queryset.
"""
username = self.request.user.username
path = self.request.GET.get('path', '')
if not path:
objects = [{'path': '', 'subfolders': f'SERVICES,{username}'}]
else:
path = path.strip('/')
try:
subfolders = get_path_folders(path, username)
except ValueError:
objects = []
else:
objects = [{'path': path, 'subfolders': ','.join(subfolders)}]
return self.filter_queryset(objects)
def get_serializer_class(self, *args, **kwargs):
"""
        Overridden to return the serializer class that should be used for serializing
output.
"""
path = self.request.GET.get('path', '')
if not path:
return FileBrowserPathListSerializer
self.kwargs['path'] = path.strip('/')
return FileBrowserPathSerializer
class FileBrowserPath(APIView):
"""
A file browser path view.
"""
http_method_names = ['get']
permission_classes = (permissions.IsAuthenticated,)
def get(self, request, *args, **kwargs):
"""
        Overridden to be able to make a GET request to an actual file resource.
"""
username = request.user.username
path = kwargs.get('path')
try:
subfolders = get_path_folders(path, username)
except ValueError:
raise Http404('Not found.')
object = {'path': path, 'subfolders': ','.join(subfolders)}
serializer = self.get_serializer(object)
return Response(serializer.data)
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for serializing output.
"""
kwargs.setdefault('context', self.get_serializer_context())
return FileBrowserPathSerializer(*args, **kwargs)
def get_serializer_context(self):
"""
Extra context provided to the serializer class.
"""
return {'request': self.request, 'view': self}
class FileBrowserPathFileList(generics.ListAPIView):
"""
A view for the collection of a file browser path's files.
"""
http_method_names = ['get']
permission_classes = (permissions.IsAuthenticated, )
def get_queryset(self):
"""
        Overridden to return a custom queryset.
"""
username = self.request.user.username
path = self.kwargs.get('path')
try:
qs = get_path_file_queryset(path, username)
except ValueError:
raise Http404('Not found.')
n_slashes = path.count('/') + 1
return filter_files_by_n_slashes(qs, str(n_slashes))
def get_serializer_class(self):
"""
        Overridden to return the serializer class that should be used for serializing
output.
"""
username = self.request.user.username
path = self.kwargs.get('path')
model_class = get_path_file_model_class(path, username)
FileBrowserPathFileSerializer.Meta.model = model_class
return FileBrowserPathFileSerializer
| FNNDSC/ChRIS_ultron_backEnd | chris_backend/filebrowser/views.py | Python | mit | 5,178 |
# parsing the dump to get all the keys for the current players
import json
dic={}
with open('currentPlayerDump.json','r') as f:
data=json.load(f)
print data["resultSets"][0]["headers"]
print len(data["resultSets"][0]["rowSet"])
for obj in data["resultSets"][0]["rowSet"]:
if obj[0] not in dic:
dic[obj[0]]=obj[1]
with open('playerKey','w') as f1:
for key in dic:
f1.write(str(key)+" : "+ str(dic[key])+"\n") | bam2332g/proj1part3 | rahulCode_redo/project1Part3/nba/parseCurrentPlayers.py | Python | mit | 422 |
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
super().finalize_options()
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
requires = [
'blessings >= 1.6, < 2.0',
'sqlalchemy >= 1.3, < 2.0',
'PyYAML >= 5.1, < 6.0',
'python-dateutil >= 2.8, <3.0',
'click >= 6.7, <7.0',
'czech-holidays',
'python-slugify',
]
tests_require = ['pytest']
if sys.version_info < (3, 4):
# pathlib is in the stdlib since Python 3.4
requires.append('pathlib >= 1.0.1, < 2.0')
setup_args = dict(
name='pyvodb',
version='1.0',
packages=find_packages(),
url='https://github.com/pyvec/pyvodb',
description="""Database of Pyvo meetups""",
author='Petr Viktorin',
author_email='encukou@gmail.com',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
install_requires=requires,
tests_require=tests_require,
cmdclass={'test': PyTest},
entry_points={
'console_scripts': [
'pyvo=pyvodb.cli:main',
],
},
)
if __name__ == '__main__':
setup(**setup_args)
| pyvec/pyvodb | setup.py | Python | mit | 1,702 |
from contextlib import contextmanager
from fnmatch import fnmatch
import stat
from pyrsistent import pset
import attr
from filesystems import Path, exceptions
def _realpath(fs, path, seen=pset()):
"""
.. warning::
The ``os.path`` module's realpath does not error or warn about
loops, but we do, following the behavior of GNU ``realpath(1)``!
"""
real = Path.root()
for segment in path.segments:
current = real / segment
seen = seen.add(current)
while True:
try:
current = fs.readlink(current)
except (exceptions.FileNotFound, exceptions.NotASymlink):
break
else:
current = current.relative_to(real)
if current in seen:
raise exceptions.SymbolicLoop(path)
current = fs.realpath(current, seen=seen)
real = current
return real
def _recursive_remove(fs, path):
"""
A recursive, non-atomic directory removal.
"""
if not fs.is_link(path=path) and fs.is_dir(path=path):
for child in fs.children(path=path):
_recursive_remove(fs=fs, path=child)
fs.remove_empty_directory(path=path)
else:
fs.remove_file(path=path)
def create(
name,
create_file,
open_file,
remove_file,
create_directory,
list_directory,
remove_empty_directory,
temporary_directory,
stat,
lstat,
link,
readlink,
realpath=_realpath,
remove=_recursive_remove,
):
"""
Create a new kind of filesystem.
"""
def _create_directory(fs, path, with_parents=False, allow_existing=False):
create_directory(
fs,
path,
with_parents=with_parents,
allow_existing=allow_existing,
)
return path
methods = dict(
create=create_file,
open=lambda fs, path, mode="r": open_file(
fs=fs, path=path, mode=mode,
),
remove_file=remove_file,
create_directory=_create_directory,
list_directory=list_directory,
remove_empty_directory=remove_empty_directory,
temporary_directory=temporary_directory,
get_contents=lambda fs, path, mode="": _get_contents(
fs=fs, path=path, mode=mode,
),
set_contents=lambda fs, path, contents, mode="": _set_contents(
fs=fs, path=path, contents=contents, mode=mode,
),
create_with_contents=_create_with_contents,
remove=remove,
removing=_removing,
stat=stat,
lstat=lstat,
link=link,
readlink=readlink,
realpath=realpath,
exists=_exists,
is_dir=_is_dir,
is_file=_is_file,
is_link=_is_link,
touch=_touch,
children=_children,
glob_children=_glob_children,
)
return attr.s(hash=True)(type(name, (object,), methods))
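# A concrete backend supplies its primitives to create(); rough sketch (the names and
# helper functions below are illustrative, not part of this module):
#
#   OSFS = create(
#       name="OSFileSystem",
#       create_file=_os_create, open_file=_os_open, remove_file=_os_remove,
#       create_directory=_os_mkdir, list_directory=_os_listdir,
#       remove_empty_directory=_os_rmdir, temporary_directory=_os_mkdtemp,
#       stat=_os_stat, lstat=_os_lstat, link=_os_link, readlink=_os_readlink,
#   )
#   fs = OSFS()  # the generated class also carries the shared helpers defined below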
@contextmanager
def _removing(fs, path):
try:
yield path
finally:
fs.remove(path=path)
def _get_contents(fs, path, mode):
with fs.open(path=path, mode="r" + mode) as file:
return file.read()
def _set_contents(fs, path, contents, mode):
with fs.open(path=path, mode="w" + mode) as file:
file.write(contents)
def _create_with_contents(fs, path, contents):
with fs.create(path=path) as file:
file.write(contents)
def _children(fs, path):
return pset(path / p for p in fs.list_directory(path=path))
def _glob_children(fs, path, glob):
return pset(
path / p
for p in fs.list_directory(path=path)
if fnmatch(p, glob)
)
def _touch(fs, path):
fs.open(path=path, mode="wb").close()
def _open_and_read(fs, path):
with fs.open(path=path) as file:
return file.read()
def _exists(fs, path):
"""
Check that the given path exists on the filesystem.
Note that unlike `os.path.exists`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
fs.stat(path)
except (exceptions.FileNotFound, exceptions.NotADirectory):
return False
return True
def _is_dir(fs, path):
"""
Check that the given path is a directory.
Note that unlike `os.path.isdir`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
return stat.S_ISDIR(fs.stat(path).st_mode)
except exceptions.FileNotFound:
return False
def _is_file(fs, path):
"""
Check that the given path is a file.
Note that unlike `os.path.isfile`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
return stat.S_ISREG(fs.stat(path).st_mode)
except exceptions.FileNotFound:
return False
def _is_link(fs, path):
"""
Check that the given path is a symbolic link.
Note that unlike `os.path.islink`, we *do* propagate file system errors
other than a non-existent path or non-existent directory component.
E.g., should EPERM or ELOOP be raised, an exception will bubble up.
"""
try:
return stat.S_ISLNK(fs.lstat(path).st_mode)
except exceptions.FileNotFound:
return False
@attr.s(frozen=True)
class _FileMode(object):
activity = attr.ib(default="r")
mode = attr.ib(default='', converter=lambda x: x if x != "" else "t")
read = attr.ib()
write = attr.ib()
append = attr.ib()
text = attr.ib()
binary = attr.ib()
@read.default
def read_default(self):
return self.activity == "r"
@write.default
def write_default(self):
return self.activity == "w"
@append.default
def append_default(self):
return self.activity == "a"
@text.default
def text_default(self):
return self.mode == "t"
@binary.default
def binary_default(self):
return self.mode == "b"
@activity.validator
def activity_validator(self, attribute, value):
options = ("r", "w", "a")
if value not in options:
raise exceptions.InvalidMode(
"Mode must start with one of {} but found {}".format(
repr(options),
repr(value),
)
)
@mode.validator
def _(self, attribute, value):
options = ("b", "t")
if value not in options:
raise exceptions.InvalidMode(
"Mode must start with one of {} but found {}".format(
repr(options),
repr(value),
)
)
def io_open_string(self):
return self.activity + self.mode
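# Behaviour readable from the class above: _FileMode(activity="r", mode="b") has
# read=True and binary=True and io_open_string() returns "rb"; an empty mode string
# defaults to "t", i.e. text mode.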
def _parse_mode(mode):
parameters = {}
first = mode[:1]
rest = mode[1:]
if len(first) > 0:
parameters["activity"] = first
if len(rest) > 0:
parameters["mode"] = rest
return _FileMode(**parameters)
| Julian/Filesystems | filesystems/common.py | Python | mit | 7,276 |
# -*- coding: UTF-8 -*-
"""
Homework for lesson 2-2,
"Working with different data formats".
Completed by Ilya Martysyuk, PY-3
"""
import re
import glob
import chardet
from os.path import join
from xml.etree.cElementTree import XMLParser, parse
def open_data_file(path):
with open(path, 'rb') as encoding_detect_file:
file_text = encoding_detect_file.read()
encoding = chardet.detect(file_text)['encoding']
parser = XMLParser(encoding=encoding)
tree = parse(path, parser=parser)
root = tree.getroot()
return root
def compile_data(root):
long_dict = dict()
for i in root.iter('description'):
clean_re = re.compile(r'<.*?>|[^\w\s]+|[\d]+|[a-z]+|[A-Z]+|[\n]')
clean_text = clean_re.sub('', i.text)
temp_list = clean_text.strip().split(' ')
for t in temp_list:
if len(t) > 6:
try:
long_dict[t] += 1
except KeyError:
long_dict.update({t: 1})
long_dict = sorted(long_dict.items(), key=lambda x: x[1], reverse=True)
print(long_dict)
return long_dict
def print_result(long_dict):
print('ТОП 10 самых часто встречающихся слов:')
for i in range(10):
print('{}) Слово "{}" встречается {} раз'.format(i+1, long_dict[i][0], long_dict[i][1]))
path = 'lesson2-2'
files = glob.glob(join(path, '*.xml'))
for file in files:
print('\nОбработка файла {}'.format(file))
print_result(compile_data(open_data_file(file)))
| martysyuk/PY-3-Learning | homeworks/lesson2-2.py | Python | mit | 1,637 |
from sqlalchemy import inspection, event, Column, Integer, ForeignKey
from sqlalchemy.orm import session, query
from sqlalchemy.sql import expression
from sqlalchemy.ext.declarative import declared_attr
import sqlalchemy
__all__ = [
'Base',
'TenantSession',
'TenantConflict',
'UnboundTenantError'
]
SQLA_VERSION_8 = sqlalchemy.__version__.startswith('0.8')
class UnboundTenantError(Exception):
pass
class TenantConflict(Exception):
pass
class Base(object):
__multitenant__ = True
__plural_tablename__ = None
@classmethod
def tenant_class(cls, tenant_cls):
cls._tenant_cls = tenant_cls
event.listen(tenant_cls, 'after_insert', after_tenant_insert)
event.listen(tenant_cls, 'before_delete', before_tenant_delete)
return tenant_cls
@declared_attr
def tenant_id(cls):
if not cls.__multitenant__:
return None
return Column(
Integer, ForeignKey("%s.id" % cls._tenant_cls.__tablename__),
index=True)
# abandoning this for now as it causes unexpected SQLAlchemy error
#@declared_attr
#def tenant(cls):
#if not cls.__multitenant__:
#return None
#return relationship(
#cls._tenant_cls, primaryjoin=(cls.tenant_id ==
#cls._tenant_cls.id),
#backref=cls._tenant_cls.__tablename__)
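# Hypothetical usage sketch (the names below are illustrative, not part of this module):
#
#   class Tenant(DeclarativeBase):            # the tenant table itself
#       __multitenant__ = False
#       id = Column(Integer, primary_key=True)
#   Base.tenant_class(Tenant)                 # registers the class and the insert/delete hooks
#
#   session = TenantSession(bind=engine)
#   session.tenant = some_tenant              # queries and writes are now scoped to this tenant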
def after_tenant_insert(mapper, connection, target):
# create user
# create views
# revoke all on user
pass
def before_tenant_delete(mapper, connection, target):
# backup data?
# drop views
# drop user
# drop data
pass
class TenantSession(session.Session):
def __init__(self, query_cls=None, *args, **kwargs):
self.tenant = None
query_cls = query_cls or TenantQuery
super(TenantSession, self).__init__(
query_cls=query_cls, *args, **kwargs)
def query(self, *args, **kwargs):
kwargs.setdefault('safe', True)
return super(TenantSession, self).query(*args, **kwargs)
def add(self, instance, *args, **kwargs):
self.check_instance(instance)
instance.tenant_id = self.tenant.id
super(TenantSession, self).add(instance, *args, **kwargs)
def delete(self, instance, *args, **kwargs):
self.check_instance(instance)
super(TenantSession, self).delete(instance, *args, **kwargs)
def merge(self, instance, *args, **kwargs):
self.check_instance(instance)
super(TenantSession, self).merge(instance, *args, **kwargs)
def check_instance(self, instance):
if instance.__multitenant__ and self.tenant is None:
raise UnboundTenantError(
"Tried to do a tenant-safe operation in a tenantless context.")
if instance.__multitenant__ and instance.tenant_id is not None and \
instance.tenant_id != self.tenant.id:
raise TenantConflict((
"Tried to use a %r with tenant_id %r in a session with " +
"tenant_id %r") % (
type(instance), instance.tenant_id, self.tenant.id))
class TenantQuery(query.Query):
def __init__(self, *args, **kwargs):
self._safe = kwargs.pop('safe', True)
super(TenantQuery, self).__init__(*args, **kwargs)
@property
def _from_obj(self):
# we only do the multitenant processing on accessing the _from_obj /
# froms properties, rather than have a wrapper object, because it
# wasn't possible to implement the right magic methods and still have
# the wrapper object evaluate to the underlying sequence.
# This approach is fine because adding a given criterion is idempotent.
if getattr(self, '_from_obj_', None) is None:
self._from_obj_ = ()
for from_ in self._from_obj_:
_process_from(from_, self)
return self._from_obj_
@_from_obj.setter
def _from_obj(self, value):
self._from_obj_ = value
def _join_to_left(self, *args, **kwargs):
right = args[1 if SQLA_VERSION_8 else 2]
super(TenantQuery, self)._join_to_left(*args, **kwargs)
_process_from(inspection.inspect(right).selectable, self)
class TenantQueryContext(query.QueryContext):
@property
def froms(self):
if getattr(self, '_froms', None) is None:
self._froms = []
for from_ in self._froms:
_process_from(from_, self.query, self)
return self._froms
@froms.setter
def froms(self, value):
self._froms = value
# monkey patch to avoid needing changes to SQLAlchemy
query.QueryContext = TenantQueryContext
def _process_from(from_, query, query_context=None):
if not getattr(query, '_safe', None):
return
tenant_id_col = from_.c.get('tenant_id')
if tenant_id_col is not None:
if query.session.tenant is None:
raise UnboundTenantError(
"Tried to do a tenant-bound query in a tenantless context.")
# logic copied from orm.Query.filter, in order to be able to modify
# the existing query in place
criterion = expression._literal_as_text(
tenant_id_col == query.session.tenant.id)
criterion = query._adapt_clause(criterion, True, True)
if query_context is None:
if query._criterion is not None:
query._criterion = query._criterion & criterion
else:
query._criterion = criterion
else:
if query_context.whereclause is not None:
query_context.whereclause = (
query_context.whereclause & criterion)
else:
query_context.whereclause = criterion
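

# Minimal usage sketch (illustrative only): how TenantSession and TenantQuery are
# meant to be wired together. The `engine`, `Tenant` and `Report` names below are
# hypothetical and stand in for whatever models the tenant mixin is applied to.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#
#     engine = create_engine('sqlite://')
#     Session = sessionmaker(class_=TenantSession, bind=engine)
#     session = Session()
#     # queries issued before a tenant is bound must opt out with safe=False
#     session.tenant = session.query(Tenant, safe=False).first()
#     # from here on, every query against multitenant models is filtered by tenant_id
#     reports = session.query(Report).all()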
| mwhite/MultiAlchemy | multialchemy/base.py | Python | mit | 5,831 |
# -*- coding: utf-8 -*-
import unittest
from cwr.parser.encoder.dictionary import PublisherForWriterDictionaryEncoder
from cwr.interested_party import PublisherForWriterRecord
"""
Publisher for Writer record to dictionary encoding tests.
The following cases are tested:
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class TestPublisherForWriterRecordDictionaryEncoding(unittest.TestCase):
def setUp(self):
self._encoder = PublisherForWriterDictionaryEncoder()
def test_encoded(self):
data = PublisherForWriterRecord(record_type='SPU',
transaction_sequence_n=3,
record_sequence_n=15,
publisher_ip_n='111',
writer_ip_n='222',
submitter_agreement_n='333',
society_assigned_agreement_n='444')
encoded = self._encoder.encode(data)
self.assertEqual('SPU', encoded['record_type'])
self.assertEqual(3, encoded['transaction_sequence_n'])
self.assertEqual(15, encoded['record_sequence_n'])
self.assertEqual('111', encoded['publisher_ip_n'])
self.assertEqual('222', encoded['writer_ip_n'])
self.assertEqual('333', encoded['submitter_agreement_n'])
self.assertEqual('444', encoded['society_assigned_agreement_n'])
| weso/CWR-DataApi | tests/parser/dictionary/encoder/record/test_publisher_for_writer.py | Python | mit | 1,487 |
# !/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
ERP+
"""
__author__ = 'António Anacleto'
__credits__ = []
__version__ = "1.0"
__maintainer__ = "António Anacleto"
__status__ = "Development"
__model_name__ = 'balancete.Balancete'
import auth, base_models
from orm import *
from form import *
try:
from my_ano_fiscal import AnoFiscal
except:
from ano_fiscal import AnoFiscal
try:
from my_periodo import Periodo
except:
from periodo import Periodo
class Balancete(Model, View):
def __init__(self, **kargs):
Model.__init__(self, **kargs)
self.__name__ = 'balancete'
self.__title__ = 'Balancete'
self.__model_name__ = __model_name__
self.__list_edit_mode__ = 'edit'
        self.__db_mode__ = 'None'  # if db_mode is None, open directly in edit mode instead of the list view
self.__workflow__ = (
'estado', {'Rascunho':['Imprimir', 'Exportar']}
)
self.__workflow_auth__ = {
'Imprimir':['Contabilista'],
'Exportar':['Contabilista'],
'full_access':['Gestor']
}
self.__auth__ = {
'read':['All'],
'write':['Contabilista'],
'create':['Contabilista'],
'delete':['Contabilista'],
'full_access':['Gestor']
}
self.data_inicial = date_field(view_order=1, name ='Data Inicial')
self.data_final = date_field(view_order=2, name ='Data Final', default=datetime.date.today())
self.nivel = combo_field(view_order=5, name ='Nivel', options=[('lancamento','Lançamento'), ('razao','Razão'), ('agrupadoras','Agrupadoras')])
        # fiscal year and periods should be a list; ideally this would be a multi-select
#self.ano_fiscal = choice_field(view_order=3, name ='Ano Fiscal', model='ano_fiscal', column='nome', options='model.get_ano_fiscal()')
#self.periodo = choice_field(view_order=4, name ='Periodo', model='periodo', column='nome', options='model.get_periodo()')
#self.saldos_do_periodo = boolean_field(view_order=6, name ='Saldos do Periodo?')
#self.Inclui_passado = boolean_field(view_order=6, name ='Inclui Saldos Anteriores?')
self.estado = info_field(view_order=7, name ='Estado', hidden=True, nolabel=True, default='Rascunho')
def get_ano_fiscal(self):
return AnoFiscal().get_options()
def get_periodo(self):
return Periodo().get_options()
def prepare_data(self):
#print('prepare data do balancete')
nivel = bottle.request.forms.get('nivel')
#print(nivel)
        # TODO: implement the remaining filters later
#ano_fiscal =
#periodo =
#saldos_do_periodo =
data_final = bottle.request.forms.get('data_final')
#print(data_final)
data_inicial = bottle.request.forms.get('data_inicial')
#print(data_inicial)
data_where = """and m.data <= '{data_final}'""".format(data_final=data_final)
#print(data_where)
if data_inicial:
data_where += """and m.data >= '{data_inicial}'""".format(data_inicial=data_inicial)
#print(data_where)
sql = """
select
pc.id,
pc.codigo,
pc.nome as conta,
pc.ascendente,
coalesce(sum(lm.debito),0.00) as debito,
coalesce(sum(lm.credito),0.00) as credito,
coalesce(sum(lm.debito),0.00) - coalesce(sum(lm.credito),0.00) as saldo
from plano_contas pc
join linha_movimento lm
on lm.conta = pc.id
join movimento m
on lm.movimento = m.id
where (pc.active = True or pc.active is null)
and (m.active = True or m.active is null)
and (lm.active = True or lm.active is null)
{data_where}
group by pc.id, pc.codigo, pc.nome, pc.ascendente
order by pc.codigo
""".format(data_where=data_where)
#print(sql)
db_lines = run_sql(sql)
#print(db_lines)
try:
from my_plano_contas import PlanoContas
except:
from plano_contas import PlanoContas
all_contas = PlanoContas().get(order_by='codigo')
contas = {}
for conta in all_contas:
contas[conta['id']] = conta
#descendentes = {}
#for conta in all_contas:
# if conta['ascendente'] in descendentes:
# descendentes[conta['ascendente']].append(conta['id'])
# else:
# descendentes[conta['ascendente']] = [conta['id']]
#print(contas)
movimentos = {}
for line in db_lines:
#print(line)
movimentos[line['id']] = {'codigo':line['codigo'], 'conta':line['conta'], 'ascendente':line['ascendente'], 'debito':line['debito'], 'credito':line['credito'], 'saldo':line['saldo'], 'somado':False}
#print(movimentos)
        for x in range(30):  # ugly, inelegant approach: a fixed number of passes to roll sums up the account tree
linhas = movimentos.copy()
for m in linhas:
movimento = linhas[m]
if not movimento['somado']:
movimento['somado'] = True
if movimento['ascendente']:
if movimento['ascendente'] in movimentos:
movimentos[movimento['ascendente']]['debito'] += movimento['debito']
movimentos[movimento['ascendente']]['credito'] += movimento['credito']
movimentos[movimento['ascendente']]['saldo'] += movimento['saldo']
else:
movimentos[movimento['ascendente']] = {'codigo': contas[movimento['ascendente']]['codigo'], 'conta': contas[movimento['ascendente']]['nome'], 'ascendente': contas[movimento['ascendente']]['ascendente'], 'debito': movimento['debito'], 'credito': movimento['credito'], 'saldo': movimento['saldo'], 'somado':False}
#print(movimentos)
lines = []
for line in all_contas:
#print(line['id'])
#print(movimentos[line['id']])
if line['id'] in movimentos:
#print('im in movimentos')
rec_line = {}
rec_line['codigo'] = movimentos[line['id']]['codigo']
rec_line['conta'] = movimentos[line['id']]['conta'].replace(';', ',')
rec_line['debito'] = format_number(movimentos[line['id']]['debito'])
rec_line['credito'] = format_number(movimentos[line['id']]['credito'])
rec_line['saldo'] = format_number(movimentos[line['id']]['saldo'])
lines.append(rec_line)
#else:
#print('im not in movimentos')
#rec_line = {}
#rec_line['codigo'] = line['codigo']
#rec_line['conta'] = line['nome'].replace(';', ',')
#rec_line['debito'] = format_number(0)
#rec_line['credito'] = format_number(0)
#rec_line['saldo'] = format_number(0)
#lines.append(rec_line)
#print(lines)
record = {}
record['lines'] = lines
record['data_inicial'] = data_inicial
record['data_final'] = data_final
record['periodos'] = ''#periodos
record['anos_fiscais'] = ''#anos_fiscais
record['nivel'] = ''#nivel
return record
def Imprimir(self, key, window_id):
#print('estou no imprimir do balancete')
template = 'balancete'
record = self.prepare_data()
return Report(record=record, report_template=template).show()
def Exportar(self, key, window_id):
#print('estou na função de Exportar no balancete')
result = self.prepare_data()['lines']
#print('result: ', result)
return data_to_csv(data=result, model=self, text='Gravar', cols=['codigo', 'conta', 'debito', 'credito', 'saldo'])
| IdeaSolutionsOnline/ERP4R | core/objs/balancete.py | Python | mit | 7,841 |
import sublime, sublime_plugin
import os
import subprocess
import threading
class OpenGitbashHere(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
file_path = view.file_name()
dirname = os.path.dirname(file_path)
th = BashTerminalThread(dirname)
th.start()
def enabled(self):
return True if self.view.file_name() else False
class BashTerminalThread(threading.Thread):
def __init__(self, dirname):
self.dirname = dirname
threading.Thread.__init__(self)
def run(self):
if self.dirname:
fpc = "--cd={0}".format(self.dirname)
subprocess.call([r"C:\Program Files\Git\git-bash.exe", fpc])
| zeffii/sublimetext_productivity | Packages/User/open_gitbash_here.py | Python | mit | 719 |
import _plotly_utils.basevalidators
class PatternValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="pattern", parent_name="volume.surface", **kwargs):
super(PatternValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
extras=kwargs.pop("extras", ["all", "odd", "even"]),
flags=kwargs.pop("flags", ["A", "B", "C", "D", "E"]),
role=kwargs.pop("role", "style"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/volume/surface/_pattern.py | Python | mit | 582 |
import sys, os
import tweepy
# File with colon-separaten consumer/access token and secret
consumer_file='twitter.consumer'
access_file='twitter.access'
def __load_auth(file):
if os.path.exists(file):
with open(file) as f:
tokens = f.readline().replace('\n','').replace('\r','').split(':')
if len(tokens) == 2:
return tokens[0],tokens[1]
else:
raise ValueError("Expecting two colon-separated tokens")
else:
raise IOError("File not found: %s" % file)
def twit(message, secret_dir='/secret'):
#
# Load the twitter consumer and access tokens and secrets
consumer_token, consumer_secret = __load_auth(os.path.join(secret_dir, consumer_file))
access_token, access_secret = __load_auth(os.path.join(secret_dir, access_file))
#
# Perform OAuth authentication
auth = tweepy.OAuthHandler(consumer_token, consumer_secret)
auth.set_access_token(access_token, access_secret)
#
# Create the API and post the status update
try:
api = tweepy.API(auth)
api.update_status(message)
except tweepy.error.TweepError, e:
print "Failed to post status update"
print "Error: %s" % str(e)
print "Using:"
print " consumer[%s][%s]" % (consumer_token, consumer_secret)
print " access[%s][%s]" % (access_token, access_secret)
if __name__ == '__main__':
tokens = sys.argv[1:]
#
twit(' '.join(tokens))
| marc0uk/twit | twit.py | Python | mit | 1,468 |
# Python 3 program for soundscape generation. (C) P.B.L. Meijer 2015
# Direct port of the hificode.c C program
# Last update: October 6, 2015; released under the Creative
# Commons Attribution 4.0 International License (CC BY 4.0),
# see http://www.seeingwithsound.com/im2sound.htm for details
#
# Beware that this program runs excruciatingly slowly under Python,
# while the PyPy python JIT compiler does not (yet) support OpenCV
import math
import os
import struct
import sys
import wave
import cv2 as cv
import numpy as np
file_name = 'hificode.wav' # User-defined parameters
min_frequency = 500 # Lowest frequency (Hz) in soundscape
max_frequency = 5000 # Highest frequency (Hz)
sample_frequency = 44100 # Sample frequency (Hz)
image_to_sound_conversion_time = 1.05 # Image to sound conversion time (s)
use_exponential = False # Linear|Exponential=0|1 distribution
hifi = 1 # 8-bit|16-bit=0|1 sound quality
stereo = 1 # Mono|Stereo=0|1 sound selection
delay = 1 # Nodelay|Delay=0|1 model (stereo=1)
relative_fade = 1 # Relative fade No|Yes=0|1 (stereo=1)
diffraction = 1 # Diffraction No|Yes=0|1 (stereo=1)
use_b_spline = 1 # Rectangular|B-spline=0|1 time window
gray_levels = 0 # 16|2-level=0|1 gray format in P[][]
use_camera = 1 # Use OpenCV camera input No|Yes=0|1
use_screen = 1 # Screen view for debugging No|Yes=0|1
class Soundscape(object):
IR = 0
IA = 9301
IC = 49297
IM = 233280
TwoPi = 6.283185307179586476925287
WHITE = 1.00
BLACK = 0.00
def __init__(self, file_name='hificode.wav', min_frequency=500, max_frequency=5000, sample_frequency=44100,
image_to_sound_conversion_time=1.05, is_exponential=False, hifi=True, stereo=True, delay=True,
relative_fade=True, diffraction=True, use_b_spline=True, gray_levels=16, use_camera=True,
use_screen=True):
"""
:param file_name:
:type file_name: str
:param min_frequency:
:type min_frequency: int
:param max_frequency:
:type max_frequency: int
:param sample_frequency:
:type sample_frequency: int
:param image_to_sound_conversion_time:
:type image_to_sound_conversion_time: float
:param is_exponential:
:type is_exponential: bool
:param hifi:
:type hifi: bool
:param stereo:
:type stereo: bool
:param delay:
:type delay: bool
:param relative_fade:
:type relative_fade: bool
:param diffraction:
:type diffraction: bool
:param use_b_spline:
:type use_b_spline: bool
:param gray_levels:
:type gray_levels: int
:param use_camera:
:type use_camera: bool
:param use_screen:
:type use_screen: bool
:return:
:rtype:
"""
self.file_name = file_name
self.min_frequency = min_frequency
self.max_frequency = max_frequency
self.sample_frequency = sample_frequency
self.image_to_sound_conversion_time = image_to_sound_conversion_time
self.is_exponential = is_exponential
self.hifi = hifi
self.stereo = stereo
self.delay = delay
self.relative_fade = relative_fade
self.diffraction = diffraction
self.use_b_spline = use_b_spline
self.gray_levels = gray_levels
self.use_camera = use_camera
self.use_screen = use_screen
self.hist = (1 + self.hifi) * (1 + self.stereo)
if use_camera:
self.num_columns = 176
self.num_rows = 64
else:
self.num_columns = 64
self.num_rows = 64
self.k = 0
self.b = 0
self.num_frames = 2 * int(0.5 * self.sample_frequency * self.image_to_sound_conversion_time)
self.frames_per_column = int(self.num_frames / self.num_columns)
self.sso = 0 if self.hifi else 128
self.ssm = 32768 if self.hifi else 128
self.scale = 0.5 / math.sqrt(self.num_rows)
self.dt = 1.0 / self.sample_frequency
self.v = 340.0 # v = speed of sound (m/s)
self.hs = 0.20 # hs = characteristic acoustical size of head (m)
self.w = np.arange(self.num_rows, dtype=np.float)
self.phi0 = np.zeros(self.num_rows, dtype=np.float)
        # amplitudes are fractional powers of ten, so store them as floats rather than uint8
        self.A = np.zeros((self.num_columns, self.num_rows), dtype=np.float64)
# Coefficients used in rnd()
IR = 0
IA = 9301
IC = 49297
IM = 233280
TwoPi = 6.283185307179586476925287
HIST = (1 + hifi) * (1 + stereo)
WHITE = 1.00
BLACK = 0.00
if use_camera:
num_columns = 176
num_rows = 64
else:
num_columns = 64
num_rows = 64
# if gray_levels:
# else:
try:
# noinspection PyUnresolvedReferences
import winsound
except ImportError:
def playsound(frequency, duration):
# sudo dnf -y install beep
os.system('beep -f %s -l %s' % (frequency, duration))
else:
def playsound(frequency, duration):
winsound.Beep(frequency, duration)
# def playSound(file):
# if sys.platform == "win32":
# winsound.PlaySound(file, winsound.SND_FILENAME) # Windows only
# # os.system('start %s' %file) # Windows only
# elif sys.platform.startswith('linux'):
# print("No audio player called for Linux")
# else:
# print("No audio player called for your platform")
def wi(file_object, i):
b0 = int(i % 256)
b1 = int((i - b0) / 256)
file_object.write(struct.pack('B', b0 & 0xff))
file_object.write(struct.pack('B', b1 & 0xff))
def wl(fp, l):
i0 = l % 65536
i1 = (l - i0) / 65536
wi(fp, i0)
wi(fp, i1)
def rnd():
global IR, IA, IC, IM
IR = (IR * IA + IC) % IM
return IR / (1.0 * IM)
def main():
current_frame = 0
b = 0
num_frames = 2 * int(0.5 * sample_frequency * image_to_sound_conversion_time)
frames_per_column = int(num_frames / num_columns)
sso = 0 if hifi else 128
ssm = 32768 if hifi else 128
scale = 0.5 / math.sqrt(num_rows)
dt = 1.0 / sample_frequency
v = 340.0 # v = speed of sound (m/s)
hs = 0.20 # hs = characteristic acoustical size of head (m)
w = np.arange(num_rows, dtype=np.float)
phi0 = np.zeros(num_rows)
    # amplitudes are fractional powers of ten, so store them as floats rather than uint8
    A = np.zeros((num_columns, num_rows), dtype=np.float64)
# w = [0 for i in range(num_rows)]
# phi0 = [0 for i in range(num_rows)]
# A = [[0 for j in range(num_columns)] for i in range(num_rows)] # num_rows x num_columns pixel matrix
# Set lin|exp (0|1) frequency distribution and random initial phase
freq_ratio = max_frequency / float(min_frequency)
    if use_exponential:
        # exponential frequency distribution (vectorized; the redundant per-element loop was removed)
        w = TwoPi * min_frequency * np.power(freq_ratio, w / (num_rows - 1))
else:
for i in range(0, num_rows):
w[i] = TwoPi * min_frequency + TwoPi * (max_frequency - min_frequency) * i / (
num_rows - 1)
for i in range(0, num_rows): phi0[i] = TwoPi * rnd()
cam_id = 0 # First available OpenCV camera
# Optionally override ID from command line parameter: python hificode_OpenCV.py cam_id
if len(sys.argv) > 1:
cam_id = int(sys.argv[1])
try:
# noinspection PyArgumentList
cap = cv.VideoCapture(cam_id)
if not cap.isOpened():
raise ValueError('camera ID')
except ValueError:
print("Could not open camera", cam_id)
raise
# Setting standard capture size, may fail; resize later
cap.read() # Dummy read needed with some devices
# noinspection PyUnresolvedReferences
cap.set(cv.CAP_PROP_FRAME_WIDTH, 176)
# noinspection PyUnresolvedReferences
cap.set(cv.CAP_PROP_FRAME_HEIGHT, 144)
if use_screen: # Screen views only for debugging
cv.namedWindow('Large', cv.WINDOW_AUTOSIZE)
cv.namedWindow('Small', cv.WINDOW_AUTOSIZE)
key = 0
while key != 27: # Escape key
ret, frame = cap.read()
if not ret:
# Sometimes initial frames fail
print("Capture failed\n")
key = cv.waitKey(100)
continue
tmp = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if frame.shape[1] != num_rows or frame.shape[0] != num_columns:
# cv.resize(tmp, gray, Size(num_columns,num_rows))
gray = cv.resize(tmp, (num_columns, num_rows), interpolation=cv.INTER_AREA)
else:
gray = tmp
if use_screen: # Screen views only for debugging
cv.imwrite("hificodeLarge.jpg", frame)
cv.imshow('Large', frame)
cv.moveWindow('Large', 20, 20)
cv.imwrite("hificodeSmall.jpg", gray)
cv.imshow('Small', gray)
cv.moveWindow('Small', 220, 20)
key = cv.waitKey(10)
if use_camera: # Set live camera image
            mVal = gray // 16  # integer division: map 0-255 gray values to 16 levels
A[mVal == 0] = 0
A[mVal > 0] = np.power(10.0, (mVal[mVal > 0] - 15) / 10.0)
# Write 8/16-bit mono/stereo .wav file
with open(file_name, 'wb') as nf:
fp = wave.open(nf)
fp.setnchannels(2 if stereo else 1)
fp.setframerate(sample_frequency)
fp.setsampwidth(2 if hifi else 1)
tau1 = 0.5 / w[num_rows - 1]
tau2 = 0.25 * (tau1 * tau1)
y = yl = yr = z = zl = zr = 0.0
while current_frame < num_frames and not stereo:
if use_b_spline:
q = 1.0 * (current_frame % frames_per_column) / (frames_per_column - 1)
q2 = 0.5 * q * q
j = int(current_frame / frames_per_column)
j = num_columns - 1 if j > num_columns - 1 else j
s = 0.0
t = current_frame * dt
if current_frame < num_frames / (5 * num_columns):
s = (2.0 * rnd() - 1.0) / scale # "click"
else:
for i in range(0, num_rows):
if use_b_spline: # Quadratic B-spline for smooth C1 time window
if j == 0:
a = (1.0 - q2) * A[i][j] + q2 * A[i][j + 1]
elif j == num_columns - 1:
a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q2) * A[i][j]
else:
a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q * q) * A[i][j] + q2 * A[i][j + 1]
else:
a = A[i][j] # Rectangular time window
s += a * math.sin(w[i] * t + phi0[i])
yp = y
y = tau1 / dt + tau2 / (dt * dt)
y = (s + y * yp + tau2 / dt * z) / (1.0 + y)
z = (y - yp) / dt
l = sso + 0.5 + scale * ssm * y # y = 2nd order filtered s
if l >= sso - 1 + ssm: l = sso - 1 + ssm
if l < sso - ssm: l = sso - ssm
ss = int(l) & 0xFFFFFFFF # Make unsigned int
if hifi:
wi(fp, ss)
else:
fp.write(struct.pack('B', ss & 0xff))
current_frame += 1
while current_frame < num_frames and stereo:
if use_b_spline:
q = 1.0 * (current_frame % frames_per_column) / (frames_per_column - 1)
q2 = 0.5 * q * q
j = int(current_frame / frames_per_column)
j = num_columns - 1 if j > num_columns - 1 else j
r = 1.0 * current_frame / (num_frames - 1) # Binaural attenuation/delay parameter
theta = (r - 0.5) * TwoPi / 3
x = 0.5 * hs * (theta + math.sin(theta))
tl = tr = current_frame * dt
if delay:
tr += x / v # Time delay model
x = abs(x)
sl = sr = 0.0
hrtfl = hrtfr = 1.0
for i in range(0, num_rows):
if diffraction:
# First order frequency-dependent azimuth diffraction model
hrtf = 1.0 if (TwoPi * v / w[i] > x) else TwoPi * v / (x * w[i])
if theta < 0.0:
hrtfl = 1.0
hrtfr = hrtf
else:
hrtfl = hrtf
hrtfr = 1.0
if relative_fade:
# Simple frequency-independent relative fade model
hrtfl *= (1.0 - 0.7 * r)
hrtfr *= (0.3 + 0.7 * r)
if use_b_spline:
if j == 0:
a = (1.0 - q2) * A[i][j] + q2 * A[i][j + 1]
elif j == num_columns - 1:
a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q2) * A[i][j]
else:
a = (q2 - q + 0.5) * A[i][j - 1] + (0.5 + q - q * q) * A[i][j] + q2 * A[i][j + 1]
else:
a = A[i][j]
sl += hrtfl * a * math.sin(w[i] * tl + phi0[i])
sr += hrtfr * a * math.sin(w[i] * tr + phi0[i])
sl = (2.0 * rnd() - 1.0) / scale if (current_frame < num_frames / (5 * num_columns)) else sl # Left "click"
if tl < 0.0: sl = 0.0;
if tr < 0.0: sr = 0.0;
ypl = yl
yl = tau1 / dt + tau2 / (dt * dt)
yl = (sl + yl * ypl + tau2 / dt * zl) / (1.0 + yl)
zl = (yl - ypl) / dt
ypr = yr
yr = tau1 / dt + tau2 / (dt * dt)
yr = (sr + yr * ypr + tau2 / dt * zr) / (1.0 + yr)
zr = (yr - ypr) / dt
l = sso + 0.5 + scale * ssm * yl
if l >= sso - 1 + ssm: l = sso - 1 + ssm
if l < sso - ssm: l = sso - ssm
ss = int(l) & 0xFFFFFFFF
# Left channel
if hifi:
wi(fp, ss)
else:
fp.write(struct.pack('B', ss & 0xff))
l = sso + 0.5 + scale * ssm * yr
if l >= sso - 1 + ssm: l = sso - 1 + ssm
if l < sso - ssm: l = sso - ssm
ss = int(l) & 0xFFFFFFFF
# Right channel
if hifi:
wi(fp, ss)
else:
fp.write(struct.pack('B', ss & 0xff))
current_frame += 1
fp.close()
playSound("hificode.wav") # Play the soundscape
current_frame = 0 # Reset sample count
cap.release()
cv.destroyAllWindows()
return 0
main()
| joshainglis/python-soundscape | soundscape.py | Python | mit | 14,885 |
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.utils import six
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_response_errors,
webapi_request_fields)
from djblets.webapi.errors import (DOES_NOT_EXIST, NOT_LOGGED_IN,
PERMISSION_DENIED)
from reviewboard.reviews.models import Group
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.errors import INVALID_USER
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.user import UserResource
class ReviewGroupUserResource(UserResource):
"""Provides information on users that are members of a review group."""
allowed_methods = ('GET', 'POST', 'DELETE')
policy_id = 'review_group_user'
def get_queryset(self, request, group_name, local_site_name=None,
*args, **kwargs):
group = Group.objects.get(name=group_name,
local_site__name=local_site_name)
return group.users.all()
def has_access_permissions(self, request, user, *args, **kwargs):
group = resources.review_group.get_object(request, *args, **kwargs)
return group.is_accessible_by(request.user)
def has_list_access_permissions(self, request, *args, **kwargs):
group = resources.review_group.get_object(request, *args, **kwargs)
return group.is_accessible_by(request.user)
def has_modify_permissions(self, request, group, username, local_site):
return (
resources.review_group.has_modify_permissions(request, group) or
(request.user.username == username and
group.is_accessible_by(request.user))
)
def has_delete_permissions(self, request, user, *args, **kwargs):
group = resources.review_group.get_object(request, *args, **kwargs)
return group.is_mutable_by(request.user)
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, INVALID_USER,
NOT_LOGGED_IN, PERMISSION_DENIED)
@webapi_request_fields(required={
'username': {
'type': six.text_type,
'description': 'The user to add to the group.',
},
})
def create(self, request, username, *args, **kwargs):
"""Adds a user to a review group."""
group_resource = resources.review_group
try:
group = group_resource.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
local_site = self._get_local_site(kwargs.get('local_site_name', None))
if (not group_resource.has_access_permissions(request, group) or
not self.has_modify_permissions(request, group, username,
local_site)):
return self._no_access_error(request.user)
try:
if local_site:
user = local_site.users.get(username=username)
else:
user = User.objects.get(username=username)
except ObjectDoesNotExist:
return INVALID_USER
group.users.add(user)
return 201, {
self.item_result_key: user,
}
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, INVALID_USER,
NOT_LOGGED_IN, PERMISSION_DENIED)
def delete(self, request, *args, **kwargs):
"""Removes a user from a review group."""
group_resource = resources.review_group
try:
group = group_resource.get_object(request, *args, **kwargs)
user = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
local_site = self._get_local_site(kwargs.get('local_site_name', None))
if (not group_resource.has_access_permissions(request, group) or
not self.has_modify_permissions(request, group, user.username,
local_site)):
return self._no_access_error(request.user)
group.users.remove(user)
return 204, {}
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get_list(self, *args, **kwargs):
"""Retrieves the list of users belonging to a specific review group.
This includes only the users who have active accounts on the site.
Any account that has been disabled (for inactivity, spam reasons,
or anything else) will be excluded from the list.
The list of users can be filtered down using the ``q`` and
``fullname`` parameters.
Setting ``q`` to a value will by default limit the results to
usernames starting with that value. This is a case-insensitive
comparison.
If ``fullname`` is set to ``1``, the first and last names will also be
checked along with the username. ``fullname`` is ignored if ``q``
is not set.
For example, accessing ``/api/users/?q=bo&fullname=1`` will list
any users with a username, first name or last name starting with
``bo``.
"""
pass
review_group_user_resource = ReviewGroupUserResource()
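

# Rough shape of the HTTP API this resource exposes (paths and payloads are
# illustrative, not copied from the Review Board URL configuration):
#
#     GET    /api/groups/<group>/users/?q=bo&fullname=1        -> list matching members
#     POST   /api/groups/<group>/users/  {'username': 'alice'} -> 201, adds a member
#     DELETE /api/groups/<group>/users/alice/                  -> 204, removes a member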
| 1tush/reviewboard | reviewboard/webapi/resources/review_group_user.py | Python | mit | 5,564 |
# set command to set global variables
from lib.utils import *
def _help():
usage = '''
Usage: set [options] (var) [value]
[options]:
-h Print this help.
-del (var) Delete variable
(var) if defined.
where (var) is a valid
global variable
if [value] is not given,
current value is returned
'''
print(usage)
def main(argv):
if '-h' in argv:
_help()
return
    # The shell doesn't send the command name in the arg list,
    # so the following line is no longer needed:
    # argv.pop(0)  # remove arg
# to show all vars
if len(argv) < 1:
for i in prop.vars():
print(i, ' = ', prop.get(i))
return
if '-del' in argv:
try:
var = argv[1]
# detect system vars
if var == 'save_state' or var == 'c_char':
err(4, add='Cant delete system variable "' + var + '"')
return
prop.delete(var)
return
except IndexError:
err(4, add='variable name was missing')
return
var = argv[0]
if len(argv) < 2:
val = prop.get(var)
if val == NULL:
err(4, var)
return
print(val)
return
# remove name of var
argv.pop(0)
# make the rest the val
val = make_s(argv)
try:
prop.set(var, val)
except ValueError:
err(4, add="can't create this variable")
| nayas360/pyterm | bin/set.py | Python | mit | 1,471 |
'''
- Leetcode problem: 210
- Difficulty: Medium
- Brief problem description:
There are a total of n courses you have to take, labeled from 0 to n-1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, return the ordering of courses you should take to finish all courses.
There may be multiple correct orders, you just need to return one of them. If it is impossible to finish all courses, return an empty array.
Example 1:
Input: 2, [[1,0]]
Output: [0,1]
Explanation: There are a total of 2 courses to take. To take course 1 you should have finished
course 0. So the correct course order is [0,1] .
Example 2:
Input: 4, [[1,0],[2,0],[3,1],[3,2]]
Output: [0,1,2,3] or [0,2,1,3]
Explanation: There are a total of 4 courses to take. To take course 3 you should have finished both
courses 1 and 2. Both courses 1 and 2 should be taken after you finished course 0.
So one correct course order is [0,1,2,3]. Another correct ordering is [0,2,1,3] .
Note:
The input prerequisites is a graph represented by a list of edges, not adjacency matrices. Read more about how a graph is represented.
You may assume that there are no duplicate edges in the input prerequisites.
- Solution Summary:
Topological sort
- Used Resources:
--- Bo Zhou
'''
from collections import defaultdict, deque
from typing import List


class Solution:
def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
dag = defaultdict(list)
in_degree = {}
for p in prerequisites:
in_degree[p[0]] = in_degree.get(p[0], 0) + 1
dag[p[1]].append(p[0])
zero_dq = deque()
for i in range(numCourses):
if not in_degree.get(i):
zero_dq.append(i)
ordered_course = []
while zero_dq:
course = zero_dq.popleft()
ordered_course.append(course)
nb = dag.get(course, [])
for c in nb:
in_degree[c] = in_degree.get(c) - 1
if in_degree[c] == 0:
zero_dq.append(c)
if len(ordered_course) == numCourses:
return ordered_course
else:
return []
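

# Quick illustrative check using the example invocations from the problem
# statement above (a different valid ordering may be printed for the second case):
if __name__ == "__main__":
    sol = Solution()
    print(sol.findOrder(2, [[1, 0]]))                          # [0, 1]
    print(sol.findOrder(4, [[1, 0], [2, 0], [3, 1], [3, 2]]))  # [0, 1, 2, 3] or [0, 2, 1, 3]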
| bzhou26/leetcode_sol | p210_Course_Schedule_II.py | Python | mit | 2,295 |
import logging
logging.basicConfig(filename='test-logfile.log', level=logging.DEBUG)
top = 50
for i in range(top):
print(i)
logging.info('Loop completed, reached %s' % top)
| peterhogan/python | logtest.py | Python | mit | 177 |
import os
import pytest
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
SITE_DIR = os.path.join(BASE_DIR, "site")
@pytest.fixture
def site_dir():
return SITE_DIR
@pytest.fixture
def output_exist():
return lambda path: os.path.exists(os.path.join(SITE_DIR, "deploy", path))
@pytest.fixture(autouse=True)
def chdir():
from catsup.options import g
os.chdir(SITE_DIR)
g.cwdpath = SITE_DIR
| whtsky/Catsup | tests/conftest.py | Python | mit | 419 |
# -*- coding: utf-8 -*-
from ..request import Request
from ..query import Entity
from ..query import Entry
import json
class UserEntity(Entity):
"""UserEntity object used for upload user entities.
Detail information about user entities you can see at our site
https://docs.api.ai/docs/userentities"""
@property
def session_id(self):
"""session_id parameter used for determinate of every unique users."""
return self._session_id
@session_id.setter
def session_id(self, session_id):
self._session_id = session_id
@property
def extend(self):
return self._extend
@extend.setter
def extend(self, extend):
"""extend parameter used definition user entities logic. If True then
uploaded user entities will be mixed with user entities specified in
server side else currently uploaded entities witll uverride
server entities."""
self._extend = extend
def __init__(self, name, entries, session_id=None, extend=False):
super(UserEntity, self).__init__(name, entries)
self._session_id = session_id
self._extend = extend
"""Private method used for object serialization."""
def _to_dict(self):
parent_data = super(UserEntity, self)._to_dict()
if self._session_id is not None:
parent_data['sessionId'] = self._session_id
parent_data['extend'] = self._extend
return parent_data
class UserEntityEntry(Entry):
"""UserEntityEntry object used for upload user entities.
Detail information about user entities you can see at our site
https://docs.api.ai/docs/userentities"""
pass
class UserEntitiesRequest(Request):
"""UserEntitiesRequest is request for upload user entities.
Detail see http://docs.api.ai/"""
@property
def user_entities(self):
"user_entities parameter for specification of same user entities."
return self._user_entities
@user_entities.setter
def user_entities(self, user_entities):
self._user_entities = user_entities
def __init__(self, client_access_token, base_url, user_entities=[]):
super(UserEntitiesRequest, self).__init__(client_access_token,
base_url,
'/v1/userEntities',
{})
self._user_entities = user_entities
def _prepare_headers(self):
return {
'Content-Type': 'application/json; charset=utf-8',
'Content-Length': len(self._prepage_end_request_data())
}
def _prepage_begin_request_data(self):
return None
def _prepage_end_request_data(self):
        # a list comprehension keeps this valid on Python 3 (json cannot serialize a map object)
        return json.dumps([x._to_dict() for x in self._user_entities])
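

# Illustrative sketch of how these classes fit together. The values are made up,
# and the UserEntityEntry constructor signature (a value plus a list of synonyms)
# is an assumption inherited from the base Entry class in ..query:
#
#     entries = [UserEntityEntry('coffee', ['coffee', 'latte', 'espresso'])]
#     entity = UserEntity('drink', entries, session_id='abc123', extend=True)
#     request = UserEntitiesRequest(CLIENT_ACCESS_TOKEN, 'api.api.ai', [entity])
#     response = request.getresponse()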
| ChonchoFronto/sarah | lambda/apiai/requests/user_entities/user_entities_request.py | Python | mit | 2,842 |
from simplequeue.lib.configuration import config
__all__ = ['config']
| geonetix/simplemq | simplequeue/__init__.py | Python | mit | 71 |
from __future__ import unicode_literals
import arrow
from django.db.models import DateTimeField, SubfieldBase
from .form_fields import ArrowField as ArrowFormField
class ArrowField(DateTimeField):
__metaclass__ = SubfieldBase
def to_python(self, value):
if isinstance(value, arrow.Arrow):
return value
value = super(ArrowField, self).to_python(value)
if value:
return arrow.get(value)
def get_prep_value(self, value):
if value:
return value.datetime
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return '' if value is None else value.isoformat()
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = arrow.utcnow()
setattr(model_instance, self.attname, value)
return value
else:
return super(ArrowField, self).pre_save(model_instance, add)
def formfield(self, **kwargs):
defaults = {'form_class': ArrowFormField}
defaults.update(kwargs)
return super(ArrowField, self).formfield(**defaults)
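

# Example of attaching the field to a model (illustrative; `Event` is a
# hypothetical model). Values read back from instances are arrow.Arrow objects,
# so e.g. `event.starts_at.humanize()` works directly.
#
#     class Event(models.Model):
#         starts_at = ArrowField()
#         created_at = ArrowField(auto_now_add=True)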
| gizmag/django-arrow-field | arrow_field/model_fields.py | Python | mit | 1,161 |
#!/usr/bin/env python
#
#
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/Utilities24/prepare_dpf4.py,v 1.14.4.1 2011/12/01 17:16:33 rhuey Exp $
#
import string
import os.path
from MolKit import Read
from AutoDockTools.DockingParameters import DockingParameters, DockingParameter4FileMaker, genetic_algorithm_list, \
     genetic_algorithm_local_search_list4, local_search_list4,\
     simulated_annealing_list4, epdb_list4_2
def usage():
print "Usage: prepare_dpf4.py -l pdbqt_file -r pdbqt_file"
print " -l ligand_filename"
print " -r receptor_filename"
print
print "Optional parameters:"
print " [-o output dpf_filename]"
print " [-i template dpf_filename]"
print " [-x flexres_filename]"
print " [-p parameter_name=new_value]"
print " [-k list of parameters to write]"
print " [-e write epdb dpf ]"
print " [-v] verbose output"
print " [-L] use local search parameters"
print " [-S] use simulated annealing search parameters"
print " [-s] seed population using ligand's present conformation"
print
print "Prepare a docking parameter file (DPF) for AutoDock4."
print
print " The DPF will by default be <ligand>_<receptor>.dpf. This"
print "may be overridden using the -o flag."
if __name__ == '__main__':
import getopt
import sys
try:
opt_list, args = getopt.getopt(sys.argv[1:], 'sLShvl:r:i:o:x:p:k:e')
except getopt.GetoptError, msg:
print 'prepare_dpf4.py: %s' % msg
usage()
sys.exit(2)
receptor_filename = ligand_filename = None
dpf_filename = None
template_filename = None
flexres_filename = None
parameters = []
parameter_list = genetic_algorithm_local_search_list4
pop_seed = False
verbose = None
epdb_output = False
for o, a in opt_list:
if verbose: print "o=", o, ' a=', a
if o in ('-v', '--v'):
verbose = 1
if verbose: print 'verbose output'
if o in ('-l', '--l'): #ligand filename
ligand_filename = a
if verbose: print 'ligand_filename =', ligand_filename
if o in ('-r', '--r'): #receptor filename
receptor_filename = a
if verbose: print 'receptor_filename =', receptor_filename
if o in ('-x', '--x'): #flexres_filename
flexres_filename = a
if verbose: print 'flexres_filename =', flexres_filename
if o in ('-i', '--i'): #input reference
template_filename = a
if verbose: print 'template_filename =', template_filename
if o in ('-o', '--o'): #output filename
dpf_filename = a
if verbose: print 'output dpf_filename =', dpf_filename
if o in ('-p', '--p'): #parameter
parameters.append(a)
if verbose: print 'parameters =', parameters
if o in ('-e', '--e'):
epdb_output = True
if verbose: print 'output epdb file'
parameter_list = epdb_list4_2
if o in ('-k', '--k'): #parameter_list_to_write
parameter_list = a
if verbose: print 'parameter_list =', parameter_list
if o in ('-L', '--L'): #parameter_list_to_write
local_search = 1
parameter_list = local_search_list4
if verbose: print 'parameter_list =', parameter_list
if o in ('-S', '--S'): #parameter_list_to_write
parameter_list = simulated_annealing_list4
if verbose: print 'parameter_list =', parameter_list
if o in ('-h', '--'):
usage()
sys.exit()
if o in ('-s'):
pop_seed = True
if (not receptor_filename) or (not ligand_filename):
print "prepare_dpf4.py: ligand and receptor filenames"
print " must be specified."
usage()
sys.exit()
#9/2011: fixing local_search bugs:
# specifically:
# 1. quaternion0 0 0 0 0
# 2. dihe0 0 0 0 0 0 <one per rotatable bond>
# 3. about == tran0
# 4. remove tstep qstep and dstep
# 5. remove ls_search_freq
local_search = parameter_list==local_search_list4
dm = DockingParameter4FileMaker(verbose=verbose)
if template_filename is not None: #setup values by reading dpf
dm.dpo.read(template_filename)
dm.set_ligand(ligand_filename)
dm.set_receptor(receptor_filename)
if flexres_filename is not None:
flexmol = Read(flexres_filename)[0]
flexres_types = flexmol.allAtoms.autodock_element
lig_types = dm.dpo['ligand_types']['value'].split()
all_types = lig_types
for t in flexres_types:
if t not in all_types:
all_types.append(t)
all_types_string = all_types[0]
if len(all_types)>1:
for t in all_types[1:]:
all_types_string = all_types_string + " " + t
if verbose: print "adding ", t, " to all_types->", all_types_string
dm.dpo['ligand_types']['value'] = all_types_string
dm.dpo['flexres']['value'] = flexres_filename
dm.dpo['flexres_flag']['value'] = True
#dm.set_docking_parameters( ga_num_evals=1750000,ga_pop_size=150, ga_run=20, rmstol=2.0)
kw = {}
for p in parameters:
key,newvalue = string.split(p, '=')
#detect string reps of lists: eg "[1.,1.,1.]"
if newvalue[0]=='[':
nv = []
for item in newvalue[1:-1].split(','):
nv.append(float(item))
#print "nv=", nv
newvalue = nv
if key=='epdb_flag':
print "setting epdb_flag to", newvalue
kw['epdb_flag'] = 1
elif key=='set_psw1':
print "setting psw1_flag to", newvalue
kw['set_psw1'] = 1
kw['set_sw1'] = 0
elif key=='set_sw1':
print "setting set_sw1 to", newvalue
kw['set_sw1'] = 1
kw['set_psw1'] = 0
elif key=='include_1_4_interactions_flag':
kw['include_1_4_interactions'] = 1
elif 'flag' in key:
if newvalue in ['1','0']:
newvalue = int(newvalue)
if newvalue =='False':
newvalue = False
if newvalue =='True':
newvalue = True
elif local_search and 'about' in key:
kw['about'] = newvalue
kw['tran0'] = newvalue
else:
kw[key] = newvalue
apply(dm.set_docking_parameters, (), kw)
if key not in parameter_list:
#special hack for output_pop_file
if key=='output_pop_file':
parameter_list.insert(parameter_list.index('set_ga'), key)
else:
parameter_list.append(key)
dm.write_dpf(dpf_filename, parameter_list, pop_seed)
#prepare_dpf4.py -l indinavir.pdbq -r 1hsg.pdbqs -p ga_num_evals=20000000 -p ga_pop_size=150 -p ga_run=17 -i ref.dpf -o testing.dpf
| Reimilia/pdb_sth | mapping/prepare_dpf4.py | Python | mit | 7,081 |
from similarity.webpage import WebPage
| Nozdi/webpage-similarity | similarity/__init__.py | Python | mit | 39 |
from aioredis.util import wait_convert, wait_ok, _NOTSET, _ScanIter
class GenericCommandsMixin:
"""Generic commands mixin.
For commands details see: http://redis.io/commands/#generic
"""
def delete(self, key, *keys):
"""Delete a key."""
fut = self.execute(b'DEL', key, *keys)
return wait_convert(fut, int)
def dump(self, key):
"""Dump a key."""
return self.execute(b'DUMP', key)
def exists(self, key, *keys):
"""Check if key(s) exists.
.. versionchanged:: v0.2.9
Accept multiple keys; **return** type **changed** from bool to int.
"""
return self.execute(b'EXISTS', key, *keys)
def expire(self, key, timeout):
"""Set a timeout on key.
if timeout is float it will be multiplied by 1000
coerced to int and passed to `pexpire` method.
Otherwise raises TypeError if timeout argument is not int.
"""
if isinstance(timeout, float):
return self.pexpire(key, int(timeout * 1000))
if not isinstance(timeout, int):
raise TypeError(
"timeout argument must be int, not {!r}".format(timeout))
fut = self.execute(b'EXPIRE', key, timeout)
return wait_convert(fut, bool)
def expireat(self, key, timestamp):
"""Set expire timestamp on a key.
if timeout is float it will be multiplied by 1000
coerced to int and passed to `pexpireat` method.
Otherwise raises TypeError if timestamp argument is not int.
"""
if isinstance(timestamp, float):
return self.pexpireat(key, int(timestamp * 1000))
if not isinstance(timestamp, int):
raise TypeError("timestamp argument must be int, not {!r}"
.format(timestamp))
fut = self.execute(b'EXPIREAT', key, timestamp)
return wait_convert(fut, bool)
def keys(self, pattern, *, encoding=_NOTSET):
"""Returns all keys matching pattern."""
return self.execute(b'KEYS', pattern, encoding=encoding)
def migrate(self, host, port, key, dest_db, timeout, *,
copy=False, replace=False):
"""Atomically transfer a key from a Redis instance to another one."""
if not isinstance(host, str):
raise TypeError("host argument must be str")
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if not isinstance(dest_db, int):
raise TypeError("dest_db argument must be int")
if not host:
raise ValueError("Got empty host")
if dest_db < 0:
raise ValueError("dest_db must be greater equal 0")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
flags = []
if copy:
flags.append(b'COPY')
if replace:
flags.append(b'REPLACE')
fut = self.execute(b'MIGRATE', host, port,
key, dest_db, timeout, *flags)
return wait_ok(fut)
def migrate_keys(self, host, port, keys, dest_db, timeout, *,
copy=False, replace=False):
"""Atomically transfer keys from one Redis instance to another one.
Keys argument must be list/tuple of keys to migrate.
"""
if not isinstance(host, str):
raise TypeError("host argument must be str")
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int")
if not isinstance(dest_db, int):
raise TypeError("dest_db argument must be int")
if not isinstance(keys, (list, tuple)):
raise TypeError("keys argument must be list or tuple")
if not host:
raise ValueError("Got empty host")
if dest_db < 0:
raise ValueError("dest_db must be greater equal 0")
if timeout < 0:
raise ValueError("timeout must be greater equal 0")
if not keys:
raise ValueError("keys must not be empty")
flags = []
if copy:
flags.append(b'COPY')
if replace:
flags.append(b'REPLACE')
flags.append(b'KEYS')
flags.extend(keys)
fut = self.execute(b'MIGRATE', host, port,
"", dest_db, timeout, *flags)
return wait_ok(fut)
def move(self, key, db):
"""Move key from currently selected database to specified destination.
:raises TypeError: if db is not int
:raises ValueError: if db is less than 0
"""
if not isinstance(db, int):
raise TypeError("db argument must be int, not {!r}".format(db))
if db < 0:
raise ValueError("db argument must be not less than 0, {!r}"
.format(db))
fut = self.execute(b'MOVE', key, db)
return wait_convert(fut, bool)
def object_refcount(self, key):
"""Returns the number of references of the value associated
with the specified key (OBJECT REFCOUNT).
"""
return self.execute(b'OBJECT', b'REFCOUNT', key)
def object_encoding(self, key):
"""Returns the kind of internal representation used in order
to store the value associated with a key (OBJECT ENCODING).
"""
# TODO: set default encoding to 'utf-8'
return self.execute(b'OBJECT', b'ENCODING', key)
def object_idletime(self, key):
"""Returns the number of seconds since the object is not requested
by read or write operations (OBJECT IDLETIME).
"""
return self.execute(b'OBJECT', b'IDLETIME', key)
def persist(self, key):
"""Remove the existing timeout on key."""
fut = self.execute(b'PERSIST', key)
return wait_convert(fut, bool)
def pexpire(self, key, timeout):
"""Set a milliseconds timeout on key.
:raises TypeError: if timeout is not int
"""
if not isinstance(timeout, int):
raise TypeError("timeout argument must be int, not {!r}"
.format(timeout))
fut = self.execute(b'PEXPIRE', key, timeout)
return wait_convert(fut, bool)
def pexpireat(self, key, timestamp):
"""Set expire timestamp on key, timestamp in milliseconds.
:raises TypeError: if timeout is not int
"""
if not isinstance(timestamp, int):
raise TypeError("timestamp argument must be int, not {!r}"
.format(timestamp))
fut = self.execute(b'PEXPIREAT', key, timestamp)
return wait_convert(fut, bool)
def pttl(self, key):
"""Returns time-to-live for a key, in milliseconds.
Special return values (starting with Redis 2.8):
* command returns -2 if the key does not exist.
* command returns -1 if the key exists but has no associated expire.
"""
# TODO: maybe convert negative values to:
# -2 to None - no key
# -1 to False - no expire
return self.execute(b'PTTL', key)
def randomkey(self, *, encoding=_NOTSET):
"""Return a random key from the currently selected database."""
return self.execute(b'RANDOMKEY', encoding=encoding)
def rename(self, key, newkey):
"""Renames key to newkey.
:raises ValueError: if key == newkey
"""
if key == newkey:
raise ValueError("key and newkey are the same")
fut = self.execute(b'RENAME', key, newkey)
return wait_ok(fut)
def renamenx(self, key, newkey):
"""Renames key to newkey only if newkey does not exist.
:raises ValueError: if key == newkey
"""
if key == newkey:
raise ValueError("key and newkey are the same")
fut = self.execute(b'RENAMENX', key, newkey)
return wait_convert(fut, bool)
def restore(self, key, ttl, value):
"""Creates a key associated with a value that is obtained via DUMP."""
return self.execute(b'RESTORE', key, ttl, value)
def scan(self, cursor=0, match=None, count=None):
"""Incrementally iterate the keys space.
Usage example:
>>> match = 'something*'
>>> cur = b'0'
>>> while cur:
... cur, keys = await redis.scan(cur, match=match)
... for key in keys:
... print('Matched:', key)
"""
args = []
if match is not None:
args += [b'MATCH', match]
if count is not None:
args += [b'COUNT', count]
fut = self.execute(b'SCAN', cursor, *args)
return wait_convert(fut, lambda o: (int(o[0]), o[1]))
def iscan(self, *, match=None, count=None):
"""Incrementally iterate the keys space using async for.
Usage example:
>>> async for key in redis.iscan(match='something*'):
... print('Matched:', key)
"""
return _ScanIter(lambda cur: self.scan(cur,
match=match, count=count))
def sort(self, key, *get_patterns,
by=None, offset=None, count=None,
asc=None, alpha=False, store=None):
"""Sort the elements in a list, set or sorted set."""
args = []
if by is not None:
args += [b'BY', by]
if offset is not None and count is not None:
args += [b'LIMIT', offset, count]
if get_patterns:
args += sum(([b'GET', pattern] for pattern in get_patterns), [])
if asc is not None:
args += [asc is True and b'ASC' or b'DESC']
if alpha:
args += [b'ALPHA']
if store is not None:
args += [b'STORE', store]
return self.execute(b'SORT', key, *args)
def touch(self, key, *keys):
"""Alters the last access time of a key(s).
Returns the number of keys that were touched.
"""
return self.execute(b'TOUCH', key, *keys)
def ttl(self, key):
"""Returns time-to-live for a key, in seconds.
Special return values (starting with Redis 2.8):
* command returns -2 if the key does not exist.
* command returns -1 if the key exists but has no associated expire.
"""
# TODO: maybe convert negative values to:
# -2 to None - no key
# -1 to False - no expire
return self.execute(b'TTL', key)
def type(self, key):
"""Returns the string representation of the value's type stored at key.
"""
# NOTE: for non-existent keys TYPE returns b'none'
return self.execute(b'TYPE', key)
def unlink(self, key, *keys):
"""Delete a key asynchronously in another thread."""
return wait_convert(self.execute(b'UNLINK', key, *keys), int)
def wait(self, numslaves, timeout):
"""Wait for the synchronous replication of all the write
commands sent in the context of the current connection.
"""
return self.execute(b'WAIT', numslaves, timeout)
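

# Minimal usage sketch (assumes the aioredis v1-style factory API, where this
# mixin ends up on the Redis command object returned by create_redis_pool):
#
#     import asyncio
#     import aioredis
#
#     async def demo():
#         redis = await aioredis.create_redis_pool('redis://localhost')
#         await redis.expire('some-key', 30)           # seconds
#         print(await redis.ttl('some-key'))           # ~30
#         async for key in redis.iscan(match='some*'):
#             print('matched', key)
#         redis.close()
#         await redis.wait_closed()
#
#     asyncio.get_event_loop().run_until_complete(demo())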
| ymap/aioredis | aioredis/commands/generic.py | Python | mit | 11,140 |
"""
Flask-Blogging is a Flask extension to add blog support to your
web application. This extension uses Markdown to store and then
render the webpage.

Author: Gouthaman Balaraman
Date: June 1, 2015
"""
from .engine import BloggingEngine
from .processor import PostProcessor
from .sqlastorage import SQLAStorage
from .storage import Storage

__author__ = 'Gouthaman Balaraman'
__version__ = '0.4.2'
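

# Typical wiring sketch (illustrative; assumes a SQLAlchemy engine named `engine`
# and the 0.4.x constructor signatures re-exported above):
#
#     from flask import Flask
#     app = Flask(__name__)
#     storage = SQLAStorage(engine)
#     blog_engine = BloggingEngine(app, storage)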
| wdm0006/Flask-Blogging | flask_blogging/__init__.py | Python | mit | 403 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from dashboard.views import QuestionApi
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'zuobiao.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^api/question/(?P<pk>\d+)/$', QuestionApi.as_view(), name='question_api'),
)
| phyng/zuobiao | zuobiao/zuobiao/urls.py | Python | mit | 402 |
# -*- coding: utf-8 -*-
import os
import sys
import inspect
cmd_folder = os.path.realpath(
os.path.abspath(
os.path.split(
inspect.getfile(
inspect.currentframe()
)
)[0]
)
)
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from smbkmeans import *
import pandas as pd
import numpy as np
import scipy.sparse as sp
import random
from bson.son import SON
from pymongo import MongoClient
from monary import Monary
import bz2
try:
import cPickle as pickle
except:
import pickle
settings = {
'mongo_host': 'server.local',
'mongo_db_name': 'mydb',
'mongo_port': 27017,
'tfidf_collection': 'tfidf',
'models_per_k': 25,
'ld_k_min': 0.5,
'ld_k_max': 2.5,
'k_steps': 50,
'batch_size': 1024
}
blacklist = {
'consumers': [],
'brands': [0],
'companies': [10000],
'categories': [0]
}
if __name__ == "__main__":
# establish PyMongo connection:
mongo_client = MongoClient(settings['mongo_host'],
settings['mongo_port'])
mongo_db = mongo_client[settings['mongo_db_name']]
# get collection:
tfidf_collection = mongo_db[settings['tfidf_collection']]
# find out who the consumers are
cursor = tfidf_collection.find(
{"consumer": {
"$nin": blacklist['consumers']
}}
).distinct('consumer')
consumers = np.array(cursor, dtype=np.int64)
n_consumers = len(consumers)
# find out how many items there are
cursor = tfidf_collection.find().distinct('item')
items = np.array(cursor, dtype=np.int64)
n_items = len(items)
# close PyMongo connection
mongo_client.close()
# set up Monary
monary_client = Monary(settings['mongo_host'],
settings['mongo_port'])
def get_consumer_mtx(consumer_batch):
'''Returns a sparse matrix with feature vectors for a consumer batch.'''
pipeline = [
{"$match": {
"consumer": {"$in": consumer_batch},
"brand": {"$nin": blacklist['brands']},
"company": {"$nin": blacklist['companies']},
"category": {"$nin": blacklist['categories']}
}},
{"$project": {
"_id": False,
"consumer": True,
"item": True,
"tfidf": "$purchasetfidf2"
}},
{"$sort": SON([("consumer", 1)])}
]
try:
# careful! Monary returns masked numpy arrays!
result = monary_client.aggregate(
settings['mongo_db_name'],
settings['tfidf_collection'],
pipeline,
["consumer", "item", "tfidf"],
["int64", "int64", "float64"])
except:
return sp.csr_matrix(shape=(len(consumer_batch), n_items),
dtype=np.float64)
# convert into CSR matrix
_, consumer_idcs = np.unique(result[0].data,
return_inverse=True)
mtx = sp.csr_matrix(
(result[2].data, (consumer_idcs,
result[1].data)),
shape=(len(consumer_batch), n_items),
dtype=np.float64)
# normalize each row (this step can't be moved into the database
# because of the item blacklist)
for row_idx in xrange(len(consumer_batch)):
row = mtx.data[mtx.indptr[row_idx]:mtx.indptr[row_idx + 1]]
row /= np.linalg.norm(row)
return mtx
def get_batch(batch_size=100, offset=0, random_pick=True):
if random_pick:
# pick batch_size examples randomly from the consumers in the
# collection
consumer_batch = random.sample(consumers, batch_size)
else:
# advance index by offset
consumer_batch = list(consumers)[offset:]
# get the next batch_size consumers from the collection
consumer_batch = consumer_batch[:batch_size]
# obtain sparse matrix filled with feature vectors from database
mtx = get_consumer_mtx(consumer_batch)
return mtx
# train the models
ns_clusters = np.unique(np.int64(np.floor(
10. ** np.linspace(settings['ld_k_min'],
settings['ld_k_max'],
settings['k_steps'],
endpoint=True))))
np.random.shuffle(ns_clusters)
ns_clusters = ns_clusters.tolist()
models = [SphericalMiniBatchKMeans(n_clusters=n_clusters,
n_init=10,
max_iter=1000,
batch_size=settings['batch_size'],
reassignment_ratio=.01,
max_no_improvement=10,
project_l=5.) for _ in xrange(settings['models_per_k']) for n_clusters in ns_clusters]
filename = cmd_folder + '/tfidf_smbkmeans__tfidf2.pkl.bz2'
for model in models:
_ = model.fit(n_samples=n_consumers,
get_batch=get_batch)
fp = bz2.BZ2File(filename, 'w')
pickle.dump(models, fp, pickle.HIGHEST_PROTOCOL)
fp.close()
| tscholak/smbkmeans | tfidf_smbkmeans.py | Python | mit | 5,558 |
import requests
from urllib.parse import parse_qs, urlparse
from lxml.html import fromstring
_HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/41.0.2272.76 Chrome/41.0.2272.76 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml'
}
# get results from search
query = {"q": "site:scholar.google.com \"From Mechanism to Mouse\" "}
url = "https://cn.bing.com/search"
html = requests.get(url, headers=_HEADERS, params=query)
print(html.request.headers)
print(html.url)
print(html.content)
tree = fromstring(html.content)
results = tree.xpath(".//*[@id='b_results']/li/div[1]/h2/a")
print(len(results))
# grab the first link
link = results[0].get('href')
print(link)
# parse the destination url from the querystring
qs = urlparse(link).query
parsed_qs = parse_qs(qs)
print(parsed_qs)
print(parsed_qs.get('user', []))
# as one list
links = []
for result in results:
link = result.get('href')
qs = urlparse(link).query
links.extend(parse_qs(qs).get('user', []))
print(links)
| cit563emef2dasdme/jklasjdf12nfasfdkl | scrape_google_scholar_from_bing.py | Python | mit | 1,084 |
"""
Copyright (c) 2016 Gabriel Esteban
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of
the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from django.contrib import admin
# Register your models here.
| galaxyfeeder/CodeSubmission | main/admin.py | Python | mit | 1,131 |
from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse  # use django.urls.reverse on Django >= 2.0
from modpacks.models.modpack import Modpack
class Server(models.Model):
""" Minecraft Server details for display on the server page """
name = models.CharField(verbose_name='Server Name',
max_length=200)
desc = models.TextField(verbose_name='Server Description',
blank=True)
modpack = models.ForeignKey(Modpack, verbose_name='Server Modpack')
address = models.CharField(verbose_name='Server Address',
max_length=200,
blank=True)
screenshot = models.ImageField(verbose_name='Screenshot',
blank=True)
dynmap = models.CharField(verbose_name='DynMap URL',
max_length=200,
blank=True)
slug = models.SlugField()
def get_absolute_url(self):
        return reverse("server", args=[self.slug])
def __str__(self):
return self.name
| Jonpro03/Minecrunch_Web | src/servers/models.py | Python | mit | 921 |
from lexer import lang
from ..tree import Node
class Integer(Node):
datatype = lang.SEMANTIC_INT_TYPE
"""docstring for Integer."""
def __init__(self, symbol, token):
super().__init__(symbol, token)
def generate_code(self, **cond):
array, line = Node.assignated_array()
Node.array_append(array, f'{line} LIT {self.symbol}, 0')
| andaviaco/tronido | src/syntax/types/integer.py | Python | mit | 370 |
"""Ingest Stage IV Hourly Files.
1. Copies to hourly stage IV netCDF files
2. Copies hourly stage IV netCDF to hourly IEMRE
"""
import os
import datetime
import sys
import numpy as np
from scipy.interpolate import NearestNDInterpolator
import pygrib
from pyiem import iemre
from pyiem.util import utc, ncopen, logger
LOG = logger()
def get_p01m_status(valid):
"""Figure out what our current status is of this hour."""
nc = ncopen(
("/mesonet/data/stage4/%s_stage4_hourly.nc") % (valid.year,),
timeout=300,
)
tidx = iemre.hourly_offset(valid)
# 2 prism_adjust_stage4 ran
# 1 copied hourly data in
# 0 nothing happened
p01m_status = nc.variables["p01m_status"][tidx]
nc.close()
LOG.debug("p01m_status is %s for valid %s", p01m_status, valid)
return p01m_status
def ingest_hourly_grib(valid):
"""Copy the hourly grib data into the netcdf storage.
Returns:
int value of the new p01m_status
"""
tidx = iemre.hourly_offset(valid)
fn = valid.strftime(
"/mesonet/ARCHIVE/data/%Y/%m/%d/stage4/ST4.%Y%m%d%H.01h.grib"
)
if not os.path.isfile(fn):
LOG.info("stage4_ingest: missing file %s", fn)
return 0
gribs = pygrib.open(fn)
grb = gribs[1]
val = grb.values
# values over 10 inches are bad
val = np.where(val > 250.0, 0, val)
ncfn = f"/mesonet/data/stage4/{valid.year}_stage4_hourly.nc"
with ncopen(ncfn, "a", timeout=300) as nc:
p01m = nc.variables["p01m"]
# account for legacy grid prior to 2002
if val.shape == (880, 1160):
p01m[tidx, 1:, :] = val[:, 39:]
else:
p01m[tidx, :, :] = val
nc.variables["p01m_status"][tidx] = 1
LOG.debug(
"write p01m to stage4 netcdf min: %.2f avg: %.2f max: %.2f",
np.min(val),
np.mean(val),
np.max(val),
)
return 1
def copy_to_iemre(valid):
"""verbatim copy over to IEMRE."""
tidx = iemre.hourly_offset(valid)
ncfn = f"/mesonet/data/stage4/{valid.year}_stage4_hourly.nc"
with ncopen(ncfn, "a", timeout=300) as nc:
lats = nc.variables["lat"][:]
lons = nc.variables["lon"][:]
val = nc.variables["p01m"][tidx]
# Our data is 4km, iemre is 0.125deg, so we stride some to cut down on mem
stride = slice(None, None, 3)
lats = np.ravel(lats[stride, stride])
lons = np.ravel(lons[stride, stride])
vals = np.ravel(val[stride, stride])
nn = NearestNDInterpolator((lons, lats), vals)
xi, yi = np.meshgrid(iemre.XAXIS, iemre.YAXIS)
res = nn(xi, yi)
# Lets clip bad data
# 10 inches per hour is bad data
res = np.where(np.logical_or(res < 0, res > 250), 0.0, res)
# Open up our RE file
nc = ncopen(iemre.get_hourly_ncname(valid.year), "a", timeout=300)
nc.variables["p01m"][tidx, :, :] = res
LOG.debug(
"wrote data to hourly IEMRE min: %.2f avg: %.2f max: %.2f",
np.min(res),
np.mean(res),
np.max(res),
)
nc.close()
def workflow(valid):
"""Our stage IV workflow."""
# Figure out what the current status is
p01m_status = get_p01m_status(valid)
if np.ma.is_masked(p01m_status) or p01m_status < 2:
# merge in the raw hourly data
ingest_hourly_grib(valid)
copy_to_iemre(valid)
def main(argv):
"""Go Main"""
if len(argv) == 5:
ts = utc(int(argv[1]), int(argv[2]), int(argv[3]), int(argv[4]))
workflow(ts)
return
# Otherwise we are running for an explicit 12z to 12z period, copy only
ets = utc(int(argv[1]), int(argv[2]), int(argv[3]), 12)
now = ets - datetime.timedelta(hours=23)
while now <= ets:
copy_to_iemre(now)
now += datetime.timedelta(hours=1)
if __name__ == "__main__":
main(sys.argv)
| akrherz/iem | scripts/iemre/precip_ingest.py | Python | mit | 3,827 |
"""Clock for keeping track of the wall time.
"""
__all__ = ['ClockError', 'Clock', 'log']
import datetime
import logging
import time
from typing import Optional # noqa: F401. Used for mypy.
class ClockError(Exception):
"""Invalid clock operation."""
pass
class Clock:
"""Clock for keeping track of time.
"""
def __init__(self) -> None:
self.start = None # type: Optional[float]
self.stop = None # type: Optional[float]
def tic(self) -> None:
"""Start the clock."""
self.start = time.monotonic()
self.stop = None
def toc(self) -> None:
"""Stop the clock."""
assert self.start is not None
self.stop = time.monotonic()
def __str__(self) -> str:
"""Human-readable representation of elapsed time."""
if self.start is None:
raise ClockError('The clock has not been started')
else:
start = datetime.datetime.fromtimestamp(self.start)
if self.stop is None:
stop = datetime.datetime.fromtimestamp(time.monotonic())
else:
stop = datetime.datetime.fromtimestamp(self.stop)
delta = stop - start
return str(delta)
def __enter__(self):
if self.start is None and self.stop is None:
self.tic()
return self
def __exit__(self, exc_type, exc_value, traceback):
if self.start is not None:
self.toc()
def log(function):
"""Create a decorator that logs the elapsed time.
"""
def wrapper(*args, **kwargs):
with Clock() as clock:
result = function(*args, **kwargs)
logging.debug('Completed {} after {} seconds.'
.format(function.__name__, clock))
return result
return wrapper
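if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the library API):
    # time a short sleep with the context manager and via the @log decorator.
    # DEBUG level is required to see the decorator's logging output.
    logging.basicConfig(level=logging.DEBUG)
    with Clock() as clock:
        time.sleep(0.1)
    print('Elapsed wall time: {}'.format(clock))
    @log
    def slow_add(a: int, b: int) -> int:
        time.sleep(0.1)
        return a + b
    print(slow_add(1, 2))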
| jmbr/diffusion-maps | diffusion_maps/clock.py | Python | mit | 1,831 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from csacompendium.utils.abstractmodels import (
AuthUserDetail,
CreateUpdateTime,
)
from csacompendium.utils.createslug import create_slug
from csacompendium.utils.modelmanagers import (
model_instance_filter,
model_foreign_key_qs,
model_type_filter,
create_model_type,
)
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.core.urlresolvers import reverse
class CsaTheme(AuthUserDetail, CreateUpdateTime):
"""
CSA theme model. Creates CSA theme entity.
"""
slug = models.SlugField(max_length=120, unique=True, blank=True)
csa_theme = models.CharField(max_length=80, unique=True, verbose_name='CSA theme')
def __unicode__(self):
return self.csa_theme
def __str__(self):
return self.csa_theme
def get_api_url(self):
"""
Get CSA theme URL as a reverse from model
:return: URL
:rtype: String
"""
return reverse('csa_practice_api:csa_theme_detail', kwargs={'slug': self.slug})
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'CSA Practice Themes'
@property
def csa_practice_relation(self):
"""
Get related CSA practice
:return: Query result from the CSA practice model
:rtype: object/record
"""
instance = self
qs = CsaPractice.objects.filter_by_model_type(instance)
return qs
@receiver(pre_save, sender=CsaTheme)
def pre_save_csa_theme_receiver(sender, instance, *args, **kwargs):
"""
Create a slug before save.
:param sender: Signal sending object
:param instance: Object instance
:param args: Any other argument
:param kwargs: Keyword arguments
:return: None
:rtype: None
"""
if not instance.slug:
instance.slug = create_slug(instance, CsaTheme, instance.csa_theme)
class PracticeLevel(AuthUserDetail, CreateUpdateTime):
"""
CSA level of practice model. Creates CSA practice level entity.
"""
slug = models.SlugField(max_length=150, unique=True, blank=True)
practice_level = models.CharField(max_length=150, unique=True)
def __unicode__(self):
return self.practice_level
def __str__(self):
return self.practice_level
def get_api_url(self):
"""
Get CSA practice level URL as a reverse from model
:return: URL
:rtype: String
"""
return reverse('csa_practice_api:practice_level_detail', kwargs={'slug': self.slug})
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'CSA Practice Levels'
@property
def csa_practice_relation(self):
"""
Get related CSA practice
:return: Query result from the CSA practice model
:rtype: object/record
"""
instance = self
qs = CsaPractice.objects.filter_by_model_type(instance)
return qs
@receiver(pre_save, sender=PracticeLevel)
def pre_save_practice_level_receiver(sender, instance, *args, **kwargs):
"""
Create a slug before save.
:param sender: Signal sending object
:param instance: Object instance
:param args: Any other argument
:param kwargs: Keyword arguments
:return: None
:rtype: None
"""
if not instance.slug:
instance.slug = create_slug(instance, PracticeLevel, instance.practice_level)
class PracticeType(AuthUserDetail, CreateUpdateTime):
"""
CSA practice type model. Creates CSA practice type entity.
"""
slug = models.SlugField(max_length=120, unique=True, blank=True)
practice_type = models.CharField(max_length=120, unique=True, verbose_name='Practice category')
def __unicode__(self):
return self.practice_type
def __str__(self):
return self.practice_type
def get_api_url(self):
"""
Get CSA practice type URL as a reverse from model
:return: URL
:rtype: String
"""
return reverse('csa_practice_api:practice_type_detail', kwargs={'slug': self.slug})
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'CSA Practice Types'
@property
def csa_practice_relation(self):
"""
Get related CSA practice
:return: Query result from the CSA practice model
:rtype: object/record
"""
instance = self
qs = CsaPractice.objects.filter_by_model_type(instance)
return qs
@receiver(pre_save, sender=PracticeType)
def pre_save_practice_type_receiver(sender, instance, *args, **kwargs):
"""
Create a slug before save.
:param sender: Signal sending object
:param instance: Object instance
:param args: Any other argument
:param kwargs: Keyword arguments
:return: None
:rtype: None
"""
if not instance.slug:
instance.slug = create_slug(instance, PracticeType, instance.practice_type)
class CsaPracticeManager(models.Manager):
"""
CSA practice model manager
"""
def filter_by_model_type(self, instance):
"""
Query related objects/model type
:param instance: Object instance
:return: Matching object else none
:rtype: Object/record
"""
obj_qs = model_foreign_key_qs(instance, self, CsaPracticeManager)
if obj_qs.exists():
return model_type_filter(self, obj_qs, CsaPracticeManager)
class CsaPractice(AuthUserDetail, CreateUpdateTime):
"""
CSA practice model. Creates CSA practice entity.
"""
slug = models.SlugField(unique=True, blank=True)
practice_code = models.CharField(max_length=6, unique=True, help_text='User defined CSA practice code')
csatheme = models.ForeignKey(CsaTheme, on_delete=models.PROTECT, verbose_name='CSA theme')
practicelevel = models.ForeignKey(PracticeLevel, on_delete=models.PROTECT, verbose_name='Practice level')
sub_practice_level = models.TextField(blank=True, null=True)
sub_subpractice_level = models.TextField(blank=True, null=True)
definition = models.TextField(blank=True, null=True)
practicetype = models.ForeignKey(PracticeType, on_delete=models.PROTECT, verbose_name='Practice category')
objects = CsaPracticeManager()
def __unicode__(self):
return self.sub_practice_level
def __str__(self):
return self.sub_practice_level
def get_api_url(self):
"""
Get CSA practice URL as a reverse from model
:return: URL
:rtype: String
"""
return reverse('csa_practice_api:csa_practice_detail', kwargs={'slug': self.slug})
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'CSA Practices'
@property
def research_csa_practice(self):
"""
Get related research CSA practice object/record
:return: Query result from the research CSA practice model
:rtype: object/record
"""
instance = self
qs = ResearchCsaPractice.objects.filter_by_model_type(instance)
return qs
@receiver(pre_save, sender=CsaPractice)
def pre_save_csa_practice_receiver(sender, instance, *args, **kwargs):
"""
Create a slug before save.
:param sender: Signal sending object
:param instance: Object instance
:param args: Any other argument
:param kwargs: Keyword arguments
:return: None
:rtype: None
"""
if not instance.slug:
instance.slug = create_slug(instance, CsaPractice, instance.practice_code)
class ResearchCsaPracticeManager(models.Manager):
"""
Research CSA practice model manager
"""
def filter_by_instance(self, instance):
"""
Query a related research CSA practice object/record from another model's object
:param instance: Object instance
:return: Query result from content type/model
        :rtype: object/record
"""
return model_instance_filter(instance, self, ResearchCsaPracticeManager)
def filter_by_model_type(self, instance):
"""
Query related objects/model type
:param instance: Object instance
:return: Matching object else none
:rtype: Object/record
"""
obj_qs = model_foreign_key_qs(instance, self, ResearchCsaPracticeManager)
if obj_qs.exists():
return model_type_filter(self, obj_qs, ResearchCsaPracticeManager)
def create_by_model_type(self, model_type, pk, **kwargs):
"""
Create object by model type
:param model_type: Content/model type
:param pk: Primary key
:param kwargs: Fields to be created
:return: Data object
:rtype: Object
"""
return create_model_type(self, model_type, pk, slugify=False, **kwargs)
class ResearchCsaPractice(AuthUserDetail, CreateUpdateTime):
"""
Research CSA practice entry relationship model. A many to many bridge
table between research and other models
"""
limit = models.Q(app_label='research', model='research')
csapractice = models.ForeignKey(CsaPractice, on_delete=models.PROTECT, verbose_name='CSA practice')
content_type = models.ForeignKey(ContentType, on_delete=models.PROTECT, limit_choices_to=limit)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
objects = ResearchCsaPracticeManager()
class Meta:
ordering = ['-time_created', '-last_update']
verbose_name_plural = 'Research CSA Practices'
| nkoech/csacompendium | csacompendium/csa_practice/models.py | Python | mit | 9,828 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
import views
urlpatterns = patterns('',
url(r'^add_model_abox', 'tadaa.views.add_model_abox'),
url(r'^add_model', 'tadaa.views.add_model'),
url(r'^list_models', 'tadaa.views.list_models', name='list_models'),
url(r'^about', 'tadaa.views.about'),
url(r'^predict', 'tadaa.views.predict', name='predict'),
url(r'^list_predictions', 'tadaa.views.list_predictionruns', name='list_predictionruns'),
url(r'^list_memberships/([0-9]+)', 'tadaa.views.list_memberships'),
url(r'^get_classes', 'tadaa.views.get_classes'),
url(r'^online_entity_annotation', views.OnlineEntityAnnotation.as_view()),
url(r'^view_classes_stat', views.online_annotation_entity_stat),
url(r'^view_annotation_stat', views.online_annotation_annotation_stat),
url(r'^view_annotation', views.view_annotation),
url(r'^list_annotations', views.list_annotations),
url(r'^annotation_results', views.annotation_results),
url(r'^advanced_annotation', views.advance_annotation),
url(r'^do_type', views.do_type),
url(r'^annotation_stats', views.annotation_stats),
url(r'live_monitor', views.live_monitor),
url(r'^admin/', include(admin.site.urls)),
url(r'^home', 'tadaa.views.home'),
url('', 'tadaa.views.home'),
)
| ahmad88me/tada | tadacode/tadaa/urls.py | Python | mit | 1,340 |
# Copyright (c) 2012 Roberto Alsina y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from docutils import nodes
from docutils.parsers.rst import Directive, directives
CODE = """\
<iframe width="{width}"
height="{height}"
src="http://www.youtube.com/embed/{yid}?rel=0&hd=1&wmode=transparent"
></iframe>"""
class Youtube(Directive):
""" Restructured text extension for inserting youtube embedded videos
Usage:
.. youtube:: lyViVmaBQDg
:height: 400
:width: 600
"""
has_content = True
required_arguments = 1
option_spec = {
"width": directives.positive_int,
"height": directives.positive_int,
}
def run(self):
self.check_content()
options = {
'yid': self.arguments[0],
'width': 425,
'height': 344,
}
options.update(self.options)
return [nodes.raw('', CODE.format(**options), format='html')]
def check_content(self):
if self.content:
raise self.warning("This directive does not accept content. The "
"'key=value' format for options is deprecated, "
"use ':key: value' instead")
directives.register_directive('youtube', Youtube)
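if __name__ == '__main__':
    # Minimal rendering sketch (illustrative only): with the directive
    # registered above, docutils can expand the reST snippet from the class
    # docstring into HTML containing the embed iframe.
    from docutils.core import publish_string
    source = ".. youtube:: lyViVmaBQDg\n   :height: 400\n   :width: 600\n"
    print(publish_string(source, writer_name='html'))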
| servalproject/nikola | nikola/plugins/compile_rest/youtube.py | Python | mit | 2,306 |
from django.db import models
from django.utils import timezone
class TimeStampedModel(models.Model):
created_at = models.DateTimeField(default=timezone.now)
updated_at = models.DateTimeField(default=timezone.now)
class Meta:
        abstract = True
 | HoangNguyenHuy/SocialNetwork | src/SocialNetwork_API/models/timestamped.py | Python | mit | 262 |
import os
import sys
import pandas as pd
import numpy as np
from numpy.random import poisson, uniform
from numpy import mean
import time
import math
po = True
teamsheetpath = sys.path[0] + '/teamcsvs/'
compstat = {'TDF': 'TDA', 'TDA': 'TDF', #Dictionary to use to compare team stats with opponent stats
'FGF': 'FGA', 'FGA': 'FGF',
'SFF': 'SFA', 'SFA': 'SFF',
'PAT1%F': 'PAT1%A', 'PAT1%A': 'PAT1%F',
'PAT2%F': 'PAT2%A', 'PAT2%A': 'PAT2%F'}
def get_opponent_stats(opponent): #Gets summaries of statistics for opponent each week
opponent_stats = {}
global teamsheetpath
opp_stats = pd.DataFrame.from_csv(teamsheetpath + opponent + '.csv')
for stat in opp_stats.columns:
if stat in ['TDF', 'FGF', 'SFF', 'TDA', 'FGA', 'SFA']:
opponent_stats.update({stat: opp_stats[stat].mean()})
try:
opponent_stats.update({'PAT1%F': float(opp_stats['PAT1FS'].sum()) / opp_stats['PAT1FA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT1%F': .99})
try:
opponent_stats.update({'PAT2%F': float(opp_stats['PAT2FS'].sum()) / opp_stats['PAT2FA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT2%F': .5})
try:
opponent_stats.update({'PAT1%A': float(opp_stats['PAT1AS'].sum()) / opp_stats['PAT1AA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT1%A': .99})
try:
opponent_stats.update({'PAT2%A': float(opp_stats['PAT2AS'].sum()) / opp_stats['PAT2AA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT2%A': .5})
return opponent_stats
def get_residual_performance(team): #Get how each team has done compared to the average performance of their opponents
global teamsheetpath
score_df = pd.DataFrame.from_csv(teamsheetpath + team + '.csv')
residual_stats = {}
score_df['PAT1%F'] = np.nan
score_df['PAT2%F'] = np.nan
score_df['PAT1%A'] = np.nan
score_df['PAT2%A'] = np.nan
for week in score_df.index:
try:
score_df['PAT1%F'][week] = float(score_df['PAT1FS'][week]) / score_df['PAT1FA'][week]
except ZeroDivisionError:
score_df['PAT1%F'][week] = 0.99
#print ('For: ' + str(score_df['PAT1%F'][week]))
try:
score_df['PAT2%F'][week] = float(score_df['PAT2FS'][week]) / score_df['PAT2FA'][week]
except ZeroDivisionError:
score_df['PAT2%F'][week] = 0.5
try:
score_df['PAT1%A'][week] = float(score_df['PAT1AS'][week]) / score_df['PAT1AA'][week]
except ZeroDivisionError:
score_df['PAT1%A'][week] = 0.99
#print ('Against: ' + str(score_df['PAT1%F'][week]))
try:
score_df['PAT2%A'][week] = float(score_df['PAT2AS'][week]) / score_df['PAT2AA'][week]
except ZeroDivisionError:
score_df['PAT2%A'][week] = 0.5
opponent_stats = get_opponent_stats(score_df['OPP'][week])
for stat in opponent_stats:
if week == 1:
score_df['OPP_' + stat] = np.nan
score_df['OPP_' + stat][week] = opponent_stats[stat]
for stat in opponent_stats:
score_df['R_' + stat] = score_df[stat] - score_df['OPP_' + compstat[stat]]
if stat in ['TDF', 'FGF', 'SFF', 'TDA', 'FGA', 'SFA']:
residual_stats.update({stat: score_df['R_' + stat].mean()})
elif stat == 'PAT1%F':
residual_stats.update({stat: (score_df['R_PAT1%F'].multiply(score_df['PAT1FA'])).sum() / score_df['PAT1FA'].sum()})
elif stat == 'PAT2%F':
residual_stats.update({stat: (score_df['R_PAT2%F'].multiply(score_df['PAT2FA'])).sum() / score_df['PAT2FA'].sum()})
elif stat == 'PAT1%A':
residual_stats.update({stat: (score_df['R_PAT1%A'].multiply(score_df['PAT1AA'])).sum() / score_df['PAT1AA'].sum()})
elif stat == 'PAT2%A':
residual_stats.update({stat: (score_df['R_PAT2%A'].multiply(score_df['PAT2AA'])).sum() / score_df['PAT2AA'].sum()})
try:
residual_stats.update({'GOFOR2': float(score_df['PAT2FA'].sum()) / score_df['TDF'].sum()})
except ZeroDivisionError:
residual_stats.update({'GOFOR2': .1})
#print team
#print residual_stats
return residual_stats
def get_score(expected_scores): #Get the score for a team based on expected scores
score = 0
if expected_scores['TD'] > 0:
tds = poisson(expected_scores['TD'])
else:
tds = poisson(0.01)
score = score + 6 * tds
if expected_scores['FG'] > 0:
fgs = poisson(expected_scores['FG'])
else:
fgs = poisson(0.01)
score = score + 3 * fgs
if expected_scores['S'] > 0:
sfs = poisson(expected_scores['S'])
else:
sfs = poisson(0.01)
score = score + 2 * sfs
for td in range(tds):
go_for_2_determinant = uniform(0, 1)
if go_for_2_determinant <= expected_scores['GOFOR2']: #Going for 2
successful_pat_determinant = uniform(0, 1)
if successful_pat_determinant <= expected_scores['PAT2PROB']:
score = score + 2
else:
continue
else: #Going for 1
#print(expected_scores['PAT1PROB'])
successful_pat_determinant = uniform(0, 1)
if successful_pat_determinant <= expected_scores['PAT1PROB']:
score = score + 1
else:
continue
return score
def game(team_1, team_2,
expected_scores_1, expected_scores_2,
playoff): #Get two scores and determine a winner
score_1 = get_score(expected_scores_1)
score_2 = get_score(expected_scores_2)
if score_1 > score_2:
win_1 = 1
win_2 = 0
draw_1 = 0
draw_2 = 0
elif score_2 > score_1:
win_1 = 0
win_2 = 1
draw_1 = 0
draw_2 = 0
else:
if playoff:
win_1 = 0.5
win_2 = 0.5
draw_1 = 0
draw_2 = 0
else:
win_1 = 0
win_2 = 0
draw_1 = 1
draw_2 = 1
summary = {team_1: [win_1, draw_1, score_1]}
summary.update({team_2: [win_2, draw_2, score_2]})
return summary
def get_expected_scores(team_1_stats, team_2_stats, team_1_df, team_2_df): #Get the expected scores for a matchup based on the previous teams' performances
expected_scores = {}
for stat in team_1_stats:
expected_scores.update({'TD': mean([team_1_stats['TDF'] + team_2_df['TDA'].mean(),
team_2_stats['TDA'] + team_1_df['TDF'].mean()])})
expected_scores.update({'FG': mean([team_1_stats['FGF'] + team_2_df['FGA'].mean(),
team_2_stats['FGA'] + team_1_df['FGF'].mean()])})
expected_scores.update({'S': mean([team_1_stats['SFF'] + team_2_df['SFA'].mean(),
team_2_stats['SFA'] + team_1_df['SFF'].mean()])})
#print mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
# team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
expected_scores.update({'GOFOR2': team_1_stats['GOFOR2']})
pat1prob = mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
if not math.isnan(pat1prob):
expected_scores.update({'PAT1PROB': pat1prob})
else:
expected_scores.update({'PAT1PROB': 0.99})
#print(expected_scores['PAT1PROB'])
pat2prob = mean([team_1_stats['PAT2%F'] + team_2_df['PAT2AS'].astype('float').sum() / team_2_df['PAT2AA'].sum(),
team_2_stats['PAT2%A'] + team_1_df['PAT2FS'].astype('float').sum() / team_1_df['PAT2FA'].sum()])
if not math.isnan(pat2prob):
expected_scores.update({'PAT2PROB': pat2prob})
else:
expected_scores.update({'PAT2PROB': 0.5})
#print(expected_scores)
return expected_scores
def matchup(team_1, team_2):
ts = time.time()
team_1_season = pd.DataFrame.from_csv(teamsheetpath + team_1 + '.csv')
team_2_season = pd.DataFrame.from_csv(teamsheetpath + team_2 + '.csv')
stats_1 = get_residual_performance(team_1)
stats_2 = get_residual_performance(team_2)
expected_scores_1 = get_expected_scores(stats_1, stats_2, team_1_season, team_2_season)
expected_scores_2 = get_expected_scores(stats_2, stats_1, team_2_season, team_1_season)
team_1_wins = 0
team_2_wins = 0
team_1_draws = 0
team_2_draws = 0
team_1_scores = []
team_2_scores = []
i = 0
error = 1
while error > 0.000001 or i < 5000000: #Run until convergence after 5 million iterations
summary = game(team_1, team_2,
expected_scores_1, expected_scores_2,
po)
team_1_prev_wins = team_1_wins
team_1_wins += summary[team_1][0]
team_2_wins += summary[team_2][0]
team_1_draws += summary[team_1][1]
team_2_draws += summary[team_2][1]
team_1_scores.append(summary[team_1][2])
team_2_scores.append(summary[team_2][2])
team_1_prob = float(team_1_wins) / len(team_1_scores)
team_2_prob = float(team_2_wins) / len(team_2_scores)
if i > 0:
team_1_prev_prob = float(team_1_prev_wins) / i
error = team_1_prob - team_1_prev_prob
i = i + 1
if i == 5000000:
print('Probability converged within 5 million iterations')
else:
print('Probability converged after ' + str(i) + ' iterations')
games = pd.DataFrame.from_items([(team_1, team_1_scores), (team_2, team_2_scores)])
summaries = games.describe(percentiles = [0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975])
output = {'ProbWin': {team_1: team_1_prob, team_2: team_2_prob}, 'Scores': summaries}
print(team_1 + '/' + team_2 + ' score distributions computed in ' + str(round(time.time() - ts, 1)) + ' seconds')
    return output
 | JoeJimFlood/NFLPrediction2014 | matchup.py | Python | mit | 10,272 |
#!/usr/bin/env python
from cogent.app.util import CommandLineApplication,\
CommandLineAppResult, ResultPath
from cogent.app.parameters import Parameter,ValuedParameter,Parameters
__author__ = "Shandy Wikman"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__contributors__ = ["Shandy Wikman"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Shandy Wikman"
__email__ = "ens01svn@cs.umu.se"
__status__ = "Development"
class ILM(CommandLineApplication):
"""Application controller ILM application
Predict a secondary structure given a score matrix
Main options:
-L l: minimum loop length (default=3)
-V v: minimum virtual loop length (default=3)
-H h: minimum helix length (default=3)
-N n: number of helices selected per iteration (default=1)
-I i: number of iterations before termination(default=unlimited)
"""
_parameters = {
'-L':ValuedParameter(Prefix='-',Name='L',Delimiter=' '),
'-V':ValuedParameter(Prefix='-',Name='V',Delimiter=' '),
'-H':ValuedParameter(Prefix='-',Name='H',Delimiter=' '),
'-N':ValuedParameter(Prefix='-',Name='N',Delimiter=' '),
'-I':ValuedParameter(Prefix='-',Name='I',Delimiter=' ')}
_command = 'ilm'
_input_handler = '_input_as_string'
class hlxplot(CommandLineApplication):
"""Application controller hlxplot application
Compute a helix plot score matrix from a sequence alignment
Options:
-b B: Set bad pair penalty to B
(Default = 2)
-g G: Set good pair score to G
(Default = 1)
-h H: Set minimum helix length to H
(Default = 2)
-l L: Set minimum loop length to L
(Default = 3)
-s S: Set helix length score to S
(Default = 2.0)
-t : Write output in text format
(Default = Binary format)
-x X: Set paired gap penalty to X
(Default = 3)
"""
_parameters = {
'-b':ValuedParameter(Prefix='-',Name='b',Delimiter=' '),
'-g':ValuedParameter(Prefix='-',Name='g',Delimiter=' '),
'-h':ValuedParameter(Prefix='-',Name='h',Delimiter=' '),
'-l':ValuedParameter(Prefix='-',Name='l',Delimiter=' '),
'-s':ValuedParameter(Prefix='-',Name='s',Delimiter=' '),
'-t':ValuedParameter(Prefix='-',Name='t',Delimiter=' '),
'-x':ValuedParameter(Prefix='-',Name='x',Delimiter=' ')}
_command = 'hlxplot'
_input_handler = '_input_as_string'
class xhlxplot(CommandLineApplication):
"""Application controller xhlxplot application
Compute an extended helix plot score matrix from a single sequence
Options:
-b B: Set bad pair penalty to B
(Default = 200)
-h H: Set minimum helix length to H
(Default = 2)
-l L: Set minimum loop length to L
(Default = 3)
-x X: Set paired gap penalty to X
(Default = 500)
-t : Write output in text format
(Default = Binary format)
-c : No Closing GU
(Default = allows closing GU)
"""
_parameters = {
'-b':ValuedParameter(Prefix='-',Name='b',Delimiter=' '),
'-h':ValuedParameter(Prefix='-',Name='h',Delimiter=' '),
'-l':ValuedParameter(Prefix='-',Name='l',Delimiter=' '),
'-x':ValuedParameter(Prefix='-',Name='x',Delimiter=' '),
'-t':ValuedParameter(Prefix='-',Name='t',Delimiter=' '),
'-c':ValuedParameter(Prefix='-',Name='c',Delimiter=' ')}
_command = 'xhlxplot'
_input_handler = '_input_as_string'
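if __name__ == '__main__':
    # Hedged usage sketch following the usual PyCogent app-controller pattern.
    # It assumes the `ilm` binary is on PATH and that 'scores.mat' is an
    # existing score matrix file (e.g. produced by hlxplot/xhlxplot); both
    # names are illustrative assumptions, not guarantees made by this module.
    app = ILM()
    app.Parameters['-I'].on(10)      # stop after 10 iterations
    result = app('scores.mat')       # _input_as_string passes the path through
    print(result['StdOut'].read())   # predicted secondary structure
    result.cleanUp()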
| sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/cogent/app/ilm.py | Python | mit | 3,567 |
#!/usr/bin/env python
""" Assignment 1, Exercise 3, INF1340, Fall, 2015. Troubleshooting Car Issues.
This module contains one function diagnose_car(). It is an expert system to
interactive diagnose car issues.
"""
__author__ = 'Susan Sim'
__email__ = "ses@drsusansim.org"
__copyright__ = "2015 Susan Sim"
__license__ = "MIT License"
"""
"""
# Interactively queries the user with yes/no questions to identify a possible issue with a car.
# Inputs: As is but not nested - same indentation all the way through
# Expected Outputs: To follow the decision logic of the question tree
# Errors: Did not proceed according to logic. fixed by nesting properly
"""
"""
def diagnose_car():
silent = raw_input("Is the car silent when you turn the key? ")
#this begins the line of questions on the left side of the question tree
if silent == 'Y':
corroded = raw_input("Are the battery terminals corroded?")
if corroded == 'Y':
print "Clean terminals and try starting again."
elif corroded == 'N':
print "Replace cables and try again."
elif silent == 'N':
#this begins the line of questions on the right side of the question tree
clicking = raw_input("Does the car make a clicking noise?")
if clicking == 'Y':
print "Replace the battery."
elif clicking == 'N':
crank = raw_input("Does the car crank up but fails to start?")
if crank == 'Y':
print "Check spark plug connections."
elif crank == 'N':
start_and_die = raw_input("Does the engine start and then die?")
if start_and_die == 'Y':
fuel_injection = raw_input("Does your car have fuel injection?")
if fuel_injection == 'N':
print "Check to ensure the choke is opening and closing."
elif fuel_injection == 'Y':
print "Get it in for service."
elif start_and_die == 'N':
print "Engine is not getting enough fuel. Clean fuel pump."
diagnose_car()
| SLiana/inf1340_2015_asst1 | exercise3.py | Python | mit | 2,130 |
"""
Disjoint set.
Reference: https://en.wikipedia.org/wiki/Disjoint-set_data_structure
"""
class Node:
def __init__(self, data: int) -> None:
self.data = data
self.rank: int
self.parent: Node
def make_set(x: Node) -> None:
"""
Make x as a set.
"""
    # rank is the distance from x to its parent
# root's rank is 0
x.rank = 0
x.parent = x
def union_set(x: Node, y: Node) -> None:
"""
Union of two sets.
set with bigger rank should be parent, so that the
disjoint set tree will be more flat.
"""
x, y = find_set(x), find_set(y)
if x == y:
return
elif x.rank > y.rank:
y.parent = x
else:
x.parent = y
if x.rank == y.rank:
y.rank += 1
def find_set(x: Node) -> Node:
"""
Return the parent of x
"""
if x != x.parent:
x.parent = find_set(x.parent)
return x.parent
def find_python_set(node: Node) -> set:
"""
Return a Python Standard Library set that contains i.
"""
sets = ({0, 1, 2}, {3, 4, 5})
for s in sets:
if node.data in s:
return s
raise ValueError(f"{node.data} is not in {sets}")
def test_disjoint_set() -> None:
"""
>>> test_disjoint_set()
"""
vertex = [Node(i) for i in range(6)]
for v in vertex:
make_set(v)
union_set(vertex[0], vertex[1])
union_set(vertex[1], vertex[2])
union_set(vertex[3], vertex[4])
union_set(vertex[3], vertex[5])
for node0 in vertex:
for node1 in vertex:
if find_python_set(node0).isdisjoint(find_python_set(node1)):
assert find_set(node0) != find_set(node1)
else:
assert find_set(node0) == find_set(node1)
if __name__ == "__main__":
test_disjoint_set()
| TheAlgorithms/Python | data_structures/disjoint_set/disjoint_set.py | Python | mit | 1,913 |
#pylint: disable=C0301, C0103, W0212, W0401
"""
.. module:: pilot
:platform: Unix
:synopsis: RADICAL-Pilot is a distributed Pilot-Job framework.
.. moduleauthor:: Ole Weidner <ole.weidner@rutgers.edu>
"""
__copyright__ = "Copyright 2013-2014, http://radical.rutgers.edu"
__license__ = "MIT"
# ------------------------------------------------------------------------------
# Scheduler name constant
from types import *
from states import *
from logentry import *
from scheduler import *
# ------------------------------------------------------------------------------
#
from url import Url
from exceptions import *
from session import Session
from context import Context
from unit_manager import UnitManager
from compute_unit import ComputeUnit
from compute_unit_description import ComputeUnitDescription
from pilot_manager import PilotManager
from compute_pilot import ComputePilot
from compute_pilot_description import ComputePilotDescription
from resource_config import ResourceConfig
from staging_directives import COPY, LINK, MOVE, TRANSFER, SKIP_FAILED, CREATE_PARENTS
# ------------------------------------------------------------------------------
#
from utils.logger import logger
import os
import radical.utils as ru
import radical.utils.logger as rul
pwd = os.path.dirname (__file__)
root = "%s/.." % pwd
version, version_detail, version_branch, sdist_name, sdist_path = ru.get_version ([root, pwd])
# FIXME: the logger init will require a 'classical' ini based config, which is
# different from the json based config we use now. May need updating once the
# radical configuration system has changed to json
_logger = rul.logger.getLogger ('radical.pilot')
_logger.info ('radical.pilot version: %s' % version_detail)
# ------------------------------------------------------------------------------
| JensTimmerman/radical.pilot | src/radical/pilot/__init__.py | Python | mit | 1,882 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# based on ideas in https://github.com/lethienhoa/Very-Deep-Convolutional-Networks-for-Natural-Language-Processing/blob/master/train.py
import tensorflow as tf
from vdcnn import VDCNN
import numpy as np
import os
import time
import datetime
import cPickle as pkl
import tables
# Parameters
# ==================================================
# Model Hyperparameters
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularizaion lambda (default: 0.0)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 400, "Batch Size (default: 128)")
tf.flags.DEFINE_integer("num_epochs", 20, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 5000, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 1000, "Save model after this many steps (default: 100)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# ===================== Data preparation =====================================
# Load data
print("Loading data...")
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{} "
sequence_max_length = 1024
# shuffeling data for training
# Training
# ==================================================
# ----------------- Graph construction phase -------------------------------
# Input data.
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
cnn = VDCNN()
# Ensures that we execute the update_ops before performing the train
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Initialize all variables
print("START %s" % datetime.datetime.now())
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver()
print('Initialized')
batch_size = FLAGS.batch_size
epochs = FLAGS.num_epochs
hdf5_path = "my_extendable_compressed_data_train.hdf5"
for e in range(epochs):
extendable_hdf5_file = tables.open_file(hdf5_path, mode='r')
for ptr in range(0, 500000, batch_size):
#print(ptr)
feed_dict = {
cnn.input_x: extendable_hdf5_file.root.data[ptr: ptr+batch_size],
cnn.input_y:extendable_hdf5_file.root.clusters[ptr: ptr+batch_size] ,
cnn.is_training: True } # Update moving_mean, moving_var }
sess.run(train_op,feed_dict)
time_str = datetime.datetime.now().isoformat()
if e % 1 == 0:
step ,loss, accuracy = sess.run([global_step, cnn.loss, cnn.accuracy],feed_dict)
save_path = saver.save(sess, "model_vdcnn_full_dataset.ckpt")
print("model saved in file: %s" % save_path)
print("{}: epoch {}, loss {}, acc {}".format(time_str,e, loss, accuracy))
print("epoch %d:" % e)
extendable_hdf5_file.close()
print("END %s" % str(datetime.datetime.now()))
| hyperlex/vdcnn | train.py | Python | mit | 3,788 |
"""Use stochastic Lanczos quadrature to approximate spectral function sums of
any operator which has an efficient representation of action on a vector.
"""
import functools
from math import sqrt, log2, exp, inf, nan
import random
import warnings
import numpy as np
import scipy.linalg as scla
from scipy.ndimage.filters import uniform_filter1d
from ..core import ptr, prod, vdot, njit, dot, subtract_update_, divide_update_
from ..utils import int2tup, find_library, raise_cant_find_library_function
from ..gen.rand import randn, rand_rademacher, rand_phase, seed_rand
from ..linalg.mpi_launcher import get_mpi_pool
if find_library('opt_einsum') and find_library('autoray'):
from ..tensor.tensor_core import Tensor
from ..tensor.tensor_1d import MatrixProductOperator
from ..tensor.tensor_approx_spectral import construct_lanczos_tridiag_MPO
else:
reqs = '[opt_einsum,autoray]'
Tensor = raise_cant_find_library_function(reqs)
construct_lanczos_tridiag_MPO = raise_cant_find_library_function(reqs)
# --------------------------------------------------------------------------- #
# 'Lazy' representation tensor contractions #
# --------------------------------------------------------------------------- #
def lazy_ptr_linop(psi_ab, dims, sysa, **linop_opts):
r"""A linear operator representing action of partially tracing a bipartite
state, then multiplying another 'unipartite' state::
( | )
+-------+
| psi_a | ______
+_______+ / \
a| |b |
+-------------+ |
| psi_ab.H | |
+_____________+ |
|
+-------------+ |
| psi_ab | |
+_____________+ |
a| |b |
| \______/
Parameters
----------
psi_ab : ket
State to partially trace and dot with another ket, with
size ``prod(dims)``.
dims : sequence of int, optional
The sub dimensions of ``psi_ab``.
sysa : int or sequence of int, optional
Index(es) of the 'a' subsystem(s) to keep.
"""
sysa = int2tup(sysa)
Kab = Tensor(np.asarray(psi_ab).reshape(dims),
inds=[('kA{}' if i in sysa else 'xB{}').format(i)
for i in range(len(dims))])
Bab = Tensor(Kab.data.conjugate(),
inds=[('bA{}' if i in sysa else 'xB{}').format(i)
for i in range(len(dims))])
return (Kab & Bab).aslinearoperator(
[f'kA{i}' for i in sysa],
[f'bA{i}' for i in sysa],
**linop_opts
)
def lazy_ptr_ppt_linop(psi_abc, dims, sysa, sysb, **linop_opts):
r"""A linear operator representing action of partially tracing a tripartite
state, partially transposing the remaining bipartite state, then
multiplying another bipartite state::
( | )
+--------------+
| psi_ab |
+______________+ _____
a| ____ b| / \
| / a\ | |c |
| | +-------------+ |
| | | psi_abc.H | |
\ / +-------------+ |
X |
/ \ +-------------+ |
| | | psi_abc | |
| | +-------------+ |
| \____/a |b |c |
a| | \_____/
Parameters
----------
psi_abc : ket
        State (of size ``prod(dims)``) to partially trace, partially
        transpose, then dot with another ket of size
        ``prod(dims[sysa] + dims[sysb])``.
dims : sequence of int
The sub dimensions of ``psi_abc``.
sysa : int or sequence of int, optional
Index(es) of the 'a' subsystem(s) to keep, with respect to all
the dimensions, ``dims``, (i.e. pre-partial trace).
    sysb : int or sequence of int, optional
Index(es) of the 'b' subsystem(s) to keep, with respect to all
the dimensions, ``dims``, (i.e. pre-partial trace).
"""
sysa, sysb = int2tup(sysa), int2tup(sysb)
sys_ab = sorted(sysa + sysb)
Kabc = Tensor(np.asarray(psi_abc).reshape(dims),
inds=[('kA{}' if i in sysa else 'kB{}' if i in sysb else
'xC{}').format(i) for i in range(len(dims))])
Babc = Tensor(Kabc.data.conjugate(),
inds=[('bA{}' if i in sysa else 'bB{}' if i in sysb else
'xC{}').format(i) for i in range(len(dims))])
return (Kabc & Babc).aslinearoperator(
[('bA{}' if i in sysa else 'kB{}').format(i) for i in sys_ab],
[('kA{}' if i in sysa else 'bB{}').format(i) for i in sys_ab],
**linop_opts
)
# --------------------------------------------------------------------------- #
# Lanczos tri-diag technique #
# --------------------------------------------------------------------------- #
def inner(a, b):
"""Inner product between two vectors
"""
return vdot(a, b).real
def norm_fro(a):
"""'Frobenius' norm of a vector.
"""
return sqrt(inner(a, a))
def norm_fro_approx(A, **kwargs):
r"""Calculate the approximate frobenius norm of any hermitian linear
operator:
.. math::
\mathrm{Tr} \left[ A^{\dagger} A \right]
Parameters
----------
A : linear operator like
Operator with a dot method, assumed to be hermitian, to estimate the
frobenius norm of.
kwargs
Supplied to :func:`approx_spectral_function`.
Returns
-------
float
"""
return approx_spectral_function(A, lambda x: x**2, **kwargs)**0.5
def random_rect(shape, dist='rademacher', orthog=False, norm=True,
seed=False, dtype=complex):
"""Generate a random array optionally orthogonal.
Parameters
----------
shape : tuple of int
The shape of array.
    dist : {'gaussian', 'rademacher', 'phase'}
Distribution of the random variables.
orthog : bool or operator.
Orthogonalize the columns if more than one.
norm : bool
Explicitly normalize the frobenius norm to 1.
"""
if seed:
# needs to be truly random so e.g. MPI processes don't overlap
seed_rand(random.SystemRandom().randint(0, 2**32 - 1))
if dist == 'rademacher':
V = rand_rademacher(shape, scale=1 / sqrt(prod(shape)), dtype=dtype)
# already normalized
elif dist == 'gaussian':
V = randn(shape, scale=1 / (prod(shape)**0.5 * 2**0.5), dtype=dtype)
if norm:
V /= norm_fro(V)
elif dist == 'phase':
V = rand_phase(shape, scale=1 / sqrt(prod(shape)), dtype=dtype)
# already normalized
else:
raise ValueError(f"`dist={dist}` not understood.")
if orthog and min(shape) > 1:
V = scla.orth(V)
V /= sqrt(min(V.shape))
return V
def construct_lanczos_tridiag(A, K, v0=None, bsz=1, k_min=10, orthog=False,
beta_tol=1e-6, seed=False, v0_opts=None):
"""Construct the tridiagonal lanczos matrix using only matvec operators.
This is a generator that iteratively yields the alpha and beta digaonals
at each step.
Parameters
----------
A : dense array, sparse matrix or linear operator
The operator to approximate, must implement ``.dot`` method to compute
its action on a vector.
K : int, optional
The maximum number of iterations and thus rank of the matrix to find.
v0 : vector, optional
The starting vector to iterate with, default to random.
bsz : int, optional
The block size (number of columns) of random vectors to iterate with.
k_min : int, optional
The minimum size of the krylov subspace for form.
orthog : bool, optional
If True, perform full re-orthogonalization for each new vector.
beta_tol : float, optional
        The 'breakdown' tolerance. If the next beta coefficient in the lanczos
        matrix is less than this, implying that the full non-null space has
been found, terminate early.
seed : bool, optional
If True, seed the numpy random generator with a system random int.
Yields
------
alpha : sequence of float of length k
The diagonal entries of the lanczos matrix.
beta : sequence of float of length k
The off-diagonal entries of the lanczos matrix, with the last entry
the 'look' forward value.
scaling : float
How to scale the overall weights.
"""
d = A.shape[0]
if bsz == 1:
v_shp = (d,)
else:
orthog = False
v_shp = (d, bsz)
alpha = np.zeros(K + 1, dtype=get_equivalent_real_dtype(A.dtype))
beta = np.zeros(K + 2, dtype=get_equivalent_real_dtype(A.dtype))
beta[1] = sqrt(prod(v_shp)) # by construction
if v0 is None:
if v0_opts is None:
v0_opts = {}
q = random_rect(v_shp, seed=seed, dtype=A.dtype, **v0_opts)
else:
q = v0.astype(A.dtype)
divide_update_(q, norm_fro(q), q)
v = np.zeros_like(q)
if orthog:
Q = np.copy(q).reshape(-1, 1)
for j in range(1, K + 1):
r = dot(A, q)
subtract_update_(r, beta[j], v)
alpha[j] = inner(q, r)
subtract_update_(r, alpha[j], q)
# perform full orthogonalization
if orthog:
r -= Q.dot(Q.conj().T.dot(r))
beta[j + 1] = norm_fro(r)
# check for convergence
if abs(beta[j + 1]) < beta_tol:
yield alpha[1:j + 1].copy(), beta[2:j + 2].copy(), beta[1]**2 / bsz
break
v[()] = q
divide_update_(r, beta[j + 1], q)
# keep all vectors
if orthog:
Q = np.concatenate((Q, q.reshape(-1, 1)), axis=1)
if j >= k_min:
yield alpha[1:j + 1].copy(), beta[2:j + 2].copy(), beta[1]**2 / bsz
def lanczos_tridiag_eig(alpha, beta, check_finite=True):
"""Find the eigen-values and -vectors of the Lanczos triadiagonal matrix.
Parameters
----------
alpha : array of float
The diagonal.
beta : array of float
The k={-1, 1} off-diagonal. Only first ``len(alpha) - 1`` entries used.
"""
Tk_banded = np.empty((2, alpha.size), dtype=alpha.dtype)
Tk_banded[1, -1] = 0.0 # sometimes can get nan here? -> breaks eig_banded
Tk_banded[0, :] = alpha
Tk_banded[1, :beta.size] = beta
try:
tl, tv = scla.eig_banded(
Tk_banded, lower=True, check_finite=check_finite)
# sometimes get no convergence -> use dense hermitian method
except scla.LinAlgError: # pragma: no cover
tl, tv = np.linalg.eigh(
np.diag(alpha) + np.diag(beta[:alpha.size - 1], -1), UPLO='L')
return tl, tv
def calc_trace_fn_tridiag(tl, tv, f, pos=True):
"""Spectral ritz function sum, weighted by ritz vectors.
"""
return sum(
tv[0, i]**2 * f(max(tl[i], 0.0) if pos else tl[i])
for i in range(tl.size)
)
@njit
def ext_per_trim(x, p=0.6, s=1.0): # pragma: no cover
r"""Extended percentile trimmed-mean. Makes the mean robust to asymmetric
outliers, while using all data when it is nicely clustered. This can be
visualized roughly as::
|--------|=========|--------|
x x xx xx xxxxx xxx xx x x x
Where the inner range contains the central ``p`` proportion of the data,
    and the outer ranges extend this by a factor of ``s`` either side.
Parameters
----------
x : array
Data to trim.
p : Proportion of data used to define the 'central' percentile.
For example, p=0.5 gives the inter-quartile range.
s : Include data up to this factor times the central 'percentile' range
away from the central percentile itself.
    Returns
    -------
    xt : array
Trimmed data.
"""
lb = np.percentile(x, 100 * (1 - p) / 2)
ub = np.percentile(x, 100 * (1 + p) / 2)
ib = ub - lb
trimmed_x = x[(lb - s * ib < x) & (x < ub + s * ib)]
return trimmed_x
@njit # pragma: no cover
def nbsum(xs):
tot = 0
for x in xs:
tot += x
return tot
@njit # pragma: no cover
def std(xs):
"""Simple standard deviation - don't invoke numpy for small lists.
"""
N = len(xs)
xm = nbsum(xs) / N
var = nbsum([(x - xm)**2 for x in xs]) / N
return var**0.5
def calc_est_fit(estimates, conv_n, tau):
"""Make estimate by fitting exponential convergence to estimates.
"""
n = len(estimates)
if n < conv_n:
return nan, inf
# iteration number, fit function to inverse this to get k->infinity
ks = np.arange(1, len(estimates) + 1)
# smooth data with a running mean
smoothed_estimates = uniform_filter1d(estimates, n // 2)
# ignore this amount of the initial estimates and fit later part only
ni = n // 2
try:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# fit the inverse data with a line, weighting recent ests more
popt, pcov = np.polyfit(x=(1 / ks[ni:]),
y=smoothed_estimates[ni:],
w=ks[ni:], deg=1, cov=True)
# estimate of function at 1 / k = 0 and standard error
est, err = popt[-1], abs(pcov[-1, -1])**0.5
except (ValueError, RuntimeError):
est, err = nan, inf
return est, err
def calc_est_window(estimates, mean_ests, conv_n):
"""Make estimate from mean of last ``m`` samples, following:
1. Take between ``conv_n`` and 12 estimates.
2. Pair the estimates as they are alternate upper/lower bounds
3. Compute the standard error on the paired estimates.
"""
m_est = min(max(conv_n, len(estimates) // 8), 12)
est = sum(estimates[-m_est:]) / len(estimates[-m_est:])
mean_ests.append(est)
if len(estimates) > conv_n:
# check for convergence using variance of paired last m estimates
# -> paired because estimates alternate between upper and lower bound
paired_ests = [
(a + b) / 2 for a, b in
zip(estimates[-m_est::2], estimates[-m_est + 1::2])
]
err = std(paired_ests) / (m_est / 2) ** 0.5
else:
err = inf
return est, err
def single_random_estimate(A, K, bsz, beta_tol, v0, f, pos, tau, tol_scale,
k_min=10, verbosity=0, *, seed=None,
v0_opts=None, **lanczos_opts):
# choose normal (any LinearOperator) or MPO lanczos tridiag construction
if isinstance(A, MatrixProductOperator):
lanc_fn = construct_lanczos_tridiag_MPO
else:
lanc_fn = construct_lanczos_tridiag
lanczos_opts['bsz'] = bsz
estimates = []
mean_ests = []
# the number of samples to check standard deviation convergence with
conv_n = 6 # 3 pairs
# iteratively build the lanczos matrix, checking for convergence
for alpha, beta, scaling in lanc_fn(
A, K=K, beta_tol=beta_tol, seed=seed, k_min=k_min - 2 * conv_n,
v0=v0() if callable(v0) else v0, v0_opts=v0_opts, **lanczos_opts):
try:
Tl, Tv = lanczos_tridiag_eig(alpha, beta, check_finite=False)
Gf = scaling * calc_trace_fn_tridiag(Tl, Tv, f=f, pos=pos)
except scla.LinAlgError: # pragma: no cover
warnings.warn("Approx Spectral Gf tri-eig didn't converge.")
estimates.append(np.nan)
continue
k = alpha.size
estimates.append(Gf)
# check for break-down convergence (e.g. found entire subspace)
# in which case latest estimate should be accurate
if abs(beta[-1]) < beta_tol:
if verbosity >= 2:
print(f"k={k}: Beta breadown, returning {Gf}.")
return Gf
# compute an estimate and error using a window of the last few results
win_est, win_err = calc_est_window(estimates, mean_ests, conv_n)
# try and compute an estimate and error using exponential fit
fit_est, fit_err = calc_est_fit(mean_ests, conv_n, tau)
# take whichever has lowest error
est, err = min((win_est, win_err), (fit_est, fit_err),
key=lambda est_err: est_err[1])
converged = err < tau * (abs(win_est) + tol_scale)
if verbosity >= 2:
if verbosity >= 3:
print(f"est_win={win_est}, err_win={win_err}")
print(f"est_fit={fit_est}, err_fit={fit_err}")
print(f"k={k}: Gf={Gf}, Est={est}, Err={err}")
if converged:
print(f"k={k}: Converged to tau {tau}.")
if converged:
break
if verbosity >= 1:
print(f"k={k}: Returning estimate {est}.")
return est
def calc_stats(samples, mean_p, mean_s, tol, tol_scale):
"""Get an estimate from samples.
"""
samples = np.array(samples)
xtrim = ext_per_trim(samples, p=mean_p, s=mean_s)
# sometimes everything is an outlier...
if xtrim.size == 0: # pragma: no cover
estimate, sdev = np.mean(samples), std(samples)
else:
estimate, sdev = np.mean(xtrim), std(xtrim)
err = sdev / len(samples) ** 0.5
converged = err < tol * (abs(estimate) + tol_scale)
return estimate, err, converged
def get_single_precision_dtype(dtype):
if np.issubdtype(dtype, np.complexfloating):
return np.complex64
elif np.issubdtype(dtype, np.floating):
return np.float32
else:
raise ValueError(f"dtype {dtype} not understood.")
def get_equivalent_real_dtype(dtype):
if dtype in ('float64', 'complex128'):
return 'float64'
elif dtype in ('float32', 'complex64'):
return 'float32'
else:
raise ValueError(f"dtype {dtype} not understood.")
def approx_spectral_function(A, f, tol=1e-2, *, bsz=1, R=1024, tol_scale=1,
tau=1e-4, k_min=10, k_max=512, beta_tol=1e-6,
mpi=False, mean_p=0.7, mean_s=1.0, pos=False,
v0=None, verbosity=0, single_precision='AUTO',
**lanczos_opts):
"""Approximate a spectral function, that is, the quantity ``Tr(f(A))``.
Parameters
----------
A : dense array, sparse matrix or LinearOperator
Operator to approximate spectral function for. Should implement
``A.dot(vec)``.
f : callable
Scalar function with which to act on approximate eigenvalues.
tol : float, optional
Relative convergence tolerance threshold for error on mean of repeats.
This can pretty much be relied on as the overall accuracy. See also
``tol_scale`` and ``tau``. Default: 1%.
bsz : int, optional
        Number of simultaneous vector columns to use at once, 1 equating to the
standard lanczos method. If ``bsz > 1`` then ``A`` must implement
matrix-matrix multiplication. This is a more performant way of
essentially increasing ``R``, at the cost of more memory. Default: 1.
R : int, optional
The number of repeats with different initial random vectors to perform.
Increasing this should increase accuracy as ``sqrt(R)``. Cost of
algorithm thus scales linearly with ``R``. If ``tol`` is non-zero, this
is the maximum number of repeats.
tau : float, optional
The relative tolerance required for a single lanczos run to converge.
This needs to be small enough that each estimate with a single random
        vector produces an unbiased sample of the operator's spectrum.
k_min : int, optional
The minimum size of the krylov subspace to form for each sample.
k_max : int, optional
        The maximum size of the krylov space to form. Cost of algorithm scales
linearly with ``K``. If ``tau`` is non-zero, this is the maximum size
matrix to form.
tol_scale : float, optional
This sets the overall expected scale of each estimate, so that an
absolute tolerance can be used for values near zero. Default: 1.
beta_tol : float, optional
        The 'breakdown' tolerance. If the next beta coefficient in the lanczos
        matrix is less than this, implying that the full non-null space has
been found, terminate early. Default: 1e-6.
mpi : bool, optional
Whether to parallelize repeat runs over MPI processes.
mean_p : float, optional
Factor for robustly finding mean and err of repeat estimates,
see :func:`ext_per_trim`.
mean_s : float, optional
Factor for robustly finding mean and err of repeat estimates,
see :func:`ext_per_trim`.
v0 : vector, or callable
Initial vector to iterate with, sets ``R=1`` if given. If callable, the
        function to produce a random initial vector (sequence).
pos : bool, optional
If True, make sure any approximate eigenvalues are positive by
clipping below 0.
verbosity : {0, 1, 2}, optional
How much information to print while computing.
single_precision : {'AUTO', False, True}, optional
Try and convert the operator to single precision. This can lead to much
faster operation, especially if a GPU is available. Additionally,
double precision is not really needed given the stochastic nature of
the algorithm.
lanczos_opts
Supplied to
:func:`~quimb.linalg.approx_spectral.single_random_estimate` or
:func:`~quimb.linalg.approx_spectral.construct_lanczos_tridiag`.
Returns
-------
scalar
The approximate value ``Tr(f(a))``.
See Also
--------
construct_lanczos_tridiag
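    Examples
    --------
    A minimal sketch (sizes and tolerance here are illustrative only):
    estimate the trace norm ``Tr(|A|)`` of a small random hermitian matrix
    and compare it against the dense result::

        import numpy as np
        A = np.random.randn(256, 256)
        A = (A + A.T) / 2                      # make hermitian
        approx = approx_spectral_function(A, abs, tol=1e-2)
        exact = np.abs(np.linalg.eigvalsh(A)).sum()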
"""
if single_precision == 'AUTO':
single_precision = hasattr(A, 'astype')
if single_precision:
A = A.astype(get_single_precision_dtype(A.dtype))
if (v0 is not None) and not callable(v0):
R = 1
else:
R = max(1, int(R / bsz))
# require better precision for the lanczos procedure, otherwise biased
if tau is None:
tau = tol / 1000
if verbosity:
print(f"LANCZOS f(A) CALC: tol={tol}, tau={tau}, R={R}, bsz={bsz}")
# generate repeat estimates
kwargs = {'A': A, 'K': k_max, 'bsz': bsz, 'beta_tol': beta_tol,
'v0': v0, 'f': f, 'pos': pos, 'tau': tau, 'k_min': k_min,
'tol_scale': tol_scale, 'verbosity': verbosity, **lanczos_opts}
if not mpi:
def gen_results():
for _ in range(R):
yield single_random_estimate(**kwargs)
else:
pool = get_mpi_pool()
kwargs['seed'] = True
fs = [pool.submit(single_random_estimate, **kwargs) for _ in range(R)]
def gen_results():
for f in fs:
yield f.result()
# iterate through estimates, waiting for convergence
results = gen_results()
estimate = None
samples = []
for _ in range(R):
samples.append(next(results))
if verbosity >= 1:
print(f"Repeat {len(samples)}: estimate is {samples[-1]}")
# wait a few iterations before checking error on mean breakout
if len(samples) >= 3:
estimate, err, converged = calc_stats(
samples, mean_p, mean_s, tol, tol_scale)
if verbosity >= 1:
print(f"Total estimate = {estimate} ± {err}")
if converged:
if verbosity >= 1:
print(f"Repeat {len(samples)}: converged to tol {tol}")
break
if mpi:
        # deal with remaining futures, skipping those already consumed above
        extra_futures = []
        for f in fs[len(samples):]:
if f.done() or f.running():
extra_futures.append(f)
else:
f.cancel()
if extra_futures:
samples.extend(f.result() for f in extra_futures)
estimate, err, converged = calc_stats(
samples, mean_p, mean_s, tol, tol_scale)
if estimate is None:
estimate, err, _ = calc_stats(
samples, mean_p, mean_s, tol, tol_scale)
if verbosity >= 1:
print(f"ESTIMATE is {estimate} ± {err}")
return estimate
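# A hedged usage sketch (not part of the quimb API): the helper name below is
# an illustrative assumption showing how ``approx_spectral_function`` defined
# above might be called directly, assuming a plain dense hermitian numpy
# array is an acceptable operator ``A``, and compared against exact
# diagonalization.
def _demo_tr_exp_estimate(n=64, seed=42):
    import numpy as np
    rng = np.random.default_rng(seed)
    # small random hermitian test operator
    M = rng.normal(size=(n, n))
    A = (M + M.T) / 2
    # exact Tr(exp(A)) from the full spectrum
    exact = float(np.sum(np.exp(np.linalg.eigvalsh(A))))
    # stochastic Lanczos estimate - a loose tolerance keeps the demo cheap
    approx = approx_spectral_function(A, f=np.exp, tol=0.05, verbosity=0)
    return exact, approx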
@functools.wraps(approx_spectral_function)
def tr_abs_approx(*args, **kwargs):
return approx_spectral_function(*args, f=abs, **kwargs)
@functools.wraps(approx_spectral_function)
def tr_exp_approx(*args, **kwargs):
return approx_spectral_function(*args, f=exp, **kwargs)
@functools.wraps(approx_spectral_function)
def tr_sqrt_approx(*args, **kwargs):
return approx_spectral_function(*args, f=sqrt, pos=True, **kwargs)
def xlogx(x):
return x * log2(x) if x > 0 else 0.0
@functools.wraps(approx_spectral_function)
def tr_xlogx_approx(*args, **kwargs):
return approx_spectral_function(*args, f=xlogx, **kwargs)
# --------------------------------------------------------------------------- #
# Specific quantities #
# --------------------------------------------------------------------------- #
def entropy_subsys_approx(psi_ab, dims, sysa, backend=None, **kwargs):
"""Approximate the (Von Neumann) entropy of a pure state's subsystem.
Parameters
----------
psi_ab : ket
        Bipartite state to partially trace and find the entropy of.
    dims : sequence of int
        The sub dimensions of ``psi_ab``.
    sysa : int or sequence of int
        Index(es) of the 'a' subsystem(s) to keep.
kwargs
Supplied to :func:`approx_spectral_function`.
"""
lo = lazy_ptr_linop(psi_ab, dims=dims, sysa=sysa, backend=backend)
return - tr_xlogx_approx(lo, **kwargs)
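# A hedged usage sketch (illustrative only): the helper name below is assumed,
# and it presumes a plain numpy column vector is accepted as the ket. It shows
# how the subsystem entropy of a random state on ``n`` qubits could be
# estimated without ever forming the reduced density matrix explicitly.
def _demo_subsys_entropy(n=12, seed=7):
    import numpy as np
    rng = np.random.default_rng(seed)
    # random normalized complex ket on n qubits, stored as a column vector
    psi = rng.normal(size=(2 ** n, 1)) + 1j * rng.normal(size=(2 ** n, 1))
    psi /= np.linalg.norm(psi)
    # keep the first half of the qubits as subsystem 'a'
    return entropy_subsys_approx(psi, dims=[2] * n,
                                 sysa=tuple(range(n // 2)),
                                 tol=0.05, verbosity=0)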
def tr_sqrt_subsys_approx(psi_ab, dims, sysa, backend=None, **kwargs):
"""Approximate the trace sqrt of a pure state's subsystem.
Parameters
----------
psi_ab : ket
        Bipartite state to partially trace and find the trace sqrt of.
    dims : sequence of int
        The sub dimensions of ``psi_ab``.
    sysa : int or sequence of int
        Index(es) of the 'a' subsystem(s) to keep.
kwargs
Supplied to :func:`approx_spectral_function`.
"""
lo = lazy_ptr_linop(psi_ab, dims=dims, sysa=sysa, backend=backend)
return tr_sqrt_approx(lo, **kwargs)
def norm_ppt_subsys_approx(psi_abc, dims, sysa, sysb, backend=None, **kwargs):
"""Estimate the norm of the partial transpose of a pure state's subsystem.
"""
lo = lazy_ptr_ppt_linop(psi_abc, dims=dims, sysa=sysa,
sysb=sysb, backend=backend)
return tr_abs_approx(lo, **kwargs)
def logneg_subsys_approx(psi_abc, dims, sysa, sysb, **kwargs):
"""Estimate the logarithmic negativity of a pure state's subsystem.
Parameters
----------
psi_abc : ket
        Pure tripartite state, for which to estimate the entanglement between
'a' and 'b'.
dims : sequence of int
The sub dimensions of ``psi_abc``.
    sysa : int or sequence of int
        Index(es) of the 'a' subsystem(s) to keep, with respect to all
        the dimensions, ``dims``, (i.e. pre-partial trace).
    sysb : int or sequence of int
        Index(es) of the 'b' subsystem(s) to keep, with respect to all
        the dimensions, ``dims``, (i.e. pre-partial trace).
kwargs
Supplied to :func:`approx_spectral_function`.
"""
nrm = norm_ppt_subsys_approx(psi_abc, dims, sysa, sysb, **kwargs)
return max(0.0, log2(nrm))
def negativity_subsys_approx(psi_abc, dims, sysa, sysb, **kwargs):
"""Estimate the negativity of a pure state's subsystem.
Parameters
----------
psi_abc : ket
        Pure tripartite state, for which to estimate the entanglement between
'a' and 'b'.
dims : sequence of int
The sub dimensions of ``psi_abc``.
    sysa : int or sequence of int
        Index(es) of the 'a' subsystem(s) to keep, with respect to all
        the dimensions, ``dims``, (i.e. pre-partial trace).
    sysb : int or sequence of int
        Index(es) of the 'b' subsystem(s) to keep, with respect to all
        the dimensions, ``dims``, (i.e. pre-partial trace).
kwargs
Supplied to :func:`approx_spectral_function`.
"""
nrm = norm_ppt_subsys_approx(psi_abc, dims, sysa, sysb, **kwargs)
return max(0.0, (nrm - 1) / 2)
def gen_bipartite_spectral_fn(exact_fn, approx_fn, pure_default):
"""Generate a function that computes a spectral quantity of the subsystem
    of a pure state. It automatically works with the smaller of the two
    subsystems (their spectra are identical), and switches to the approximate
    Lanczos method for subsystems above a size threshold.
Parameters
----------
exact_fn : callable
The function that computes the quantity on a density matrix, with
signature: ``exact_fn(rho_a, rank=...)``.
approx_fn : callable
The function that approximately computes the quantity using a lazy
representation of the whole system. With signature
``approx_fn(psi_ab, dims, sysa, **approx_opts)``.
pure_default : float
The default value when the whole state is the subsystem.
Returns
-------
bipartite_spectral_fn : callable
The function, with signature:
``(psi_ab, dims, sysa, approx_thresh=2**13, **approx_opts)``
"""
def bipartite_spectral_fn(psi_ab, dims, sysa, approx_thresh=2**13,
**approx_opts):
sysa = int2tup(sysa)
sz_a = prod(d for i, d in enumerate(dims) if i in sysa)
sz_b = prod(dims) // sz_a
# pure state
if sz_b == 1:
return pure_default
# also check if system b is smaller, since spectrum is same for both
if sz_b < sz_a:
# if so swap things around
sz_a = sz_b
sysb = [i for i in range(len(dims)) if i not in sysa]
sysa = sysb
# check whether to use approx lanczos method
if (approx_thresh is not None) and (sz_a >= approx_thresh):
return approx_fn(psi_ab, dims, sysa, **approx_opts)
rho_a = ptr(psi_ab, dims, sysa)
return exact_fn(rho_a)
return bipartite_spectral_fn
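# A hedged sketch of how the factory above can be used. The names
# ``_demo_bipartite_entropy_fn`` and ``_exact_entropy`` are illustrative
# assumptions, not definitions made elsewhere in quimb: they pair a dense
# exact routine with the lazy estimator defined above, so the generated
# function dispatches automatically on subsystem size.
def _demo_bipartite_entropy_fn():
    import numpy as np

    def _exact_entropy(rho):
        # exact von Neumann entropy of a small dense density matrix
        evals = np.linalg.eigvalsh(rho)
        evals = evals[evals > 1e-14]
        return float(-np.sum(evals * np.log2(evals)))

    # small subsystems -> exact partial trace; subsystems with dimension
    # >= 2**13 (the default threshold) -> stochastic Lanczos estimate via
    # entropy_subsys_approx
    return gen_bipartite_spectral_fn(
        exact_fn=_exact_entropy,
        approx_fn=entropy_subsys_approx,
        pure_default=0.0,
    )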
| jcmgray/quijy | quimb/linalg/approx_spectral.py | Python | mit | 30,236 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACCOUNT_SID"
auth_token = "your_auth_token"
client = Client(account_sid, auth_token)
number = client.lookups.phone_numbers("+15108675309").fetch(type="carrier")
print(number.carrier['type'])
print(number.carrier['name'])
| teoreteetik/api-snippets | lookups/lookup-get-basic-example-1/lookup-get-basic-example-1.6.x.py | Python | mit | 406 |
# -*- coding: mbcs -*-
from part import *
from material import *
from section import *
from assembly import *
from step import *
from interaction import *
from load import *
from mesh import *
from optimization import *
from job import *
from sketch import *
from visualization import *
from connectorBehavior import *
import time
# Define constants
MODEL_NAME = "2D-MODEL"
STEP_PATH = "C:/Users/User/Desktop/LABPro/PI1501 - Rassini-Bypasa/geom/stp/"
STEP_FILES = ["sketch_lower_02","sketch_lower_03","sketch_lower_left_01","sketch_lower_right_01",
"sketch_pisador","sketch_upper_03","sketch_upper_left_01","sketch_upper_right_01",
"sketch_upper_left_02","sketch_upper_right_02"]
DYNEXP_STEPS = ["Initial","Step-01-Down","Step-01-Up","Step-02-Down","Step-02-Up","Step-03-Down","Step-03-Up"]
NFRAMES = 50.0
TIME_PERIOD = 0.86
YDISP = 1.428
MESH_SIZE_QUAD = 0.02
MESH_SIZE_TRI = 0.025
JOB_NAME = "MSZ-"+str(MESH_SIZE_QUAD).replace(".","")+time.strftime("_%d-%m-%Y-%H%M",time.localtime())
mdb.models.changeKey(fromName='Model-1', toName=MODEL_NAME)
# Define parts
# Blank
mdb.openStep(STEP_PATH + 'sketch_mp.STEP', scaleFromFile=OFF)
mdb.models[MODEL_NAME].ConstrainedSketchFromGeometryFile(geometryFile=mdb.acis, name='plate')
mdb.models[MODEL_NAME].ConstrainedSketch(name='__profile__', sheetSize=10.0)
mdb.models[MODEL_NAME].sketches['__profile__'].sketchOptions.setValues(gridOrigin=(0.0, 0.0))
mdb.models[MODEL_NAME].sketches['__profile__'].retrieveSketch(sketch=mdb.models[MODEL_NAME].sketches['plate'])
mdb.models[MODEL_NAME].Part(dimensionality=TWO_D_PLANAR, name='plate', type=DEFORMABLE_BODY)
mdb.models[MODEL_NAME].parts['plate'].BaseShell(sketch=mdb.models[MODEL_NAME].sketches['__profile__'])
del mdb.models[MODEL_NAME].sketches['__profile__']
# Analytic surfaces
for _stp in STEP_FILES:
mdb.openStep(STEP_PATH + _stp + ".STEP", scaleFromFile=OFF)
mdb.models[MODEL_NAME].ConstrainedSketchFromGeometryFile(geometryFile=mdb.acis, name=_stp)
mdb.models[MODEL_NAME].ConstrainedSketch(name='__profile__', sheetSize=10.0)
mdb.models[MODEL_NAME].sketches['__profile__'].sketchOptions.setValues(gridOrigin=(0.0, 0.0))
mdb.models[MODEL_NAME].sketches['__profile__'].retrieveSketch(sketch=mdb.models[MODEL_NAME].sketches[_stp])
mdb.models[MODEL_NAME].Part(dimensionality=TWO_D_PLANAR, name=_stp[7::], type=ANALYTIC_RIGID_SURFACE)
mdb.models[MODEL_NAME].parts[_stp[7::]].AnalyticRigidSurf2DPlanar(sketch=mdb.models[MODEL_NAME].sketches['__profile__'])
del mdb.models[MODEL_NAME].sketches['__profile__']
# Material
mdb.models[MODEL_NAME].Material(name='Acero 1018 US')
mdb.models[MODEL_NAME].materials['Acero 1018 US'].Density(table=((0.10555, ),
))
mdb.models[MODEL_NAME].materials['Acero 1018 US'].Elastic(table=((29700000.0,
0.33), ))
mdb.models[MODEL_NAME].materials['Acero 1018 US'].Plastic(table=((50800.03458,
0.0), (51320.13977, 0.82), (51376.4144, 0.841), (51781.35965, 0.898), (
51784.84056, 0.92), (52105.22884, 0.977), (52140.03789, 0.999), (
52442.8766, 1.056), (52529.17404, 1.078), (52876.8294, 1.135), (
52988.79851, 1.157), (53391.85827, 1.213), (53507.88843, 1.236), (
53929.8031, 1.292), (54093.40563, 1.315), (54475.57997, 1.371), (
54659.92289, 1.394), (55019.18127, 1.45), (55191.92117, 1.473), (
55558.2864, 1.528), (55721.59885, 1.551), (56028.93374, 1.607), (
56243.58953, 1.63), (56553.82517, 1.686), (56798.35873, 1.709), (
57048.25869, 1.764), (57256.53283, 1.788), (57509.91369, 1.843), (
57744.43965, 1.867), (57970.26335, 1.922), (58210.01067, 1.946), (
58405.23141, 2.0), (58661.22295, 2.025), (58860.35972, 2.079), (
59113.88562, 2.104), (59211.64103, 2.158), (59521.15148, 2.183), (
59624.27328, 2.236), (59925.51658, 2.262), (59986.72249, 2.315), (
60343.08012, 2.341), (60397.46926, 2.394), (60699.58279, 2.42), (
60755.27727, 2.472), (61121.35242, 2.498), (61126.2837, 2.551), (
61471.90854, 2.577), (61499.75578, 2.63), (61825.65549, 2.656), (
62178.96733, 2.735), (62518.50059, 2.814), (62831.05683, 2.893), (
63136.65126, 2.971), (63479.37535, 3.05), (63745.51953, 3.129), (
64049.08343, 3.208), (64353.37253, 3.287), (64638.22657, 3.365), (
64915.68369, 3.444), (65132.07994, 3.523), (65419.25459, 3.601), (
65630.42948, 3.68), (65650.44468, 3.702), (65905.85607, 3.759), (
66157.78655, 3.838), (66380.12935, 3.917), (66405.22087, 3.939), (
66614.07516, 3.995), (66626.69344, 4.018), (66811.47147, 4.074), (
66847.73089, 4.096), (67017.27996, 4.153), (67059.05082, 4.175), (
67223.81365, 4.232), (67244.98915, 4.254), (67412.21762, 4.31), (
67430.63741, 4.333), (67593.65978, 4.389), (67648.48403, 4.412), (
67801.64385, 4.468), (67842.39944, 4.491), (67989.75774, 4.547), (
68045.45222, 4.57), (68147.55876, 4.625), (68235.74168, 4.649), (
68309.85595, 4.704), (68399.77932, 4.727), (68457.64936, 4.782), (
68573.67952, 4.807), (68624.44272, 4.861), (68734.96144, 4.885), (
68763.67891, 4.94), (68878.40373, 4.964), (68931.05242, 5.019), (
69078.26568, 5.043), (69230.7003, 5.122), (69405.18066, 5.201), (69521.936,
5.28), (69665.95844, 5.359), (69781.40845, 5.438), (69915.13321, 5.517), (
70051.75872, 5.595), (70168.51407, 5.674), (70323.41434, 5.753), (
70415.65831, 5.832), (70540.39073, 5.911), (70648.00871, 5.989), (
70769.5503, 6.068), (70848.0157, 6.147), (70950.55735, 6.226), (
71035.11433, 6.305), (71122.42702, 6.384), (71228.44958, 6.462), (
71316.92258, 6.541), (71433.82297, 6.62), (71501.12046, 6.699), (
71590.31864, 6.777), (71635.57041, 6.856), (71722.01288, 6.935), (
71798.30271, 7.014), (71877.9284, 7.092), (71923.47024, 7.171), (
71956.24876, 7.194), (72010.05775, 7.25), (72020.06535, 7.273), (
72045.44695, 7.329), (72086.20254, 7.352), (72130.00393, 7.407), (
72159.01147, 7.43), (72224.27843, 7.51), (72256.76688, 7.565), (
72294.91179, 7.588), (72343.7895, 7.667), (72402.52976, 7.746), (
72468.23184, 7.825), (72507.53706, 7.904), (72582.08644, 7.983), (
72584.40704, 8.062), (72681.43726, 8.141), (72731.62031, 8.22), (
72770.78048, 8.298), (72782.23846, 8.377), (72814.72691, 8.456), (
72891.01674, 8.535), (72944.82572, 8.614), (72957.87912, 8.693), (
72995.00877, 8.771), (72997.90952, 8.85), (73058.82536, 8.929), (
73082.61154, 9.008), (73141.93196, 9.087), (73161.22197, 9.244), (
73232.14541, 9.402), (73240.70263, 9.56), (73281.7483, 9.717), (
73347.01527, 9.796), (73350.06106, 10.347), (73396.03801, 10.425), (
73399.0838, 10.922), (73400.82425, 11.001), (73403.87005, 11.08), (
73414.89291, 11.159), (73416.7784, 11.237), (73221.84773, 11.261), (
73215.46607, 11.419), (73212.27524, 11.498), (73195.45087, 11.655), (
73166.29829, 12.0), (73143.09226, 12.078), (73120.61142, 12.157), (
73089.13824, 12.236), (73066.07724, 12.314), (73033.87887, 12.472), (
73014.87893, 12.55), (72980.505, 12.629), (72971.80274, 12.707), (
72927.71128, 12.786), (72924.08533, 12.865), (72857.07792, 12.943), (
72843.58941, 13.022), (72820.0933, 13.101), (72793.11629, 13.179), (
72746.41415, 13.258), (72680.27696, 13.415), (72649.23889, 13.494), (
72601.23141, 13.572), (72574.97959, 13.651), (72519.57519, 13.729), (
72507.82713, 13.808), (72442.99528, 13.887), (72421.09459, 13.965), (
72361.33906, 14.044), (72300.7133, 14.123), (72243.56845, 14.201), (
72163.07252, 14.28), (72124.49249, 14.359), (72032.82867, 14.437), (
71990.04255, 14.516), (71925.06566, 14.595), (71852.98192, 14.673), (
71748.98989, 14.752), (71652.1047, 14.831), (71559.86073, 14.91), (
71452.53283, 14.988), (71351.15148, 15.067), (71227.86943, 15.146), (
71078.4806, 15.225), (70955.92374, 15.303), (70791.74107, 15.382), (
70787.09986, 15.412), (70650.76442, 15.461), (70596.66536, 15.491), (
70462.9406, 15.539), (70416.81861, 15.569), (70313.55177, 15.618), (
70218.69712, 15.648), (70150.38436, 15.697), (70014.77411, 15.726), (
69924.99577, 15.775), (69814.47705, 15.805), (69767.33979, 15.854), (
69637.09594, 15.884), (69515.98946, 15.933), (69392.56238, 15.962), (
69319.31834, 16.012), (69142.51738, 16.041), (69060.71612, 16.09), (
68928.44174, 16.12), (68816.76271, 16.169), (68637.35107, 16.198), (
68582.09171, 16.248), (68373.23742, 16.277), (68544.38191, 16.305), (
68325.81009, 16.327), (68118.98633, 16.355), (68253.29124, 16.384), (
68029.208, 16.405), (67845.73531, 16.434), (67980.62037, 16.462), (
67759.87299, 16.484), (67622.9574, 16.512), (67687.78925, 16.541), (
67462.83578, 16.563), (67273.41654, 16.591), (67412.94281, 16.62), (
67190.74505, 16.642), (66966.66181, 16.67), (67085.73776, 16.699), (
66852.08202, 16.721), (66597.83094, 16.748), (66758.67774, 16.778), (
66521.68614, 16.8), (66258.29768, 16.827), (66394.77816, 16.856), (
66174.75596, 16.878), (65891.06222, 16.906), (66038.85564, 16.935), (
65830.00135, 16.957), (65559.07093, 16.984), (65665.09349, 17.014), (
65456.67431, 17.036), (65210.98045, 17.063), (65342.09453, 17.092), (
65132.37001, 17.115), (64827.50077, 17.142), (64947.73702, 17.171), (
64732.06596, 17.194), (64440.83026, 17.22), (64575.13517, 17.25), (
64348.15117, 17.273), (64072.86962, 17.299), (64155.39607, 17.329), (
63938.99982, 17.351), (63648.92442, 17.377), (63737.68749, 17.407), (
63509.9783, 17.43), (63205.39913, 17.456), (63274.29204, 17.486), (
63073.84994, 17.509), (62738.08766, 17.535), (62830.62172, 17.565), (
62611.46975, 17.588), (62280.05861, 17.614), (62352.86753, 17.644), (
62131.39496, 17.667), (61823.04481, 17.692), (61841.02949, 17.722), (
61656.68657, 17.746), (61339.92423, 17.771), (61393.44315, 17.801), (
61191.98578, 17.825), (60844.62049, 17.85), (60879.2845, 17.88), (
60643.30816, 17.904), (60336.55343, 17.928), (60316.68326, 17.959), (
60104.34807, 17.983), (59781.49415, 18.007), (59765.68504, 18.037), (
59561.32692, 18.062), (59187.12965, 18.086), (59195.68688, 18.116), (
58989.00816, 18.14), (58618.58187, 18.164), (58581.59726, 18.194), (
58414.36879, 18.219), (58036.2555, 18.243), (57970.40839, 18.273), (
57758.50831, 18.298), (57411.72317, 18.322), (57320.92957, 18.352), (
57138.3271, 18.377), (56738.89328, 18.4), (56703.35904, 18.43), (
56482.46662, 18.456), (56095.361, 18.479), (56006.45289, 18.509), (
55773.66738, 18.535), (55431.37841, 18.558), (55320.42457, 18.588), (
55075.60093, 18.614), (54688.49531, 18.637), (54564.198, 18.667), (
54344.61093, 18.692), (53943.14657, 18.715), (53824.50573, 18.745), (
53540.0868, 18.771), (53109.1798, 18.794), (52984.01226, 18.824), (
52730.34133, 18.85), (52306.97628, 18.873), (52121.76314, 18.903), (
51858.66475, 18.929), (51442.26151, 18.952), (51204.39968, 18.982), (
50952.03409, 19.008), (50487.47833, 19.03), (50252.08215, 19.06), (
49540.52719, 19.088)))
# Steps
for jj in range(1,len(DYNEXP_STEPS)):
    mdb.models[MODEL_NAME].ExplicitDynamicsStep(name=DYNEXP_STEPS[jj], previous=DYNEXP_STEPS[jj-1], timePeriod=TIME_PERIOD)
# Reference points
mdb.models[MODEL_NAME].parts['lower_02'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['lower_02'].vertices[0])
mdb.models[MODEL_NAME].parts['lower_03'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['lower_03'].vertices[0])
mdb.models[MODEL_NAME].parts['lower_left_01'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['lower_left_01'].vertices[5])
mdb.models[MODEL_NAME].parts['lower_right_01'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['lower_right_01'].vertices[0])
mdb.models[MODEL_NAME].parts['pisador'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['pisador'].InterestingPoint(
mdb.models[MODEL_NAME].parts['pisador'].edges[0], MIDDLE))
mdb.models[MODEL_NAME].parts['upper_03'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['upper_03'].vertices[5])
mdb.models[MODEL_NAME].parts['upper_left_01'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['upper_left_01'].vertices[0])
mdb.models[MODEL_NAME].parts['upper_left_02'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['upper_left_02'].vertices[0])
mdb.models[MODEL_NAME].parts['upper_right_01'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['upper_right_01'].vertices[3])
mdb.models[MODEL_NAME].parts['upper_right_02'].ReferencePoint(point=
mdb.models[MODEL_NAME].parts['upper_right_02'].vertices[6])
# Partition of plate ============================================================
# Datum points
mdb.models[MODEL_NAME].parts['plate'].DatumPointByOffset(point=
mdb.models[MODEL_NAME].parts['plate'].vertices[0], vector=(0.0, 0.06, 0.0))
mdb.models[MODEL_NAME].parts['plate'].DatumPointByOffset(point=
mdb.models[MODEL_NAME].parts['plate'].vertices[5], vector=(0.0, 0.06, 0.0))
mdb.models[MODEL_NAME].parts['plate'].PartitionFaceByShortestPath(faces=
mdb.models[MODEL_NAME].parts['plate'].faces.getSequenceFromMask(('[#1 ]',
), ), point1=mdb.models[MODEL_NAME].parts['plate'].vertices[4], point2=
mdb.models[MODEL_NAME].parts['plate'].vertices[1])
mdb.models[MODEL_NAME].parts['plate'].PartitionFaceByShortestPath(faces=
mdb.models[MODEL_NAME].parts['plate'].faces.getSequenceFromMask(('[#2 ]',
), ), point1=mdb.models[MODEL_NAME].parts['plate'].vertices[4], point2=
mdb.models[MODEL_NAME].parts['plate'].datums[3])
mdb.models[MODEL_NAME].parts['plate'].PartitionFaceByShortestPath(faces=
mdb.models[MODEL_NAME].parts['plate'].faces.getSequenceFromMask(('[#4 ]',
), ), point1=mdb.models[MODEL_NAME].parts['plate'].vertices[6], point2=
mdb.models[MODEL_NAME].parts['plate'].datums[2])
mdb.models[MODEL_NAME].parts['plate'].PartitionFaceByCurvedPathEdgePoints(
edge1=mdb.models[MODEL_NAME].parts['plate'].edges[3], edge2=
mdb.models[MODEL_NAME].parts['plate'].edges[1], face=
mdb.models[MODEL_NAME].parts['plate'].faces[0], point1=
mdb.models[MODEL_NAME].parts['plate'].InterestingPoint(
mdb.models[MODEL_NAME].parts['plate'].edges[3], MIDDLE), point2=
mdb.models[MODEL_NAME].parts['plate'].InterestingPoint(
mdb.models[MODEL_NAME].parts['plate'].edges[1], MIDDLE))
# Assembly =========================================================================
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name='lower_02-1',
part=mdb.models[MODEL_NAME].parts['lower_02'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name='lower_03-1',
part=mdb.models[MODEL_NAME].parts['lower_03'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name=
'lower_left_01-1', part=mdb.models[MODEL_NAME].parts['lower_left_01'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name=
'lower_right_01-1', part=mdb.models[MODEL_NAME].parts['lower_right_01'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name='pisador-1',
part=mdb.models[MODEL_NAME].parts['pisador'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name='plate-1',
part=mdb.models[MODEL_NAME].parts['plate'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name='upper_03-1',
part=mdb.models[MODEL_NAME].parts['upper_03'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name=
'upper_left_01-1', part=mdb.models[MODEL_NAME].parts['upper_left_01'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name=
'upper_left_02-1', part=mdb.models[MODEL_NAME].parts['upper_left_02'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name=
'upper_right_01-1', part=mdb.models[MODEL_NAME].parts['upper_right_01'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name=
'upper_right_02-1', part=mdb.models[MODEL_NAME].parts['upper_right_02'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name=
'lower_left_01-2', part=mdb.models[MODEL_NAME].parts['lower_left_01'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name=
'lower_right_01-2', part=mdb.models[MODEL_NAME].parts['lower_right_01'])
mdb.models[MODEL_NAME].rootAssembly.Instance(dependent=ON, name='lower_02-2',
part=mdb.models[MODEL_NAME].parts['lower_02'])
# Translate parts
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('pisador-1', ),
vector=(-0.373, 0.13, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('plate-1', ),
vector=(0.0, 0.2845, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('lower_left_01-1',
), vector=(-2.5275, 2.032495, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('lower_right_01-1',
), vector=(-2.0315, 2.032495, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('upper_left_01-1',
), vector=(-8.148372, 2.25428, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('upper_right_01-1',
), vector=(-7.402372, 2.25428, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('upper_left_02-1',
'upper_right_02-1'), vector=(2.0, 1.421506, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('upper_03-1', ),
vector=(-10.7795, 0.637, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('lower_02-1', ),
vector=(-7.735, -2.5, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('lower_03-1', ),
vector=(-14.3255, -0.784, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('lower_left_01-2',
), vector=(-2.5275, 1.532495, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('lower_right_01-2',
), vector=(-2.0315, 1.532495, 0.0))
mdb.models[MODEL_NAME].rootAssembly.translate(instanceList=('lower_02-2', ),
vector=(-7.735, -3.0, 0.0))
# Surfaces ========================================================================
mdb.models[MODEL_NAME].parts['lower_02'].Surface(name='Surf-1', side2Edges=
mdb.models[MODEL_NAME].parts['lower_02'].edges.getSequenceFromMask((
'[#fffff ]', ), ))
mdb.models[MODEL_NAME].parts['lower_03'].Surface(name='Surf-1', side2Edges=
mdb.models[MODEL_NAME].parts['lower_03'].edges.getSequenceFromMask((
'[#1f ]', ), ))
mdb.models[MODEL_NAME].parts['lower_left_01'].Surface(name='Surf-1',
side2Edges=
mdb.models[MODEL_NAME].parts['lower_left_01'].edges.getSequenceFromMask((
'[#1f ]', ), ))
mdb.models[MODEL_NAME].parts['lower_right_01'].Surface(name='Surf-1',
side2Edges=
mdb.models[MODEL_NAME].parts['lower_right_01'].edges.getSequenceFromMask((
'[#1f ]', ), ))
mdb.models[MODEL_NAME].parts['pisador'].Surface(name='Surf-1', side2Edges=
mdb.models[MODEL_NAME].parts['pisador'].edges.getSequenceFromMask(('[#1 ]',
), ))
mdb.models[MODEL_NAME].parts['plate'].Surface(name='Surf-1', side1Edges=
mdb.models[MODEL_NAME].parts['plate'].edges.getSequenceFromMask(('[#3d18 ]',
), ))
mdb.models[MODEL_NAME].parts['upper_03'].Surface(name='Surf-1', side2Edges=
mdb.models[MODEL_NAME].parts['upper_03'].edges.getSequenceFromMask((
'[#1f ]', ), ))
mdb.models[MODEL_NAME].parts['upper_left_01'].Surface(name='Surf-1',
side2Edges=
mdb.models[MODEL_NAME].parts['upper_left_01'].edges.getSequenceFromMask((
'[#7 ]', ), ))
mdb.models[MODEL_NAME].parts['upper_left_02'].Surface(name='Surf-1',
side2Edges=
mdb.models[MODEL_NAME].parts['upper_left_02'].edges.getSequenceFromMask((
'[#3f ]', ), ))
mdb.models[MODEL_NAME].parts['upper_right_01'].Surface(name='Surf-1',
side2Edges=
mdb.models[MODEL_NAME].parts['upper_right_01'].edges.getSequenceFromMask((
'[#7 ]', ), ))
mdb.models[MODEL_NAME].parts['upper_right_02'].Surface(name='Surf-1',
side2Edges=
mdb.models[MODEL_NAME].parts['upper_right_02'].edges.getSequenceFromMask((
'[#3f ]', ), ))
# Create section ===================================================================
mdb.models[MODEL_NAME].HomogeneousSolidSection(material='Acero 1018 US', name=
'mp-section', thickness=2.99)
mdb.models[MODEL_NAME].parts['plate'].SectionAssignment(offset=0.0,
offsetField='', offsetType=MIDDLE_SURFACE, region=Region(
faces=mdb.models[MODEL_NAME].parts['plate'].faces.getSequenceFromMask(
mask=('[#1f ]', ), )), sectionName='mp-section', thicknessAssignment=
FROM_SECTION)
# Inertia & Mass assignment ========================================================
mdb.models[MODEL_NAME].parts['lower_02'].engineeringFeatures.PointMassInertia(
alpha=0.0, composite=0.0, i11=0.001, i22=0.001, i33=0.001, mass=0.01, name=
'Inertia-1', region=Region(referencePoints=(
mdb.models[MODEL_NAME].parts['lower_02'].referencePoints[2], )))
mdb.models[MODEL_NAME].parts['lower_left_01'].engineeringFeatures.PointMassInertia(
alpha=0.0, composite=0.0, i11=0.001, i22=0.001, i33=0.001, mass=0.01, name=
'Inertia-1', region=Region(referencePoints=(
mdb.models[MODEL_NAME].parts['lower_left_01'].referencePoints[2], )))
mdb.models[MODEL_NAME].parts['lower_right_01'].engineeringFeatures.PointMassInertia(
alpha=0.0, composite=0.0, i11=0.001, i22=0.001, i33=0.001, mass=0.01, name=
'Inertia-1', region=Region(referencePoints=(
mdb.models[MODEL_NAME].parts['lower_right_01'].referencePoints[2], )))
mdb.models[MODEL_NAME].parts['pisador'].engineeringFeatures.PointMassInertia(
alpha=0.0, composite=0.0, i11=0.001, i22=0.001, i33=0.001, mass=0.01, name=
'Inertia-1', region=Region(referencePoints=(
mdb.models[MODEL_NAME].parts['pisador'].referencePoints[2], )))
# Regenerate assembly
mdb.models[MODEL_NAME].rootAssembly.regenerate()
# Constraints ========================================================================
mdb.models[MODEL_NAME].RigidBody(name='Constraint-1', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_02-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['lower_02-1'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-2', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_02-2'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['lower_02-2'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-3', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_03-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['lower_03-1'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-4', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_left_01-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['lower_left_01-1'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-5', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_left_01-2'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['lower_left_01-2'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-6', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_right_01-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['lower_right_01-1'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-7', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_right_01-2'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['lower_right_01-2'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-8', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['pisador-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['pisador-1'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-9', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['upper_03-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['upper_03-1'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-10', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['upper_left_01-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['upper_left_01-1'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-11', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['upper_left_02-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['upper_left_02-1'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-12', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['upper_right_01-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['upper_right_01-1'].surfaces['Surf-1'])
mdb.models[MODEL_NAME].RigidBody(name='Constraint-13', refPointRegion=Region(
referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['upper_right_02-1'].referencePoints[2],
)), surfaceRegion=
mdb.models[MODEL_NAME].rootAssembly.instances['upper_right_02-1'].surfaces['Surf-1'])
# Contact properties
mdb.models[MODEL_NAME].ContactProperty('Friction')
mdb.models[MODEL_NAME].interactionProperties['Friction'].TangentialBehavior(
dependencies=0, directionality=ISOTROPIC, elasticSlipStiffness=None,
formulation=PENALTY, fraction=0.005, maximumElasticSlip=FRACTION,
pressureDependency=OFF, shearStressLimit=None, slipRateDependency=OFF,
table=((0.1, ), ), temperatureDependency=OFF)
# Contacts =========================================================================
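# The loop below creates one explicit surface-to-surface (kinematic) contact
# pair between each rigid tool surface and the blank ('plate-1'), plus a
# self-contact interaction on the blank so folded regions cannot
# interpenetrate.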
for _instance in mdb.models[MODEL_NAME].rootAssembly.instances.keys():
if not(_instance=="plate-1"):
mdb.models[MODEL_NAME].SurfaceToSurfaceContactExp(clearanceRegion=None,
createStepName=DYNEXP_STEPS[1], datumAxis=None, initialClearance=OMIT,
interactionProperty='Friction', master=
mdb.models[MODEL_NAME].rootAssembly.instances[_instance].surfaces['Surf-1']
, mechanicalConstraint=KINEMATIC, name="INT-"+_instance, slave=
mdb.models[MODEL_NAME].rootAssembly.instances['plate-1'].surfaces['Surf-1']
, sliding=FINITE)
mdb.models[MODEL_NAME].SelfContactExp(createStepName=DYNEXP_STEPS[1],
interactionProperty='Friction', mechanicalConstraint=KINEMATIC, name=
'INT-SELF', surface=
mdb.models[MODEL_NAME].rootAssembly.instances['plate-1'].surfaces['Surf-1'])
# mdb.models[MODEL_NAME].rootAssembly.regenerate()
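# Re-schedule the tool contacts across the forming stages: each 'move' shifts
# the step in which an interaction first becomes active, and each 'deactivate'
# switches it off from the given step onward, so only the tools engaged in a
# particular stroke remain in contact with the blank.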
mdb.models[MODEL_NAME].interactions['INT-lower_02-1'].move('Step-01-Down',
'Step-01-Up')
mdb.models[MODEL_NAME].interactions['INT-lower_02-1'].move('Step-01-Up',
'Step-02-Down')
mdb.models[MODEL_NAME].interactions['INT-lower_02-2'].deactivate('Step-02-Up')
mdb.models[MODEL_NAME].interactions['INT-lower_03-1'].move('Step-01-Down',
'Step-01-Up')
mdb.models[MODEL_NAME].interactions['INT-lower_03-1'].move('Step-01-Up',
'Step-02-Down')
mdb.models[MODEL_NAME].interactions['INT-lower_03-1'].move('Step-02-Down',
'Step-02-Up')
mdb.models[MODEL_NAME].interactions['INT-lower_03-1'].move('Step-02-Up',
'Step-03-Down')
mdb.models[MODEL_NAME].interactions['INT-lower_left_01-1'].deactivate(
'Step-02-Down')
mdb.models[MODEL_NAME].interactions['INT-lower_left_01-2'].deactivate(
'Step-02-Down')
mdb.models[MODEL_NAME].interactions['INT-lower_right_01-1'].deactivate(
'Step-02-Down')
mdb.models[MODEL_NAME].interactions['INT-lower_right_01-2'].deactivate(
'Step-02-Down')
mdb.models[MODEL_NAME].interactions['INT-pisador-1'].deactivate('Step-03-Down')
mdb.models[MODEL_NAME].interactions['INT-lower_02-1'].deactivate('Step-03-Down')
# Amplitude
mdb.models[MODEL_NAME].SmoothStepAmplitude(data=((0.0, 0.0), (0.86, 1.0)),
name='Amp-1', timeSpan=STEP)
# Field outputs
mdb.models[MODEL_NAME].fieldOutputRequests['F-Output-1'].setValues(
    exteriorOnly=OFF, rebar=EXCLUDE, region=MODEL, sectionPoints=DEFAULT,
    timeInterval=TIME_PERIOD/NFRAMES, variables=PRESELECT)
# Boundary conditions ====================================================================
# Fixed Points
mdb.models[MODEL_NAME].DisplacementBC(amplitude='Amp-1', createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=
None, name='BC-Fixed-Points', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_02-2'].referencePoints[2],
mdb.models[MODEL_NAME].rootAssembly.instances['lower_right_01-2'].referencePoints[2],
mdb.models[MODEL_NAME].rootAssembly.instances['lower_left_01-2'].referencePoints[2],
mdb.models[MODEL_NAME].rootAssembly.instances['lower_03-1'].referencePoints[2],
)), u1=0.0, u2=0, ur3=0.0)
# Upper
mdb.models[MODEL_NAME].DisplacementBC(amplitude='Amp-1', createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=
None, name='BC-Upper-01', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['upper_left_01-1'].referencePoints[2],
mdb.models[MODEL_NAME].rootAssembly.instances['upper_right_01-1'].referencePoints[2],
)), u1=0.0, u2=-(YDISP), ur3=0.0)
mdb.models[MODEL_NAME].DisplacementBC(amplitude='Amp-1', createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=
None, name='BC-Upper-02', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['upper_left_02-1'].referencePoints[2],
mdb.models[MODEL_NAME].rootAssembly.instances['upper_right_02-1'].referencePoints[2],
)), u1=0.0, u2=0, ur3=0.0)
mdb.models[MODEL_NAME].DisplacementBC(amplitude='Amp-1', createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=
None, name='BC-Upper-03', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['upper_03-1'].referencePoints[2], ))
, u1=0.0, u2=0, ur3=0.0)
mdb.models[MODEL_NAME].boundaryConditions['BC-Upper-01'].setValuesInStep(stepName=DYNEXP_STEPS[2], u2=YDISP)
mdb.models[MODEL_NAME].boundaryConditions['BC-Upper-01'].setValuesInStep(stepName=DYNEXP_STEPS[3], u2=0)
mdb.models[MODEL_NAME].boundaryConditions['BC-Upper-02'].setValuesInStep(stepName=DYNEXP_STEPS[3], u2=-(YDISP-0.006))
mdb.models[MODEL_NAME].boundaryConditions['BC-Upper-02'].setValuesInStep(stepName=DYNEXP_STEPS[4], u2=YDISP-0.006)
mdb.models[MODEL_NAME].boundaryConditions['BC-Upper-02'].setValuesInStep(stepName=DYNEXP_STEPS[5], u2=0)
mdb.models[MODEL_NAME].boundaryConditions['BC-Upper-03'].setValuesInStep(stepName=DYNEXP_STEPS[5], u2=-(YDISP+0.001))
mdb.models[MODEL_NAME].boundaryConditions['BC-Upper-03'].setValuesInStep(stepName=DYNEXP_STEPS[6], u2=(YDISP+0.001))
# Lower
mdb.models[MODEL_NAME].DisplacementBC(amplitude='Amp-1', createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=
None, name='BC-LowerL-01', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_left_01-1'].referencePoints[2],
)), u1=0.0, u2=UNSET, ur3=0.0)
mdb.models[MODEL_NAME].DisplacementBC(amplitude='Amp-1', createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=
None, name='BC-LowerR-01', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_right_01-1'].referencePoints[2],
)), u1=0.0, u2=UNSET, ur3=0.0)
mdb.models[MODEL_NAME].DisplacementBC(amplitude='Amp-1', createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=
None, name='BC-Lower-02', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_02-1'].referencePoints[2],
)), u1=0.0, u2=0.0, ur3=0.0)
mdb.models[MODEL_NAME].boundaryConditions['BC-LowerL-01'].setValuesInStep(stepName=DYNEXP_STEPS[2], u2=0.51)
mdb.models[MODEL_NAME].boundaryConditions['BC-LowerL-01'].setValuesInStep(stepName=DYNEXP_STEPS[3], u2=0.0)
mdb.models[MODEL_NAME].boundaryConditions['BC-LowerR-01'].setValuesInStep(stepName=DYNEXP_STEPS[2], u2=0.51)
mdb.models[MODEL_NAME].boundaryConditions['BC-LowerR-01'].setValuesInStep(stepName=DYNEXP_STEPS[3], u2=0.0)
mdb.models[MODEL_NAME].boundaryConditions['BC-Lower-02'].setValuesInStep(stepName=DYNEXP_STEPS[3], u2=FREED)
mdb.models[MODEL_NAME].boundaryConditions['BC-Lower-02'].setValuesInStep(stepName=DYNEXP_STEPS[4], u2=0.275)
mdb.models[MODEL_NAME].boundaryConditions['BC-Lower-02'].setValuesInStep(stepName=DYNEXP_STEPS[5], u2=0.0)
# Pisador
mdb.models[MODEL_NAME].DisplacementBC(amplitude='Amp-1', createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, fieldName='', fixed=OFF, localCsys=
None, name='BC-Pisador', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['pisador-1'].referencePoints[2],
)), u1=0.0, u2=UNSET, ur3=0.0)
mdb.models[MODEL_NAME].boundaryConditions['BC-Pisador'].setValuesInStep(stepName=DYNEXP_STEPS[5], u2=0.0)
# X-axis constrained
mdb.models[MODEL_NAME].DisplacementBC(amplitude=UNSET, createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, fieldName='', fixed=OFF,
localCsys=None, name='BC-8', region=Region(
vertices=mdb.models[MODEL_NAME].rootAssembly.instances['plate-1'].vertices.getSequenceFromMask(
mask=('[#2 ]', ), )), u1=0.0, u2=UNSET, ur3=UNSET)
# Loads ===================================================================================
# Pisador
mdb.models[MODEL_NAME].ConcentratedForce(amplitude='Amp-1', cf2=-2000.0,
createStepName=DYNEXP_STEPS[1], distributionType=UNIFORM, field='',
localCsys=None, name='Pisador-Force', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['pisador-1'].referencePoints[2],
)))
mdb.models[MODEL_NAME].loads['Pisador-Force'].setValuesInStep(stepName=DYNEXP_STEPS[5], cf2=0.0)
# Ejector pins (botadores)
mdb.models[MODEL_NAME].ConcentratedForce(amplitude='Amp-1', cf2=1000.0,
createStepName=DYNEXP_STEPS[1], distributionType=UNIFORM, field='',
localCsys=None, name='B01L-Force', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_left_01-1'].referencePoints[2],
)))
mdb.models[MODEL_NAME].ConcentratedForce(amplitude='Amp-1', cf2=1000.0,
createStepName=DYNEXP_STEPS[1], distributionType=UNIFORM, field='',
localCsys=None, name='B01R-Force', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_right_01-1'].referencePoints[2],
)))
mdb.models[MODEL_NAME].ConcentratedForce(amplitude='Amp-1', cf2=1999.5,
createStepName=DYNEXP_STEPS[1], distributionType=UNIFORM, field='',
localCsys=None, name='B02-Force', region=Region(referencePoints=(
mdb.models[MODEL_NAME].rootAssembly.instances['lower_02-1'].referencePoints[2],
)))
mdb.models[MODEL_NAME].loads['B01L-Force'].setValuesInStep(stepName=DYNEXP_STEPS[2], cf2=0.0)
mdb.models[MODEL_NAME].loads['B01R-Force'].setValuesInStep(stepName=DYNEXP_STEPS[2], cf2=0.0)
mdb.models[MODEL_NAME].loads['B02-Force'].setValuesInStep(stepName=DYNEXP_STEPS[4], cf2=0.0)
# Gravity
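# Gravity is applied to the blank faces only; the value of 386 in/s^2 is
# consistent with the inch-pound-second unit system implied by the psi-scale
# material data above.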
mdb.models[MODEL_NAME].Gravity(amplitude='Amp-1', comp2=-386.0, createStepName=
DYNEXP_STEPS[1], distributionType=UNIFORM, field='', name='Gravity', region=Region(
faces=mdb.models[MODEL_NAME].rootAssembly.instances['plate-1'].faces.getSequenceFromMask(
mask=('[#f ]', ), )))
# Mesh plate
mdb.models[MODEL_NAME].parts['plate'].seedPart(deviationFactor=0.1,
minSizeFactor=0.1, size=MESH_SIZE_QUAD)
mdb.models[MODEL_NAME].parts['plate'].seedEdgeBySize(constraint=FINER,
deviationFactor=0.1, edges=
mdb.models[MODEL_NAME].parts['plate'].edges.getSequenceFromMask(('[#2100 ]',
), ), minSizeFactor=0.1, size=MESH_SIZE_TRI)
mdb.models[MODEL_NAME].parts['plate'].setMeshControls(elemShape=TRI, regions=
mdb.models[MODEL_NAME].parts['plate'].faces.getSequenceFromMask(('[#14 ]',
), ))
mdb.models[MODEL_NAME].parts['plate'].setElementType(elemTypes=(ElemType(
elemCode=CPE4R, elemLibrary=EXPLICIT), ElemType(elemCode=CPE3,
elemLibrary=EXPLICIT, secondOrderAccuracy=OFF, distortionControl=DEFAULT)),
regions=(mdb.models[MODEL_NAME].parts['plate'].faces.getSequenceFromMask((
'[#14 ]', ), ), ))
mdb.models[MODEL_NAME].parts['plate'].setElementType(elemTypes=(ElemType(
elemCode=CPE4R, elemLibrary=EXPLICIT, secondOrderAccuracy=OFF,
hourglassControl=DEFAULT, distortionControl=DEFAULT), ElemType(
elemCode=CPE3, elemLibrary=EXPLICIT)), regions=(
mdb.models[MODEL_NAME].parts['plate'].faces.getSequenceFromMask(('[#b ]',
), ), ))
mdb.models[MODEL_NAME].parts['plate'].generateMesh()
# Job
mdb.Job(activateLoadBalancing=False, atTime=None, contactPrint=OFF,
description='', echoPrint=OFF, explicitPrecision=SINGLE, historyPrint=OFF,
memory=90, memoryUnits=PERCENTAGE, model=MODEL_NAME, modelPrint=OFF,
multiprocessingMode=DEFAULT, name=JOB_NAME, nodalOutputPrecision=SINGLE,
numCpus=1, numDomains=1, parallelizationMethodExplicit=DOMAIN, queue=None,
resultsFormat=ODB, scratch='', type=ANALYSIS, userSubroutine='', waitHours=
0, waitMinutes=0)
# mdb.jobs[JOB_NAME].submit(consistencyChecking=OFF)
| JorgeDeLosSantos/metal-forming-itc | forming_2D.py | Python | mit | 37,581 |