| repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
daStrauss/subsurface
|
src/superSolve/wrapCvxopt.py
|
1
|
2619
|
'''
Created on Jul 9, 2012
Copyright © 2013
The Board of Trustees of The Leland Stanford Junior University.
All Rights Reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: dstrauss
'''
import cvxopt
from cvxopt import umfpack
import copy
import numpy as np
from cvxopt import lapack
# Note: we don't explicitly check that these are sparse matrices.
# from scipy import sparse
def linsolve(A,b):
aLocal = A.tocoo()
AC = cvxopt.spmatrix(aLocal.data.tolist(),aLocal.row.tolist(), aLocal.col.tolist())
bLocal = cvxopt.matrix(copy.deepcopy(b))
umfpack.linsolve(AC,bLocal)
bLocal = np.array(bLocal).flatten()
return bLocal
def staticSolver(A):
'''Creates a routine for solving the matrix A --uses UMFPACK underneath'''
aLocal = A.tocoo()
AC = cvxopt.spmatrix(aLocal.data.tolist(),aLocal.row.tolist(), aLocal.col.tolist())
Fs = umfpack.symbolic(AC)
FA = umfpack.numeric(AC,Fs)
def Q( b ):
bLocal = cvxopt.matrix(copy.deepcopy(b))
umfpack.solve(AC,FA,bLocal)
bLocal = np.array(bLocal).flatten()
return bLocal
return Q
def createSymbolic(A):
''' returns a symbolic factorization object for later reuse'''
s = A.shape
aLocal = A.tocoo()
AC = cvxopt.spmatrix(aLocal.data.tolist(),aLocal.row.tolist(), aLocal.col.tolist(),s)
Fs = umfpack.symbolic(AC)
return Fs
def solveNumeric(A,b, Fs):
''' given a static Fs, or symbolic factorization of the matrix A, performs the numeric part '''
aLocal = A.tocoo()
s = A.shape
AC = cvxopt.spmatrix(aLocal.data.tolist(),aLocal.row.tolist(), aLocal.col.tolist(),s)
# Fs = umfpack.symbolic(AC)
FA = umfpack.numeric(AC,Fs)
bLocal = cvxopt.matrix(copy.deepcopy(b))
umfpack.solve(AC,FA,bLocal)
bLocal = np.array(bLocal).flatten()
return bLocal
def denseSolve(A,b):
''' solves an Ax = b matrix system with gesv'''
if isinstance(A,np.ndarray):
aLocal = cvxopt.matrix(A)
bLocal = cvxopt.matrix(b)
lapack.gesv(aLocal,bLocal)
return np.array(bLocal).flatten()
else:
return linsolve(A,b)
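# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes scipy and cvxopt are installed; builds a tiny sparse system and
# solves it once directly and once through a reusable factorization.
if __name__ == '__main__':
    from scipy import sparse
    A = sparse.csr_matrix(np.array([[4.0, 1.0, 0.0],
                                    [1.0, 3.0, 0.0],
                                    [0.0, 0.0, 2.0]]))
    b = np.array([1.0, 2.0, 3.0])
    print(linsolve(A, b))       # one-shot UMFPACK solve
    solve = staticSolver(A)     # factorize once...
    print(solve(b))             # ...then reuse per right-hand side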
|
apache-2.0
| -7,317,539,029,297,520,000
| 30.926829
| 99
| 0.688694
| false
| 3.2725
| false
| false
| false
|
rouxcode/django-filer-addons
|
filer_addons/tests/test_management_commands.py
|
1
|
2189
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from filer.tests import create_superuser
from filer.models import File, Folder
from filer_addons.tests.utils import create_django_file
class ManagementCommandsTests(TestCase):
def setUp(self):
self.superuser = create_superuser()
self.client.login(username='admin', password='secret')
self.folder = Folder.objects.create(name='test')
self.another_folder = Folder.objects.create(name='test')
def tearDown(self):
self.delete_files()
for folder in Folder.objects.all():
folder.delete()
def delete_files(self):
for f in File.objects.all():
f.delete()
def create_file(self, **kwargs):
"""
two files
kwargs size: tuple, img dimension
kwargs name: filename
:param kwargs:
:return:
"""
filename = 'file.jpg'
if kwargs.get('name', None):
filename = kwargs['name']
size = (50, 50, )
if kwargs.get('size', None):
size = kwargs['size']
django_file = create_django_file(filename=filename, size=size)
file_obj = File.objects.create(
owner=self.superuser,
original_filename=filename,
file=django_file,
)
file_obj.save()
return file_obj
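# Illustrative usage (added): self.create_file(name='photo.jpg', size=(800, 600))
# would create one 800x600 test image owned by the test superuser.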
# TODO: write more management command tests
def test_delete_thumbnails(self):
from django.core.management import call_command
call_command('filer_addons', 'delete_thumbnails', )
# import django
# from django.core.management import call_command
# if django.VERSION[:2] < (2, 1):
# call_command('filer_addons', 'delete_thumbnails', )
# else:
# call_command('filer_addons' )
# from filer_addons.filer_utils.management.commands import delete_thumbnails
# call_command(delete_thumbnails.Command(), )
# check for thumb dir not existing
def test_unused_files_command(self):
pass
def test_orphaned_files_command(self):
pass
def test_import_existing_files_command(self):
pass
|
mit
| -3,994,054,243,302,470,000
| 30.724638
| 88
| 0.602101
| false
| 4.106942
| true
| false
| false
|
pmarti/pyclutter
|
examples/hello.py
|
1
|
1992
|
import sys
import clutter
class HelloClutter:
def __init__ (self, message):
self.stage = clutter.Stage()
self.stage.set_color(clutter.color_from_string('DarkSlateGrey'))
self.stage.set_size(800, 600)
self.stage.set_title('My First Clutter Application')
self.stage.connect('key-press-event', clutter.main_quit)
self.stage.connect('button-press-event',
self.on_button_press_event)
color = clutter.Color(0xff, 0xcc, 0xcc, 0xdd)
self.label = clutter.Text()
self.label.set_font_name('Mono 32')
self.label.set_text(message)
self.label.set_color(color)
(label_width, label_height) = self.label.get_size()
label_x = self.stage.get_width() - label_width - 50
label_y = self.stage.get_height() - label_height
self.label.set_position(label_x, label_y)
self.stage.add(self.label)
self.cursor = clutter.Rectangle()
self.cursor.set_color(color)
self.cursor.set_size(20, label_height)
cursor_x = self.stage.get_width() - 50
cursor_y = self.stage.get_height() - label_height
self.cursor.set_position(cursor_x, cursor_y)
self.stage.add(self.cursor)
self.timeline = clutter.Timeline(500)
self.timeline.set_loop(True)
alpha = clutter.Alpha(self.timeline, clutter.LINEAR)
self.behaviour = clutter.BehaviourOpacity(0xdd, 0, alpha)
self.behaviour.apply(self.cursor)
def on_button_press_event (self, stage, event):
print "mouse button %d pressed at (%d, %d)" % \
(event.button, event.x, event.y)
def run (self):
self.stage.show_all()
self.timeline.start()
clutter.main()
def main (args):
if args:
message = args[0]
else:
message = 'Hello, Clutter!'
app = HelloClutter(message)
app.run()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
lgpl-2.1
| -3,097,715,510,959,132,000
| 32.2
| 72
| 0.601908
| false
| 3.325543
| false
| false
| false
|
AtalM2/iAtal
|
src/python/test_broken.py
|
1
|
1071
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import elements
import enums
#globals
ended = False
compteur = 0
map_ = False
def init(aMap):
global map_
map_ = aMap
def robot_init():
global undergroundSensor
undergroundSensor = sensor(enums.Level.Underground, 1)
global greenActuator
greenActuator = actuator(enums.Level.Underground, 1, "bibi")
#python strategy
def strat():
print(undergroundSensor.activate())
greenActuator.activate()
print(undergroundSensor.activate())
global ended
ended = True
#used to tell whether the strategy has finished or not
def isEnded():
return ended
#Defines a sensor.
class sensor:
def __init__(self,level_, range_):
self.map_ = map_
self.level_ = level_
self.range_ = range_
def activate(self):
return map_.getItem(self.level_ , self.range_)
#defines an actuator
class actuator:
def __init__(self, level_, range_,newContent_):
self.map_ = map_
self.level_ = level_
self.range_ = range_
self.newContent_ = newContent_
def activate(self):
self.map_.setItem(self.level_, self.range_, self.newContent_)
|
gpl-3.0
| 7,224,220,530,610,876,000
| 19.557692
| 63
| 0.703461
| false
| 2.920765
| false
| false
| false
|
3liz/QgisQuickOSMPlugin
|
quick_osm_processing/advanced/download_overpass.py
|
1
|
3441
|
"""
/***************************************************************************
QuickOSM QGIS plugin
OSM Overpass API frontend
-------------------
begin : 2017-11-11
copyright : (C) 2017 by Etienne Trimaille
email : etienne dot trimaille at gmail dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
# import codecs
# import re
# import processing
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
from qgis.core import (
QgsProcessingParameterString,
QgsProcessingOutputFile,
)
class DownloadOverpassUrl(QgisAlgorithm):
URL = 'URL'
OUTPUT = 'OUTPUT'
def __init__(self):
super(DownloadOverpassUrl, self).__init__()
self.feedback = None
def group(self):
return self.tr('Advanced')
@staticmethod
def groupId():
return 'advanced'
@staticmethod
def name():
return 'downloadoverpassquery'
def displayName(self):
return self.tr('Download from Overpass')
def flags(self):
return super().flags() # | QgsProcessingAlgorithm.FlagHideFromToolbox
def shortHelpString(self):
return self.tr(
'Like the native QGIS File Downloader algorithm, this algorithm '
'will download a URL, but it will also perform an OSM integrity '
'check at the end of the download.')
def initAlgorithm(self, config=None):
self.addParameter(
QgsProcessingParameterString(
self.URL, self.tr('URL, with the query encoded')))
self.addOutput(
QgsProcessingOutputFile(
self.OUTPUT, self.tr('Output')))
def processAlgorithm(self, parameters, context, feedback):
self.feedback = feedback
# url = self.parameterAsString(parameters, self.URL, context)
output = self.parameterAsFileOutput(parameters, self.OUTPUT, context)
# processing.run("native:filedownloader", {
# 'URL': url,
# 'OUTPUT': output,
# }, context=context, feedback=feedback)
# file_obj = codecs.open(self.result_path, 'r', 'utf-8')
# file_obj.seek(0, 2)
# fsize = file_obj.tell()
# file_obj.seek(max(fsize - 1024, 0), 0)
# lines = file_obj.readlines()
# file_obj.close()
#
# lines = lines[-10:] # Get last 10 lines
# timeout = '<remark> runtime error: Query timed out in "[a-z]+" ' \
# 'at line [\d]+ after ([\d]+) seconds. </remark>'
# if re.search(timeout, ''.join(lines)):
# raise QgsProcessingException(tr('Overpass API timeout'))
outputs = {
self.OUTPUT: output,
}
return outputs
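# Illustrative call sketch (added; the algorithm id is assumed from the class name,
# verify against your QGIS/QuickOSM install before relying on it):
#   import processing
#   processing.run('quickosm:downloadoverpassquery',
#                  {'URL': 'https://overpass-api.de/api/interpreter?data=...',
#                   'OUTPUT': '/tmp/overpass_result.osm'})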
|
gpl-2.0
| 8,111,771,314,235,636,000
| 34.112245
| 78
| 0.503051
| false
| 4.752762
| false
| false
| false
|
anselg/handy-scripts
|
latency/plot_histogram.py
|
1
|
4785
|
#! /usr/bin/env python
##########################################################################
# Import modules
##########################################################################
import sys
import os
import re
import h5py as h5
import numpy as np
import pandas as pd
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import operator
import subprocess
##########################################################################
# Parse command-line input; set global parameters
##########################################################################
if len(sys.argv) < 2:
print("Give me an hdf file")
sys.exit()
else:
filename = sys.argv[1]
plotname = os.path.splitext(filename)[0] + ".svg"
plt.style.use('ggplot')
##########################################################################
# Define methods
##########################################################################
def runShellCommand(command):
output = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE).stdout.read().strip().decode()
return output
def getCpu():
command = "cat /proc/cpuinfo"
text = runShellCommand(command).split('\n')
procline = [line for line in text if re.search("model name", line)][0]
return procline.split(":")[1].strip()
def getGpu():
command = "lshw -numeric -C display"
text = runShellCommand(command).split('\n')
product = [line for line in text if re.search("product", line)]
vendor = [line for line in text if re.search("vendor", line)]
driver = [line for line in text if re.search("driver", line)]
if product and vendor and driver:
product = product[0].split("product:")[1].strip()
vendor = vendor[0].split("vendor:")[1].strip()
driver = driver[0].split("configuration:")[1].strip().split(" ")[
0].split("=")[1].strip()
return vendor, product, driver
else:
return "GPU vendor not found", "GPU model not found", "GPU driver not found"
def getDaq():
command = "lspci"
text = runShellCommand(command).split('\n')
daqline = [line for line in text if re.search("National", line)]
if daqline:
daqline = daqline[0]
return daqline.split(":")[2].strip()
else:
return "DAQ not found"
def getDistro():
command = "echo $(lsb_release -is) $(lsb_release -rs)"
return runShellCommand(command)
def getKernel():
command = "uname -r"
return runShellCommand(command)
def getHostname():
command = "uname -n"
return runShellCommand(command)
def unwrapHistogram(f):
f["Count"] = f["Count"] - 1
latencies = []
for idx, row in f.iterrows():
latencies.extend([row["Latency (us)"]] * int(row["Count"]))
df = pd.DataFrame(latencies, columns=["Latency (us)"])
return df
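# Worked example (added, illustrative): a histogram row with Latency (us)=12 and
# Count=3 contributes two rows of 12 to the returned frame, because one count per
# bin is subtracted above as the baseline before the bins are expanded.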
##########################################################################
# Process data
##########################################################################
#filename = "test_rt_histdata_4.1.18_30min.txt"
raw_data = pd.read_csv(
filename,
sep=" ",
comment="#",
names=[
"Latency (us)",
"Count"])
data = unwrapHistogram(raw_data.copy(deep=True))
##########################################################################
# Generate table
##########################################################################
cpu = getCpu()
daq = getDaq()
hostname = getHostname()
distro = getDistro()
kernel = getKernel()
vendor, model, driver = getGpu()
frequency = 10.0
col1 = [
"Computer",
"Kernel",
"CPU",
"GPU Vendor",
"GPU Model",
"GPU Driver",
"RT Freq"]
col2 = [
hostname + " (" + distro + ")",
kernel,
cpu,
vendor,
model,
driver,
str(frequency) + " kHz"]
col2 = [[value] for value in col2]
##########################################################################
# Generate plot
##########################################################################
f, ax = plt.subplots(
2, gridspec_kw={
'height_ratios': [
1, 2.5]}, figsize=(
8, 7))
ax[0].axis('off')
table = ax[0].table(cellText=col2, rowLabels=col1, loc='center',
colWidths=[.8], colLoc='right', bbox=[.1, 0, .85, 1])
data.hist("Latency (us)", bins=50, ax=ax[1])
ax[1].set_title("")
ax[1].set_yscale('log')
ax[1].set_ylabel('Count')
ax[1].set_xlabel('Latency (us)')
mean_latency = data['Latency (us)'].mean()
std_latency = data['Latency (us)'].std()
ax[1].table(
cellText=[
[mean_latency],
[std_latency]],
rowLabels=[
"Mean (us)",
"Std Dev (us)"],
loc='center right',
colWidths=[.2] * 2)
plt.tight_layout()
plt.savefig(plotname, dpi=300)
plt.close()
|
gpl-3.0
| 245,206,177,885,361,280
| 25.731844
| 84
| 0.491118
| false
| 3.858871
| false
| false
| false
|
mivade/qCamera
|
qcamera/camprops.py
|
1
|
5831
|
"""Camera properties"""
import os.path
import json
# TODO: don't allow updating of properties that don't exist in the
# default self.props set in __init__
from . exceptions import CameraPropertiesError
PATH = os.path.split(os.path.abspath(__file__))[0]
class CameraProperties(object):
"""Class used for storing properties of the camera in use and
flags about what functionality is supported.
"""
# Basic functions
# -------------------------------------------------------------------------
def __init__(self, filename=None, **kwargs):
"""Without kwargs passed, populate the base properties
dict. Otherwise, populate as appropriate. See self.props for
valid keyword arguments.
Parameters
----------
filename : str or None
If passed, the path to a JSON file that sets all the
camera properties.
"""
self.props = {
# Generic properties
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Number of horizontal and vertical pixels
'pixels': [0, 0],
# Pixel size in um. 0 means N/A
'pixel_um': 0,
# Bits per pixel. This could conceivably be a tuple if a
# color camera. The pixel_mode attribute specifies if it
# is mono or some form of color.
'depth': 8,
'pixel_mode': 'mono',
# Available trigger modes
'trigger_modes': ['internal'],
# Available acquisition modes
'acquisition_modes': ['continuous'],
# List of valid values for binning
'bins': [1],
# Min and max temperatures for cameras with a
# cooler. Meaningless otherwise.
'temp_range': [-90, 30],
# Min and max values for gain. Meaningless if the camera
# gain cannot be adjusted.
'gain_range': [0, 255],
# Min and max values for exposure. Meaningless if the camera
# exposure cannot be adjusted. For some cameras this has physical
# units; for others these are arbitrary units.
'exposure_range': [1,2000],
# Functionality flags
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Can hardware cropping be set?
'hardware_crop': False,
# Can the gain be adjusted?
'gain_adjust': False,
# Can the exposure be adjusted?
'exposure_adjust': True,
# Is there a built-in temperature controller for the
# sensor?
'temp_control': False,
# Does the camera have a builtin shutter?
'shutter': False,
# Default settings
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Minimum and maximum threshold values for contrast adjustment
'init_contrast': [0, 256],
# Start acquisition immediately if True
'auto_start': True,
# Initial temperature set point
'init_set_point': -10,
# Start temperature control immediately?
'auto_temp_control': False,
# Initial shutter state open?
'init_shutter': False,
# Initial gain
'init_gain': 0,
# Initial exposure
'init_exposure': 100,
}
# Update parameters from a file if given.
if filename is not None:
self.load(filename)
else:
print("No camera properties loaded!")
def __getitem__(self, key):
return self.props[key]
def __setitem__(self, key, value):
self.props[key] = value
def __delitem__(self, key):
self.props.pop(key)
def __iter__(self):
pass # TODO
def __str__(self):
return json.dumps(self.props, indent=2)
def update(self, props):
"""Update the props dict."""
assert isinstance(props, dict)
self.props.update(props)
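# e.g. (added, illustrative): props.update({'pixels': [1280, 1024], 'gain_adjust': True})
# overrides only the listed keys and leaves the remaining defaults untouched.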
# Loading and saving properties
# -------------------------------------------------------------------------
# Definitions of basic camera properties can be stored in a JSON
# file so that we only need to determine at runtime a few
# differing parameters that change depending on the specific model
# of camera being used. For example, the Andor SDK supports
# several different specific cameras, but some functionality
# depends on the physical camera being used. Most of the
# capabilities for all models is the same, however, and so these
# generic values are stored in a file and only the few that are
# camera-specific are queried for.
def save(self, filename):
"""Save the properties to a JSON file."""
with open(filename, 'w') as outfile:
json.dump(self.props, outfile, indent=4, sort_keys=True)
def load(self, filename, abs_path=False):
"""Load the properties from a JSON file. If abs_path is False,
load the file from the global properties directory (i.e.,
qcamera/props).
"""
if not abs_path:
path = os.path.join(PATH, 'props', filename)
else:
path = filename
with open(path, 'r') as infile:
props = json.load(infile)
# TODO: this should check that keys are valid!
self.props = props
if __name__ == "__main__":
props = CameraProperties()
props.save('test.json')
|
bsd-2-clause
| 4,954,028,469,685,111,000
| 31.130682
| 79
| 0.513291
| false
| 4.80709
| false
| false
| false
|
osantana/quickstartup
|
tests/website/tests.py
|
1
|
2616
|
import pytest
from django.test import override_settings
from django.urls import NoReverseMatch
from quickstartup.qs_pages.models import Page
from quickstartup.qs_pages.urlresolver import page_reverse
from ..base import TEMPLATES, check_contains, check_in_html, check_template_used
pytestmark = pytest.mark.django_db
@override_settings(TEMPLATES=TEMPLATES)
def test_success_reverse():
Page.objects.create(slug="about", template_name="about.html")
url = page_reverse("about")
assert "/about/" == url
def test_fail_reverse_missing_page():
with pytest.raises(NoReverseMatch):
page_reverse("unknown")
def test_fail_reverse_invalid_url():
with pytest.raises(NoReverseMatch):
page_reverse("/")
def test_bootstrap_pages():
assert Page.objects.get(slug="").get_absolute_url() == "/"
assert Page.objects.get(slug="terms").get_absolute_url() == "/terms/"
assert Page.objects.get(slug="privacy").get_absolute_url() == "/privacy/"
def test_path():
page = Page.objects.get(slug="terms")
assert page.path == "/terms/"
assert str(page) == "/terms/"
def test_filter_invalid_pages():
pages = Page.objects.all()
assert "inv@lid" not in pages
def test_success_terms_page_access(client):
response = client.get("/terms/")
assert response.status_code == 200
assert check_contains(response, "<title>Terms of Service —")
def test_success_terms_page_access_missing_trailing_slash(client):
response = client.get("/terms")
assert check_contains(response, "<title>Terms of Service — ")
def test_success_privacy_page_access(client):
response = client.get("/privacy/")
assert response.status_code == 200
assert check_contains(response, "<title>Privacy Policy —")
def test_fail_page_404(client):
response = client.get("/unknown/")
assert response.status_code == 404
def test_fail_invalid_url(client):
response = client.get("/err/or/")
assert response.status_code == 404
@override_settings(TEMPLATES=TEMPLATES, DEBUG=False)
def test_call_template_with_error_and_debug_disabled(client):
Page.objects.create(slug="buggy-template", template_name="buggy-template.html")
response = client.get(page_reverse("buggy-template"))
assert response.status_code == 404  # original error is 404 because we don't map page urls
def test_index_page_anonymous_user(client):
response = client.get("/")
assert response.status_code == 200
assert check_template_used(response, "website/landing.html")
assert check_in_html("<title>Django Quickstartup</title>", response.content.decode("utf-8"))
|
mit
| -3,780,522,163,532,078,000
| 30.071429
| 96
| 0.708046
| false
| 3.625
| true
| false
| false
|
Gorah/py_accuracy_report
|
runreport.py
|
1
|
34212
|
#Script written for Python 2.7
#Dependencies to download: pyodbc (SQL Server Drivers)
import pyodbc
import datetime
import sys
import re
from contextlib import contextmanager
LATE_CASES = {}
@contextmanager
def get_connection():
"""
Connect to DB
"""
cnxn = pyodbc.connect('DRIVER={SQL SERVER};SERVER=BPOPLMCBC16;DATABASE=AdminTracker_SQL;UID=AppLogon;PWD=ZdrojHPL1950')
yield cnxn.cursor()
cnxn.commit()
def get_DBdata(sql, sD, eD, cursor):
"""
This function takes SQL string and connection object and returns
rowset with data
"""
if(sD):
cursor.execute(sql, sD, eD)
else:
cursor.execute(sql)
try:
rows = cursor.fetchall()
except pyodbc.Error as err:
rows = None
print err
sys.exit(0)
return rows
def count_days(row, userecdate=False):
"""
This function calculates number of days between Cut Off Date and
date of receiving complete documents.
"""
cutD = row.CutOffDate
if userecdate:
recD = row.DateReceived
else:
#if CompleteDocsDate is missing, use current date instead
try:
recD = row.CompleteDocsDate
except AttributeError:
recD = datetime.datetime.now()
if not recD:
recD = datetime.datetime.now()
return day_diff(recD, cutD)
def day_diff(date1, date2):
"""
This function returns difference in days between 2 dates.
"""
days = (date1 - date2).days
if days > 0:
return days
else:
return 0
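# Worked example (added, illustrative): day_diff(datetime.datetime(2014, 1, 10),
# datetime.datetime(2014, 1, 3)) returns 7, while a negative difference is clamped to 0.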
def write_to_dict(row, ttype, notes):
"""
This function fills dictionary with a new entry (which is another
dictionary containing all the necessary data)
"""
#new empty dictionary is created to store ticket data in it
case_descr = {}
case_descr['type'] = ttype
#This allows overriding the default notes text
case_descr['notes'] = notes
if not row.Forname:
forename = ' '
else:
forename = row.Forname
case_descr['eename'] = row.Surname + ' ' + forename
case_descr['eeid'] = row.EEID
case_descr['rootcause'] = row.CauseText
#new dictionary is appended to general dict under ticket ID as key
LATE_CASES[row.ID] = case_descr
def contract_exp_by_dates(sD, eD, cursor):
"""
This function takes date boundaries and connection object and
fetches data from DB. Then it sends recordsets for further
processing.
This function covers Contract Expiration - Late Submission category.
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname, T.SourceID,
R.CauseText, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (262, 330)) AND
(T.DateReceived BETWEEN ? AND ?) AND
(T.EffectiveDate < T.DateReceived OR T.CutOffDate < T.DateReceived)"""
ttype = 'Contract Expiration - Late Renewal Submission'
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
"""if there are any rows in response we're checking each row to
determine which piece of string to use in description of case.
After string is determined row and meta data are sent to be added
to dictionary.
"""
if result:
for row in result:
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
docs_rec = get_compDocsString(row.CompleteDocsDate)
notes = ('"%s%s.\n%s%s.\n%s.\n%s%s.\n%s%s.\n%s%d.\n%s."' %
('Contract End date ',
row.EffectiveDate.strftime('%d/%m/%Y'),
'PCR received on ',
row.DateReceived.strftime('%d/%m/%Y'),
docs_rec,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
day_diff(datetime.datetime.now(), row.CutOffDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def contract_no_response(sD, eD, cursor):
"""
This function finds records where there was no response for end
of contract reminder.
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname,
E.Surname, T.LetterSentOn, R.CauseText
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE T.ProcessID IN (352, 350, 383, 399) AND
(T.DateReceived BETWEEN ? AND ?) AND
((T.EffectiveDate < GETDATE() AND T.SignedLetterReceivedOn is null)
OR (T.CutOffDate < GETDATE() AND T.SignedLetterReceivedOn is null))"""
#getting data from DB
result = get_DBdata(sql, sD, eD, cursor)
if result:
for row in result:
if row.LetterSentOn:
letter = ('%s%s' %('Email to manager sent on ',
row.LetterSentOn.strftime('%d/%m/%Y')))
else:
letter = 'Email not sent yet'
notes = ('"%s%s.\n%s.\n%s%s.\n%s.\n%s%d.\n%s."' %
('Contract End date ',
row.EffectiveDate.strftime('%d/%m/%Y'),
letter,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Response not received from LM',
'Days late for payroll cut off: ',
day_diff(datetime.datetime.now(), row.CutOffDate),
row.EEImpact
))
write_to_dict(row, 'Contract Expiration - No Response', notes)
def contract_exp_by_letters(sD, eD, cursor):
"""
This function fetches data for Contract Expiration category,scoping
for the letter tickets. Late tickets are fetched and are split into
2 different types: 'no response' and 'late submission'.
Data is later sent to be written to dictionary
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, T.SignedLetterReceivedOn,
E.Surname, T.LetterSentOn, R.CauseText FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (349, 351, 352, 350, 383, 399)) AND
(T.DateReceived BETWEEN ? AND ?) AND
((T.EffectiveDate < GETDATE() AND T.SignedLetterRequired = 1)
OR (T.CutOffDate < GETDATE() AND T.SignedLetterRequired = 1))"""
notes_name = 'Contract End effective date '
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
"""if there are any rows in response we're checking each row to
determine which piece of string to use in description of case.
After string is determined row and meta data are sent to be added
to dictionary.
"""
if result:
for row in result:
#############################
#TEMP STUFF - REMOVE IN PROD
if not row.LetterSentOn:
LetterSentOn = datetime.datetime(2010, 10, 10)
else:
LetterSentOn = row.LetterSentOn
###################################
if not row.SignedLetterReceivedOn:
SignedLetterReceivedOn = datetime.datetime.today()
else:
SignedLetterReceivedOn = row.SignedLetterReceivedOn
if row.LetterSentOn:
letter = ('%s%s' %('Email to manager sent on ',
row.LetterSentOn.strftime('%d/%m/%Y')))
else:
letter = 'Email not sent yet'
#create statuses of signed letter received back
#basing on date conditions
if row.SignedLetterReceivedOn:
sigLetter = ('%s%s' % ('Response from LM received on ',
# row.SignedLetterReceivedOn.strftime('%d/%m/%Y')))
SignedLetterReceivedOn.strftime('%d/%m/%Y')))
else:
sigLetter = 'Response from LM not yet returned'
ttype = 'Contract Expiration - Late Renewal Submission'
notes = ('"%s%s.\n%s.\n%s%s.\n%s.\n%s%d.\n%s."' %
('Contract End date ',
row.EffectiveDate.strftime('%d/%m/%Y'),
letter,
'Response should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
sigLetter,
'Days late for payroll cut off: ',
day_diff(SignedLetterReceivedOn, row.CutOffDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def late_loa(sD, eD, cursor):
"""
This function finds late loa cases
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.EEImpact, E.EEID, E.Forname, E.Surname, P.ProcessName, T.SourceID, R.CauseText
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tProcess as P ON T.ProcessID = P.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (246, 261, 264, 282, 284, 289, 305,
306, 326, 341)) AND
(T.DateReceived BETWEEN ? AND ?)"""
ttype = 'Leave of Absence - Late Submission'
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
#if there are any records in the recordset they need to be analyzed
#to see if they are late
if result:
for row in result:
#checks if row is late. if yes adds an entry
if check_if_late_loa(row):
source = get_source_string(row.SourceID)
friday = row.EffectiveDate + datetime.timedelta(days=(4 - row.EffectiveDate.weekday()))
notes = ('"%s%s.\n%s%s.\n%s%s.\n%s%s.\n%s%d.\n%s"' %
('Process type: ',
row.ProcessName,
'Effective date ',
row.EffectiveDate.strftime('%d/%m/%Y'),
'Request should be submitted by ',
friday.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
'Days late: ',
day_diff(row.DateReceived, friday),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def check_if_late_loa(row):
"""
This function checks if loa entry is late or not based on business req.
"""
#find how many days away Friday is from the effective date
diff = 4 - row.EffectiveDate.weekday()
fridayDate = row.EffectiveDate + datetime.timedelta(days=diff)
#checks if date received is greater than date of Friday in the week when
#effective date took place
if (row.DateReceived - fridayDate).days > 0:
return True
else:
return False
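# Worked example (added, illustrative): an effective date of Wed 2014-01-08 gives a
# Friday cut-off of 2014-01-10, so a request received on 2014-01-13 is flagged late.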
def ret_from_loa_by_dates(sD, eD, cursor):
"""
This function collects data about Return From LOA category and
sends records with late tickets to be added to dictionary.
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname, R.CauseText,
T.SourceID, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID = 325) AND
(T.DateReceived BETWEEN ? AND ?) AND (T.EffectiveDate < T.DateReceived)"""
ttype = 'Return from Leave - Late Submission'
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
#if there are any records in the recordset each row is sent to be
#added to dictionary
if result:
for row in result:
source = get_source_string(row.SourceID)
#make sure to use a date; if the complete docs date is missing, today's date is used instead
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
dateRec = get_docsDate(row.CompleteDocsDate)
if (row.DateReceived - row.EffectiveDate).days > 0:
notes = ('"%s%s.\n%s%s.\n%s.\n%s%s.\n%s%d.\n%s"' %('Return effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
'Request should be submitted by ',
row.EffectiveDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
day_diff(dateRec, row.EffectiveDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def late_by_action(sD, eD, scope, procname, cursor):
"""
This function finds late job change actions in SAP among tickets
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname , R.CauseText,
T.SourceID, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (""" + scope + """) AND
T.DateReceived BETWEEN ? AND ?) AND
(((T.EffectiveDate < T.CompleteDocsDate) OR
(T.CutOffDate < T.CompleteDocsDate) AND T.CompleteDocsDate IS NOT NULL)
OR ((T.EffectiveDate < T.DateReceived OR T.CutOffDate < T.DateReceived) AND
T.CompleteDocsDate IS NULL))"""
ttype = procname + " - Late Submission"
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
if result:
for row in result:
source = get_source_string(row.SourceID)
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
dateRec = get_docsDate(row.CompleteDocsDate)
notes = ('"%s%s.\n%s%s.\n%s.\n%s%s.\n%s%d.\n%s"' %
(procname + ' effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
day_diff(dateRec, row.CutOffDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def late_by_letters(sD, eD, scope, procname, cursor):
"""
This function finds late job change letters
"""
sql = """SELECT T.ID, T.DateReceived, T.CompleteDocsDate, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.SignedLetterReceivedOn,
T.NumberOfReminders, E.EEID, E.Forname,
E.Surname, T.LetterReceived, T.SignedLetterRequired,
T.LetterSentOn, R.CauseText, T.SourceID, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (""" + scope + """)) AND
(T.DateReceived BETWEEN ? AND ?) AND
((T.EffectiveDate < T.CompleteDocsDate) OR
(T.CutOffDate < T.CompleteDocsDate) OR
(T.EffectiveDate < T.SignedLetterReceivedOn AND T.SignedLetterRequired = 1
AND T.SignedLetterReceivedOn IS NOT NULL) OR
(T.CutOffDate < T.SignedLetterReceivedOn AND T.SignedLetterRequired = 1
AND T.SignedLetterReceivedOn IS NOT NULL) OR
(T.SignedLetterRequired = 1 AND T.SignedLetterReceivedOn IS NULL AND
T.EffectiveDate < GETDATE()) OR
(T.SignedLetterRequired = 1 AND T.SignedLetterReceivedOn IS NULL AND
T.CutOffDate < GETDATE()))"""
ttype = procname + " - Late Submission"
#grab recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
if result:
for row in result:
#############################
#TEMP STUFF - REMOVE IN PROD
if not row.LetterSentOn:
LetterSentOn = datetime.datetime(2010, 10, 10)
else:
LetterSentOn = row.LetterSentOn
if not row.SignedLetterReceivedOn:
SignedLetterReceivedOn = datetime.datetime(2010, 10, 10)
else:
SignedLetterReceivedOn = row.SignedLetterReceivedOn
###################################
source = get_source_string(row.SourceID)
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
dateRec = get_docsDate(row.CompleteDocsDate)
#create statuses of signed letter received back
#basing on date conditions
if row.LetterReceived == 1 and row.SignedLetterReceivedOn:
sigLetter = ('%s%s' % ('Signed letter received on ',
#row.SignedLetterReceivedOn.strftime('%d/%m/%Y')))
SignedLetterReceivedOn.strftime('%d/%m/%Y')))
sigLetterRec = True
elif row.LetterReceived == 1 and row.SignedLetterRequired == 1 and not row.SignedLetterReceivedOn:
sigLetter = 'Signed letter not yet returned'
sigLetterRec = True
elif row.LetterReceived == 0:
sigLetterRec = False
#create statuses for letter sent, offer pack sent based on dates
if row.LetterReceived == 1:
letterSent = ('%s%s' % ('Letter sent on ',
#row.LetterSentOn.strftime('%d/%m/%Y')))
LetterSentOn.strftime('%d/%m/%Y')))
else:
letterSent = 'Letter not sent yet'
#calculate the number of days late based on current document and contract statuses
#and on docs submission date
if row.CompleteDocsDate > row.CutOffDate:
days = day_diff(row.CompleteDocsDate, row.CutOffDate)
elif row.CompleteDocsDate > row.EffectiveDate:
days = day_diff(row.CompleteDocsDate, row.EffectiveDate)
if row.SignedLetterReceivedOn:
if row.SignedLetterReceivedOn > row.CutOffDate:
days = day_diff(row.SignedLetterReceivedOn, row.CutOffDate)
elif row.SignedLetterReceivedOn > row.EffectiveDate:
days = day_diff(row.SignedLetterReceivedOn, row.EffectiveDate)
#create notes field
if sigLetterRec:
notes = ('"%s%s.\n%s%s.\n%s.\n%s.\n%s.\n%s%s.\n%s%d.\n%s."' %
(procname + ' effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
letterSent,
sigLetter,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
days,
row.EEImpact
))
else:
notes = ('"%s%s.\n%s%s.\n%s.\n%s.\n%s%s.\n%s%d.\n%s."' %
(procname + ' effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
letterSent,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
days,
row.EEImpact
))
write_to_dict(row, ttype, notes)
def late_hire(sD, eD, cursor):
"""
This function finds late hire actions
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname , T.LetterReceived,
T.LetterSentOn, T.SignedLetterReceivedOn, T.CloseDate, R.CauseText, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (371, 372) AND
(T.DateReceived BETWEEN ? AND ?)) AND
((T.EffectiveDate < T.DateReceived OR T.CutOffDate < T.DateReceived
AND T.CompleteDocsDate IS NULL) OR (T.SignedLetterReceivedOn > T.EffectiveDate)
OR (T.SignedLetterReceivedOn > T.CutOffDate) OR (T.CompleteDocsDate > T.EffectiveDate
OR T.CompleteDocsDate > T.CutOffDate) OR
(T.SignedLetterReceivedOn IS NULL AND (T.CutOffDate < GETDATE() OR
T.EffectiveDate < GETDATE())))"""
result = get_DBdata(sql, sD, eD, cursor)
ttype = 'Hires - Missing Documentation'
if result:
for row in result:
# if complete documents date is set use it as Complete docs received on
# else note that complete docs were not received yet
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
#create statuses of signed letter received back
#basing on date conditions
if row.LetterReceived == 1 and row.SignedLetterReceivedOn:
sigLetter = ('%s%s' % ('Signed contract received on ',
row.SignedLetterReceivedOn.strftime('%d/%m/%Y')))
sigLetterRec = True
elif row.LetterReceived == 1 and not row.SignedLetterReceivedOn:
sigLetter = 'Signed contract not yet returned'
sigLetterRec = True
elif row.LetterReceived == 0:
sigLetterRec = False
#create statuses for letter sent, offer pack sent based on dates
if row.CloseDate:
letterSent = ('%s%s' % ('Contract sent on ',
row.CloseDate.strftime('%d/%m/%Y')))
offPack = ('%s%s' % ('Offer pack sent on ',
row.CloseDate.strftime('%d/%m/%Y')))
else:
letterSent = 'Contract not sent yet'
offPack = 'Offer pack not sent yet'
#This checks if complete docs date has been filled in. If not,
#we can assume that complete documents are not yet provided and
#we are using current date instead.
if row.CompleteDocsDate:
docsRecDate = row.CompleteDocsDate
else:
docsRecDate = datetime.datetime.today()
#calculate the number of days late based on current document and contract statuses
#and on docs submission date
if docsRecDate > row.CutOffDate:
days = day_diff(docsRecDate, row.CutOffDate)
elif docsRecDate > row.EffectiveDate:
days = day_diff(docsRecDate, row.EffectiveDate)
if row.SignedLetterReceivedOn:
if row.SignedLetterReceivedOn > row.CutOffDate:
days = day_diff(row.SignedLetterReceivedOn, row.CutOffDate)
elif row.SignedLetterReceivedOn > row.EffectiveDate:
days = day_diff(row.SignedLetterReceivedOn, row.EffectiveDate)
#create notes string
if sigLetterRec:
notes = ('"%s%s.\n%s.\n%s.\n%s.\n%s.\n%s%s.\n%s%d.\n%s"' %('New Hire effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
compDocs,
letterSent,
offPack,
sigLetter,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late: ',
days,
row.EEImpact))
else:
notes = ('"%s%s.\n%s.\n%s.\n%s.\n%s%s.\n%s%d.\n%s"' %('New Hire effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
compDocs,
letterSent,
offPack,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late: ',
days,
row.EEImpact))
#write result to dictionary
write_to_dict(row, ttype, notes)
def late_termination(sD, eD, cursor):
"""
This function finds late job change actions in SAP among tickets
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname, E.Surname , R.CauseText,
T.SourceID, T.InRejComment
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID IN (336, 337, 338) AND
T.DateReceived BETWEEN ? AND ?) AND
(((T.EffectiveDate < T.CompleteDocsDate) OR
(T.CutOffDate < T.CompleteDocsDate) AND T.CompleteDocsDate IS NOT NULL)
OR ((T.EffectiveDate < T.DateReceived OR T.CutOffDate < T.DateReceived) AND
T.CompleteDocsDate IS NULL))"""
ttype = "Termination - Late Submission"
#getting recordset from DB
result = get_DBdata(sql, sD, eD, cursor)
if result:
for row in result:
source = get_source_string(row.SourceID)
compDocs = get_compDocsString(row.CompleteDocsDate, row.InRejComment)
dateRec = get_docsDate(row.CompleteDocsDate)
notes = ('"%s%s.\n%s%s.\n%s.\n%s%s.\n%s%d.\n%s"' %
('Termination effective on ',
row.EffectiveDate.strftime('%d/%m/%Y'),
source,
row.DateReceived.strftime('%d/%m/%Y'),
compDocs,
'Request should be submitted by ',
row.CutOffDate.strftime('%d/%m/%Y'),
'Days late for payroll cut off: ',
day_diff(dateRec, row.CutOffDate),
row.EEImpact
))
write_to_dict(row, ttype, notes)
def termination_checklist_check(cursor):
"""
This function finds all unsubmitted termination checklists and
feeds them into dictionary.
"""
sql = """SELECT T.ID, T.DateReceived, T.EffectiveDate,
T.CutOffDate, T.EEImpact, T.CompleteDocsDate,
T.NumberOfReminders, E.EEID, E.Forname,
E.Surname, T.LetterReceived, R.CauseText
FROM tTracker as T INNER JOIN
tMCBCEmployee as E ON T.EeID = E.ID INNER JOIN
tRootCause as R ON T.RootCause = R.ID
WHERE (T.ProcessID = 417) AND (T.LetterReceived = 0)
AND (T.EffectiveDate < GETDATE()) AND (T.CurrentStatus <> 1)"""
ttype = 'Termination - No Termination Checklist submitted'
#getting recordset from DB
sD = None
eD = None
result = get_DBdata(sql, sD, eD, cursor)
#if there are any records in the recordset each row is sent to be
#added to dictionary
if result:
for row in result:
notes = ('Possible SOX audit compliance issue')
write_to_dict(row, ttype, notes)
def get_source_string(sourceID):
if sourceID == 2:
return 'PCR received on '
else:
return 'Non-PCR request received on'
def get_docsDate(compdate):
if compdate:
return compdate
else:
return datetime.datetime.today()
def get_compDocsString(compdate, details = None):
if details:
addcoments = (' (details about missing data: %s)' % (details))
else:
addcoments = ''
if compdate:
return ('%s%s%s' % ('Complete request received on ',
compdate.strftime('%d/%m/%Y'),
addcoments))
else:
return 'Complete documents still pending'
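# e.g. (added, illustrative): get_compDocsString(datetime.datetime(2014, 5, 2)) returns
# 'Complete request received on 02/05/2014'; with compdate=None the pending message is returned.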
def write_to_file():
"""
This function saves report to csv file
"""
#Open file to save report to
report = open('report.csv', 'w')
for key in LATE_CASES:
#build file entry row from dict data
fileentry = '%d,%s,%s,%s,%s,%d' % (key, LATE_CASES[key]['type'],
LATE_CASES[key]['notes'],
LATE_CASES[key]['rootcause'],
LATE_CASES[key]['eename'],
LATE_CASES[key]['eeid'])
#write entry to file
report.write(fileentry + '\n')
#close the file
report.close()
def runReport(sD, eD):
with get_connection() as cursor:
#Contract Expiration section
contract_exp_by_dates(sD, eD, cursor)
contract_exp_by_letters(sD, eD, cursor)
contract_no_response(sD, eD, cursor)
#LOA section
late_loa(sD, eD, cursor)
#Return From LOA section
ret_from_loa_by_dates(sD, eD, cursor)
#Job Change section
#Job Changes action tickets
procname = "Job Change"
scope = "315, 331, 323, 335, 340, 339"
late_by_action(sD, eD, scope, procname, cursor)
#Job Changes letter tickets
scope = '363, 385, 386, 400, 410, 412, 413'
late_by_letters(sD, eD, scope, procname, cursor)
#New Hire section
late_hire(sD, eD, cursor)
#Pay Changes section
procname = 'Pay Change'
#Pay Changes action tickets
scope = '327, 328, 329'
late_by_action(sD, eD, scope, procname, cursor)
#Pay Changes letter tickets
scope = '395, 396, 397, 347'
late_by_letters(sD, eD, scope, procname, cursor)
#Termination section
procname = 'Termination'
#Termination actions
late_termination(sD, eD, cursor)
#Termination checklist
termination_checklist_check(cursor)
#Save the report to file
write_to_file()
if __name__ == '__main__':
"""
Program entry point.
Command line argument should contain a date in YYYY-MM-DD format
"""
#making sure that date will be passed and in correct format
if len(sys.argv) < 3:
print "Missing date, please pass it as an argument!"
sys.exit()
elif not re.match(r"\d{4}-\d{2}-\d{2}", sys.argv[1]):
print "Incorrect date format - should be YYYY-MM-DD"
sys.exit()
elif not re.match(r"\d{4}-\d{2}-\d{2}", sys.argv[2]):
print "Incorrect date format - should be YYYY-MM-DD"
sys.exit()
runReport(sys.argv[1], sys.argv[2])
|
gpl-2.0
| -7,754,132,641,585,172,000
| 40.519417
| 123
| 0.512949
| false
| 4.150431
| false
| false
| false
|
closeio/socketshark
|
socketshark/utils.py
|
1
|
3506
|
import asyncio
import ssl
import time
from urllib.parse import urlsplit, urlunsplit
import aiohttp
from . import constants as c
def _get_rate_limit_wait(log, resp, opts):
"""
Returns the number of seconds we should wait given a 429 HTTP response and
HTTP options.
"""
max_wait = 3600
wait = opts['wait']
header_name = opts['rate_limit_reset_header_name']
if header_name and header_name in resp.headers:
header_value = resp.headers[header_name]
try:
new_wait = float(header_value)
# Make sure we have a valid value (not negative, NaN, or Inf)
if 0 <= new_wait <= max_wait:
wait = new_wait
elif new_wait > max_wait:
log.warn(
'rate reset value too high',
name=header_name,
value=header_value,
)
wait = max_wait
else:
log.warn(
'invalid rate reset value',
name=header_name,
value=header_value,
)
except ValueError:
log.warn(
'invalid rate reset value',
name=header_name,
value=header_value,
)
return wait
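# Illustrative example (added): with opts = {'wait': 1,
# 'rate_limit_reset_header_name': 'X-RateLimit-Reset'} and a 429 response whose
# X-RateLimit-Reset header is '30', this returns 30.0; a missing or invalid header
# falls back to opts['wait'].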
def _scrub_url(url):
"""Scrub URL username and password."""
url_parts = urlsplit(url)
if url_parts.password is None:
return url
else:
# url_parts tuple doesn't include password in _fields
# so can't easily use _replace to get rid of password
# and then call urlunsplit to reconstruct url.
_, _, hostinfo = url_parts.netloc.rpartition('@')
scrubbed_netloc = f'*****:*****@{hostinfo}'
scrubbed_url_parts = url_parts._replace(netloc=scrubbed_netloc)
return urlunsplit(scrubbed_url_parts)
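# e.g. (added, illustrative): _scrub_url('https://user:secret@api.example.com/hook')
# returns 'https://*****:*****@api.example.com/hook'; URLs without credentials pass through.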
async def http_post(shark, url, data):
log = shark.log.bind(url=_scrub_url(url))
opts = shark.config['HTTP']
if opts.get('ssl_cafile'):
ssl_context = ssl.create_default_context(cafile=opts['ssl_cafile'])
else:
ssl_context = None
conn = aiohttp.TCPConnector(ssl_context=ssl_context)
async with aiohttp.ClientSession(connector=conn) as session:
wait = opts['wait']
for n in range(opts['tries']):
if n > 0:
await asyncio.sleep(wait)
try:
start_time = time.time()
response_data = None
async with session.post(
url, json=data, timeout=opts['timeout']
) as resp:
if resp.status == 429: # Too many requests.
wait = _get_rate_limit_wait(log, resp, opts)
continue
else:
wait = opts['wait']
resp.raise_for_status()
response_data = await resp.json()
return response_data
except aiohttp.ClientError:
log.exception('unhandled exception in http_post')
except asyncio.TimeoutError:
log.exception('timeout in http_post')
finally:
log.debug(
'http request',
request=data,
response=response_data,
duration=time.time() - start_time,
)
return {'status': 'error', 'error': c.ERR_SERVICE_UNAVAILABLE}
|
mit
| -1,969,585,453,855,249,400
| 32.711538
| 78
| 0.51911
| false
| 4.393484
| false
| false
| false
|
Architizer/Feedly
|
feedly/serializers/cassandra/aggregated_activity_serializer.py
|
1
|
1244
|
from feedly.activity import AggregatedActivity
from feedly.serializers.aggregated_activity_serializer import AggregatedActivitySerializer
import pickle
class CassandraAggregatedActivitySerializer(AggregatedActivitySerializer):
def __init__(self, model):
self.model = model
def dumps(self, aggregated):
activities = pickle.dumps(aggregated.activities)
model_instance = self.model(
activity_id=long(aggregated.serialization_id),
activities=activities,
group=aggregated.group,
created_at=aggregated.created_at,
updated_at=aggregated.updated_at,
seen_at=aggregated.seen_at,
read_at=aggregated.read_at
)
return model_instance
def loads(self, serialized_aggregated):
activities = pickle.loads(serialized_aggregated.activities)
aggregated = AggregatedActivity(
group=serialized_aggregated.group,
activities=activities,
created_at=serialized_aggregated.created_at,
updated_at=serialized_aggregated.updated_at,
seen_at=serialized_aggregated.seen_at,
read_at=serialized_aggregated.read_at
)
return aggregated
|
bsd-3-clause
| -4,981,306,401,176,553,000
| 35.588235
| 90
| 0.67283
| false
| 4.47482
| false
| false
| false
|
Micutio/CAB_Simulations
|
SugarScape/ca/ss_cell.py
|
1
|
1856
|
"""
Module containing the cell definition for the Sugarscape world.
"""
from cab.ca.cell import CellHex
__author__ = 'Michael Wagner'
__version__ = '1.0'
class WorldCell(CellHex):
def __init__(self, x, y, gc):
super().__init__(x, y, gc)
self.t_gen = None
self.sugar = 0
self.spice = 0
self.max_sugar = 0
self.max_spice = 0
self.growth_cycle = 3
self.growth_cycle_count = 0
self.state = False
def set_terrain_gen(self, tg):
self.t_gen = tg
self.sugar = int(self.t_gen.get(self.x, self.y))
self.spice = int(self.gc.MAX_SUGAR - self.sugar)
self.max_sugar = int(self.t_gen.get(self.x, self.y))
self.max_spice = int(self.gc.MAX_SUGAR - self.sugar)
# print("sugar: {0}, spice: {1}".format(self.sugar, self.spice))
def clone(self, x, y):
wc = WorldCell(x, y, self.gc)
wc.set_terrain_gen(self.t_gen)
return wc
def sense_neighborhood(self):
pass
def update(self):
if self.growth_cycle_count == self.growth_cycle:
if self.sugar < self.max_sugar:
self.sugar += 1
if self.spice < self.max_spice:
self.spice += 1
self.growth_cycle_count = 0
else:
self.growth_cycle_count += 1
self.calculate_cell_color()
def calculate_cell_color(self):
if self.max_sugar == 0:
normalized_su = 0
else:
normalized_su = self.sugar / self.gc.MAX_SUGAR
if self.max_spice == 0:
normalized_sp = 0
else:
normalized_sp = self.spice / self.gc.MAX_SUGAR
red = int(min(max(0, 150 * normalized_sp), 255))
green = int(min(max(0, 200 * normalized_su), 255))
blue = 0
self.color = (red, green, blue)
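# Note (added for clarity): with sugar and spice both at gc.MAX_SUGAR the cell renders
# as (150, 200, 0); an empty cell renders as (0, 0, 0), so greener cells carry more sugar
# and redder cells more spice.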
|
mit
| 2,815,983,188,803,360,300
| 28.460317
| 72
| 0.542026
| false
| 3.145763
| false
| false
| false
|
michaellaier/pymor
|
src/pymortests/fixtures/discretization.py
|
1
|
1548
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
from itertools import product
import pytest
from pymor.discretizers.advection import discretize_nonlinear_instationary_advection_fv
from pymor.discretizers.elliptic import discretize_elliptic_cg
from pymortests.fixtures.analyticalproblem import (picklable_thermalblock_problems, non_picklable_thermalblock_problems,
burgers_problems)
picklable_discretizaion_generators = \
[lambda p=p,d=d: discretize_elliptic_cg(p, diameter=d)[0]
for p, d in product(picklable_thermalblock_problems, [1./50., 1./100.])] + \
[lambda p=p,d=d: discretize_nonlinear_instationary_advection_fv(p, diameter=d)[0]
for p, d in product(burgers_problems, [1./10., 1./15.])]
non_picklable_discretization_generators = \
[lambda p=p,d=d: discretize_elliptic_cg(p, diameter=d)[0]
for p, d in product(non_picklable_thermalblock_problems, [1./20., 1./30.])]
discretization_generators = picklable_discretizaion_generators + non_picklable_discretization_generators
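# Note (added for clarity): the p=p, d=d default arguments above freeze each
# problem/diameter pair at lambda-definition time, so every entry builds its own
# discretization lazily, e.g. discretization_generators[0]() constructs the first
# thermalblock discretization on demand.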
@pytest.fixture(params=discretization_generators)
def discretization(request):
return request.param()
@pytest.fixture(params=picklable_discretizaion_generators)
def picklable_discretization(request):
return request.param()
|
bsd-2-clause
| -357,065,425,509,571,400
| 37.7
| 120
| 0.72739
| false
| 3.071429
| false
| false
| false
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Examples/Catalyst/PythonDolfinExample/simulation-catalyst-step2.py
|
1
|
5483
|
"""This demo program solves the incompressible Navier-Stokes equations
on an L-shaped domain using Chorin's splitting method."""
# Copyright (C) 2010-2011 Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Mikael Mortensen 2011
#
# First added: 2010-08-30
# Last changed: 2011-06-30
#
# SC14 Paraview's Catalyst tutorial
#
# Step 2 : plug to catalyst python API, add a coProcess function
#
# [SC14-Catalyst] we need a python environment that enables import of both Dolfin and ParaView
execfile("simulation-env.py")
# [SC14-Catalyst] import paraview, vtk and paraview's simple API
import sys
import paraview
import paraview.vtk as vtk
import paraview.simple as pvsimple
# [SC14-Catalyst] check for command line arguments
if len(sys.argv) != 3:
print "command is 'python",sys.argv[0],"<script name> <number of time steps>'"
sys.exit(1)
# [SC14-Catalyst] initialize and read input parameters
paraview.options.batch = True
paraview.options.symmetric = True
# [SC14-Catalyst] import user co-processing script
import vtkPVCatalystPython
import os
scriptpath, scriptname = os.path.split(sys.argv[1])
sys.path.append(scriptpath)
if scriptname.endswith(".py"):
print 'script name is ', scriptname
scriptname = scriptname[0:len(scriptname)-3]
try:
cpscript = __import__(scriptname)
except:
print sys.exc_info()
print 'Cannot find ', scriptname, ' -- no coprocessing will be performed.'
sys.exit(1)
# [SC14-Catalyst] Co-Processing routine to be called at the end of each simulation time step
def coProcess(grid, time, step):
# initialize data description
datadescription = vtkPVCatalystPython.vtkCPDataDescription()
datadescription.SetTimeData(time, step)
datadescription.AddInput("input")
cpscript.RequestDataDescription(datadescription)
# to be continued ...
# Begin demo
from dolfin import *
# Print log messages only from the root process in parallel
parameters["std_out_all_processes"] = False;
# Load mesh from file
mesh = Mesh(DOLFIN_EXAMPLE_DATA_DIR+"/lshape.xml.gz")
# Define function spaces (P2-P1)
V = VectorFunctionSpace(mesh, "Lagrange", 2)
Q = FunctionSpace(mesh, "Lagrange", 1)
# Define trial and test functions
u = TrialFunction(V)
p = TrialFunction(Q)
v = TestFunction(V)
q = TestFunction(Q)
# Set parameter values
dt = 0.01
T = 3
nu = 0.01
# Define time-dependent pressure boundary condition
p_in = Expression("sin(3.0*t)", t=0.0)
# Define boundary conditions
noslip = DirichletBC(V, (0, 0),
"on_boundary && \
(x[0] < DOLFIN_EPS | x[1] < DOLFIN_EPS | \
(x[0] > 0.5 - DOLFIN_EPS && x[1] > 0.5 - DOLFIN_EPS))")
inflow = DirichletBC(Q, p_in, "x[1] > 1.0 - DOLFIN_EPS")
outflow = DirichletBC(Q, 0, "x[0] > 1.0 - DOLFIN_EPS")
bcu = [noslip]
bcp = [inflow, outflow]
# Create functions
u0 = Function(V)
u1 = Function(V)
p1 = Function(Q)
# Define coefficients
k = Constant(dt)
f = Constant((0, 0))
# Tentative velocity step
F1 = (1/k)*inner(u - u0, v)*dx + inner(grad(u0)*u0, v)*dx + \
nu*inner(grad(u), grad(v))*dx - inner(f, v)*dx
a1 = lhs(F1)
L1 = rhs(F1)
# Pressure update
a2 = inner(grad(p), grad(q))*dx
L2 = -(1/k)*div(u1)*q*dx
# Velocity update
a3 = inner(u, v)*dx
L3 = inner(u1, v)*dx - k*inner(grad(p1), v)*dx
# Assemble matrices
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Use amg preconditioner if available
prec = "amg" if has_krylov_solver_preconditioner("amg") else "default"
# Create files for storing solution
ufile = File("results/velocity.pvd")
pfile = File("results/pressure.pvd")
# Time-stepping
maxtimestep = int(sys.argv[2])
tstep = 0
t = dt
while tstep < maxtimestep:
# Update pressure boundary condition
p_in.t = t
# Compute tentative velocity step
begin("Computing tentative velocity")
b1 = assemble(L1)
[bc.apply(A1, b1) for bc in bcu]
solve(A1, u1.vector(), b1, "gmres", "default")
end()
# Pressure correction
begin("Computing pressure correction")
b2 = assemble(L2)
[bc.apply(A2, b2) for bc in bcp]
solve(A2, p1.vector(), b2, "gmres", prec)
end()
# Velocity correction
begin("Computing velocity correction")
b3 = assemble(L3)
[bc.apply(A3, b3) for bc in bcu]
solve(A3, u1.vector(), b3, "gmres", "default")
end()
# Plot solution [SC14-Catalyst] Not anymore
# plot(p1, title="Pressure", rescale=True)
# plot(u1, title="Velocity", rescale=True)
# Save to file [SC14-Catalyst] Not anymore
# ufile << u1
# pfile << p1
# [SC14-Catalyst] convert solution to VTK grid
ugrid = None
# [SC14-Catalyst] trigger catalyst execution
coProcess(ugrid,t,tstep)
# Move to next time step
u0.assign(u1)
t += dt
tstep += 1
print "t =", t, "step =",tstep
# Hold plot [SC14-Catalyst] Not anymore
# interactive()
|
gpl-3.0
| 4,628,511,660,132,762,000
| 26.552764
| 94
| 0.683932
| false
| 3.012637
| false
| false
| false
|
jimi-c/ansible
|
lib/ansible/modules/cloud/amazon/ec2_instance.py
|
1
|
67394
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_instance
short_description: Create & manage EC2 instances
description:
  - Create and manage AWS EC2 instances.
version_added: "2.5"
author:
- Ryan Scott Brown, @ryansb
requirements: [ "boto3", "botocore" ]
options:
instance_ids:
description:
- If you specify one or more instance IDs, only instances that have the specified IDs are returned.
state:
description:
- Goal state for the instances
choices: [present, terminated, running, started, stopped, restarted, rebooted, absent]
default: present
wait:
description:
- Whether or not to wait for the desired state (use wait_timeout to customize this)
default: true
wait_timeout:
description:
- How long to wait (in seconds) for the instance to finish booting/terminating
default: 600
instance_type:
description:
- Instance type to use for the instance, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html)
Only required when instance is not already present
default: t2.micro
user_data:
description:
- Opaque blob of data which is made available to the ec2 instance
tower_callback:
description:
- Preconfigured user-data to enable an instance to perform a Tower callback (Linux only).
- Mutually exclusive with I(user_data).
- For Windows instances, to enable remote access via Ansible set I(tower_callback.windows) to true, and optionally set an admin password.
- If using 'windows' and 'set_password', callback to Tower will not be performed but the instance will be ready to receive winrm connections from Ansible.
suboptions:
tower_address:
description:
- IP address or DNS name of Tower server. Must be accessible via this address from the VPC that this instance will be launched in.
job_template_id:
description:
- Either the integer ID of the Tower Job Template, or the name (name supported only for Tower 3.2+)
host_config_key:
description:
- Host configuration secret key generated by the Tower job template.
tags:
description:
- A hash/dictionary of tags to add to the new instance or to add/remove from an existing one.
purge_tags:
description:
- Delete any tags not specified in the task that are on the instance.
This means you have to specify all the desired tags on each task affecting an instance.
default: false
image:
description:
- An image to use for the instance. The ec2_ami_facts module may be used to retrieve images.
        One of I(image) or I(image_id) is required when instance is not already present.
- Complex object containing I(image.id), I(image.ramdisk), and I(image.kernel).
- I(image.id) is the AMI ID.
- I(image.ramdisk) overrides the AMI's default ramdisk ID.
- I(image.kernel) is a string AKI to override the AMI kernel.
image_id:
description:
      - I(ami) ID to use for the instance. One of I(image) or I(image_id) is required when instance is not already present.
- This is an alias for I(image.id).
security_groups:
description:
- A list of security group IDs or names (strings). Mutually exclusive with I(security_group).
security_group:
description:
- A security group ID or name. Mutually exclusive with I(security_groups).
name:
description:
- The Name tag for the instance.
vpc_subnet_id:
description:
- The subnet ID in which to launch the instance (VPC)
        If none is provided, ec2_instance will choose the default zone of the default VPC
aliases: ['subnet_id']
network:
description:
- Either a dictionary containing the key 'interfaces' corresponding to a list of network interface IDs or
containing specifications for a single network interface.
- If specifications for a single network are given, accepted keys are assign_public_ip (bool),
private_ip_address (str), ipv6_addresses (list), source_dest_check (bool), description (str),
delete_on_termination (bool), device_index (int), groups (list of security group IDs),
private_ip_addresses (list), subnet_id (str).
- I(network.interfaces) should be a list of ENI IDs (strings) or a list of objects containing the key I(id).
- Use the ec2_eni to create ENIs with special settings.
volumes:
description:
- A list of block device mappings, by default this will always use the AMI root device so the volumes option is primarily for adding more storage.
- A mapping contains the (optional) keys device_name, virtual_name, ebs.volume_type, ebs.volume_size, ebs.kms_key_id,
ebs.iops, and ebs.delete_on_termination.
- For more information about each parameter, see U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
launch_template:
description:
- The EC2 launch template to base instance configuration on.
- I(launch_template.id) the ID or the launch template (optional if name is specified)
- I(launch_template.name) the pretty name of the launch template (optional if id is specified)
- I(launch_template.version) the specific version of the launch template to use. If unspecified, the template default is chosen.
key_name:
description:
- Name of the SSH access key to assign to the instance - must exist in the region the instance is created.
availability_zone:
description:
      - Specify an availability zone to use the default subnet in it. Useful if not specifying the I(vpc_subnet_id) parameter.
- If no subnet, ENI, or availability zone is provided, the default subnet in the default VPC will be used in the first AZ (alphabetically sorted).
instance_initiated_shutdown_behavior:
description:
- Whether to stop or terminate an instance upon shutdown.
choices: ['stop', 'terminate']
tenancy:
description:
- What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
choices: ['dedicated', 'default']
termination_protection:
description:
- Whether to enable termination protection.
This module will not terminate an instance with termination protection active, it must be turned off first.
cpu_credit_specification:
description:
- For T2 series instances, choose whether to allow increased charges to buy CPU credits if the default pool is depleted.
- Choose I(unlimited) to enable buying additional CPU credits.
choices: [unlimited, standard]
cpu_options:
description:
      - Reduce the number of vCPUs exposed to the instance.
- Those parameters can only be set at instance launch. The two suboptions threads_per_core and core_count are mandatory.
- See U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-optimize-cpu.html) for combinations available.
- Requires botocore >= 1.10.16
version_added: 2.7
suboptions:
threads_per_core:
description:
- Select the number of threads per core to enable. Disable or Enable Intel HT
choices: [1, 2]
required: true
core_count:
description:
- Set the number of core to enable.
required: true
detailed_monitoring:
description:
- Whether to allow detailed cloudwatch metrics to be collected, enabling more detailed alerting.
ebs_optimized:
description:
      - Whether instance should use optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
filters:
description:
- A dict of filters to apply when deciding whether existing instances match and should be altered. Each dict item
consists of a filter key and a filter value. See
U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
for possible filters. Filter names and values are case sensitive.
By default, instances are filtered for counting by their "Name" tag, base AMI, state (running, by default), and
subnet ID. Any queryable filter can be used. Good candidates are specific tags, SSH keys, or security groups.
default: {"tag:Name": "<provided-Name-attribute>", "subnet-id": "<provided-or-default subnet>"}
instance_role:
description:
- The ARN or name of an EC2-enabled instance role to be used. If a name is not provided in arn format
then the ListInstanceProfiles permission must also be granted.
U(https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListInstanceProfiles.html) If no full ARN is provided,
the role with a matching name will be used from the active AWS account.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Terminate every running instance in a region. Use with EXTREME caution.
- ec2_instance:
state: absent
filters:
instance-state-name: running
# restart a particular instance by its ID
- ec2_instance:
state: restarted
instance_ids:
- i-12345678
# start an instance with a public IP address
- ec2_instance:
name: "public-compute-instance"
key_name: "prod-ssh-key"
vpc_subnet_id: subnet-5ca1ab1e
instance_type: c5.large
security_group: default
network:
assign_public_ip: true
image_id: ami-123456
tags:
Environment: Testing
# start an instance and have it begin a Tower callback on boot
- ec2_instance:
name: "tower-callback-test"
key_name: "prod-ssh-key"
vpc_subnet_id: subnet-5ca1ab1e
security_group: default
tower_callback:
# IP or hostname of tower server
tower_address: 1.2.3.4
job_template_id: 876
host_config_key: '[secret config key goes here]'
network:
assign_public_ip: true
image_id: ami-123456
cpu_credit_specification: unlimited
tags:
SomeThing: "A value"
'''
RETURN = '''
instances:
description: a list of ec2 instances
returned: always
type: complex
contains:
ami_launch_index:
description: The AMI launch index, which can be used to find this instance in the launch group.
returned: always
type: int
sample: 0
architecture:
description: The architecture of the image
returned: always
type: string
sample: x86_64
block_device_mappings:
description: Any block device mapping entries for the instance.
returned: always
type: complex
contains:
device_name:
description: The device name exposed to the instance (for example, /dev/sdh or xvdh).
returned: always
type: string
sample: /dev/sdh
ebs:
description: Parameters used to automatically set up EBS volumes when the instance is launched.
returned: always
type: complex
contains:
attach_time:
description: The time stamp when the attachment initiated.
returned: always
type: string
sample: "2017-03-23T22:51:24+00:00"
delete_on_termination:
description: Indicates whether the volume is deleted on instance termination.
returned: always
type: bool
sample: true
status:
description: The attachment state.
returned: always
type: string
sample: attached
volume_id:
description: The ID of the EBS volume
returned: always
type: string
sample: vol-12345678
client_token:
description: The idempotency token you provided when you launched the instance, if applicable.
returned: always
type: string
sample: mytoken
ebs_optimized:
description: Indicates whether the instance is optimized for EBS I/O.
returned: always
type: bool
sample: false
hypervisor:
description: The hypervisor type of the instance.
returned: always
type: string
sample: xen
iam_instance_profile:
description: The IAM instance profile associated with the instance, if applicable.
returned: always
type: complex
contains:
arn:
description: The Amazon Resource Name (ARN) of the instance profile.
returned: always
type: string
sample: "arn:aws:iam::000012345678:instance-profile/myprofile"
id:
description: The ID of the instance profile
returned: always
type: string
sample: JFJ397FDG400FG9FD1N
image_id:
description: The ID of the AMI used to launch the instance.
returned: always
type: string
sample: ami-0011223344
instance_id:
description: The ID of the instance.
returned: always
type: string
sample: i-012345678
instance_type:
description: The instance type size of the running instance.
returned: always
type: string
sample: t2.micro
key_name:
description: The name of the key pair, if this instance was launched with an associated key pair.
returned: always
type: string
sample: my-key
launch_time:
description: The time the instance was launched.
returned: always
type: string
sample: "2017-03-23T22:51:24+00:00"
monitoring:
description: The monitoring for the instance.
returned: always
type: complex
contains:
state:
description: Indicates whether detailed monitoring is enabled. Otherwise, basic monitoring is enabled.
returned: always
type: string
sample: disabled
network_interfaces:
description: One or more network interfaces for the instance.
returned: always
type: complex
contains:
association:
description: The association information for an Elastic IPv4 associated with the network interface.
returned: always
type: complex
contains:
ip_owner_id:
description: The ID of the owner of the Elastic IP address.
returned: always
type: string
sample: amazon
public_dns_name:
description: The public DNS name.
returned: always
type: string
sample: ""
public_ip:
description: The public IP address or Elastic IP address bound to the network interface.
returned: always
type: string
sample: 1.2.3.4
attachment:
description: The network interface attachment.
returned: always
type: complex
contains:
attach_time:
description: The time stamp when the attachment initiated.
returned: always
type: string
sample: "2017-03-23T22:51:24+00:00"
attachment_id:
description: The ID of the network interface attachment.
returned: always
type: string
sample: eni-attach-3aff3f
delete_on_termination:
description: Indicates whether the network interface is deleted when the instance is terminated.
returned: always
type: bool
sample: true
device_index:
description: The index of the device on the instance for the network interface attachment.
returned: always
type: int
sample: 0
status:
description: The attachment state.
returned: always
type: string
sample: attached
description:
description: The description.
returned: always
type: string
sample: My interface
groups:
description: One or more security groups.
returned: always
type: complex
contains:
- group_id:
description: The ID of the security group.
returned: always
type: string
sample: sg-abcdef12
group_name:
description: The name of the security group.
returned: always
type: string
sample: mygroup
ipv6_addresses:
description: One or more IPv6 addresses associated with the network interface.
returned: always
type: complex
contains:
- ipv6_address:
description: The IPv6 address.
returned: always
type: string
sample: "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
mac_address:
description: The MAC address.
returned: always
type: string
sample: "00:11:22:33:44:55"
network_interface_id:
description: The ID of the network interface.
returned: always
type: string
sample: eni-01234567
owner_id:
description: The AWS account ID of the owner of the network interface.
returned: always
type: string
sample: 01234567890
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
type: string
sample: 10.0.0.1
private_ip_addresses:
description: The private IPv4 addresses associated with the network interface.
returned: always
type: complex
contains:
- association:
description: The association information for an Elastic IP address (IPv4) associated with the network interface.
returned: always
type: complex
contains:
ip_owner_id:
description: The ID of the owner of the Elastic IP address.
returned: always
type: string
sample: amazon
public_dns_name:
description: The public DNS name.
returned: always
type: string
sample: ""
public_ip:
description: The public IP address or Elastic IP address bound to the network interface.
returned: always
type: string
sample: 1.2.3.4
primary:
description: Indicates whether this IPv4 address is the primary private IP address of the network interface.
returned: always
type: bool
sample: true
private_ip_address:
description: The private IPv4 address of the network interface.
returned: always
type: string
sample: 10.0.0.1
source_dest_check:
description: Indicates whether source/destination checking is enabled.
returned: always
type: bool
sample: true
status:
description: The status of the network interface.
returned: always
type: string
sample: in-use
subnet_id:
description: The ID of the subnet for the network interface.
returned: always
type: string
sample: subnet-0123456
vpc_id:
description: The ID of the VPC for the network interface.
returned: always
type: string
sample: vpc-0123456
placement:
description: The location where the instance launched, if applicable.
returned: always
type: complex
contains:
availability_zone:
description: The Availability Zone of the instance.
returned: always
type: string
sample: ap-southeast-2a
group_name:
description: The name of the placement group the instance is in (for cluster compute instances).
returned: always
type: string
sample: ""
tenancy:
description: The tenancy of the instance (if the instance is running in a VPC).
returned: always
type: string
sample: default
private_dns_name:
description: The private DNS name.
returned: always
type: string
sample: ip-10-0-0-1.ap-southeast-2.compute.internal
private_ip_address:
description: The IPv4 address of the network interface within the subnet.
returned: always
type: string
sample: 10.0.0.1
product_codes:
description: One or more product codes.
returned: always
type: complex
contains:
- product_code_id:
description: The product code.
returned: always
type: string
sample: aw0evgkw8ef3n2498gndfgasdfsd5cce
product_code_type:
description: The type of product code.
returned: always
type: string
sample: marketplace
public_dns_name:
description: The public DNS name assigned to the instance.
returned: always
type: string
sample:
public_ip_address:
description: The public IPv4 address assigned to the instance
returned: always
type: string
sample: 52.0.0.1
root_device_name:
description: The device name of the root device
returned: always
type: string
sample: /dev/sda1
root_device_type:
description: The type of root device used by the AMI.
returned: always
type: string
sample: ebs
security_groups:
description: One or more security groups for the instance.
returned: always
type: complex
contains:
- group_id:
description: The ID of the security group.
returned: always
type: string
sample: sg-0123456
- group_name:
description: The name of the security group.
returned: always
type: string
sample: my-security-group
network.source_dest_check:
description: Indicates whether source/destination checking is enabled.
returned: always
type: bool
sample: true
state:
description: The current state of the instance.
returned: always
type: complex
contains:
code:
description: The low byte represents the state.
returned: always
type: int
sample: 16
name:
description: The name of the state.
returned: always
type: string
sample: running
state_transition_reason:
description: The reason for the most recent state transition.
returned: always
type: string
sample:
subnet_id:
description: The ID of the subnet in which the instance is running.
returned: always
type: string
sample: subnet-00abcdef
tags:
description: Any tags assigned to the instance.
returned: always
type: dict
sample:
virtualization_type:
description: The type of virtualization of the AMI.
returned: always
type: string
sample: hvm
vpc_id:
description: The ID of the VPC the instance is in.
returned: always
type: dict
sample: vpc-0011223344
'''
import re
import uuid
import string
import textwrap
import time
from collections import namedtuple
try:
import boto3
import botocore.exceptions
except ImportError:
pass
from ansible.module_utils.six import text_type, string_types
from ansible.module_utils.six.moves.urllib import parse as urlparse
from ansible.module_utils._text import to_bytes, to_native
import ansible.module_utils.ec2 as ec2_utils
from ansible.module_utils.ec2 import (boto3_conn,
ec2_argument_spec,
get_aws_connection_info,
AWSRetry,
ansible_dict_to_boto3_filter_list,
compare_aws_tags,
boto3_tag_list_to_ansible_dict,
ansible_dict_to_boto3_tag_list,
camel_dict_to_snake_dict)
from ansible.module_utils.aws.core import AnsibleAWSModule
module = None
def tower_callback_script(tower_conf, windows=False, passwd=None):
script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'
if windows and passwd is not None:
script_tpl = """<powershell>
$admin = [adsi]("WinNT://./administrator, user")
$admin.PSBase.Invoke("SetPassword", "{PASS}")
Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
</powershell>
"""
return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
elif windows and passwd is None:
script_tpl = """<powershell>
$admin = [adsi]("WinNT://./administrator, user")
Invoke-Expression ((New-Object System.Net.Webclient).DownloadString('{SCRIPT}'))
</powershell>
"""
return to_native(textwrap.dedent(script_tpl).format(PASS=passwd, SCRIPT=script_url))
elif not windows:
for p in ['tower_address', 'job_template_id', 'host_config_key']:
if p not in tower_conf:
module.fail_json(msg="Incomplete tower_callback configuration. tower_callback.{0} not set.".format(p))
if isinstance(tower_conf['job_template_id'], string_types):
tower_conf['job_template_id'] = urlparse.quote(tower_conf['job_template_id'])
tpl = string.Template(textwrap.dedent("""#!/bin/bash
set -x
retry_attempts=10
attempt=0
while [[ $attempt -lt $retry_attempts ]]
do
status_code=`curl --max-time 10 -v -k -s -i \
--data "host_config_key=${host_config_key}" \
'https://${tower_address}/api/v2/job_templates/${template_id}/callback/' \
| head -n 1 \
| awk '{print $2}'`
if [[ $status_code == 404 ]]
then
status_code=`curl --max-time 10 -v -k -s -i \
--data "host_config_key=${host_config_key}" \
'https://${tower_address}/api/v1/job_templates/${template_id}/callback/' \
| head -n 1 \
| awk '{print $2}'`
# fall back to using V1 API for Tower 3.1 and below, since v2 API will always 404
fi
if [[ $status_code == 201 ]]
then
exit 0
fi
attempt=$(( attempt + 1 ))
echo "$${status_code} received... retrying in 1 minute. (Attempt $${attempt})"
sleep 60
done
exit 1
"""))
return tpl.safe_substitute(tower_address=tower_conf['tower_address'],
template_id=tower_conf['job_template_id'],
host_config_key=tower_conf['host_config_key'])
raise NotImplementedError("Only windows with remote-prep or non-windows with tower job callback supported so far.")
@AWSRetry.jittered_backoff()
def manage_tags(match, new_tags, purge_tags, ec2):
changed = False
old_tags = boto3_tag_list_to_ansible_dict(match['Tags'])
tags_to_set, tags_to_delete = compare_aws_tags(
old_tags, new_tags,
purge_tags=purge_tags,
)
if tags_to_set:
ec2.create_tags(
Resources=[match['InstanceId']],
Tags=ansible_dict_to_boto3_tag_list(tags_to_set))
changed |= True
if tags_to_delete:
delete_with_current_values = dict((k, old_tags.get(k)) for k in tags_to_delete)
ec2.delete_tags(
Resources=[match['InstanceId']],
Tags=ansible_dict_to_boto3_tag_list(delete_with_current_values))
changed |= True
return changed
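# Illustration (hypothetical helper, not part of the module): the semantics that
# manage_tags() relies on. compare_aws_tags returns the tags to (re)write and the
# keys to delete; with purge_tags=True, tags present on the instance but absent
# from the task are scheduled for deletion.
def _tag_diff_example():
    current = {'Name': 'web', 'Env': 'old', 'Stray': 'x'}
    desired = {'Name': 'web', 'Env': 'prod'}
    to_set, to_delete = compare_aws_tags(current, desired, purge_tags=True)
    # for these inputs: to_set == {'Env': 'prod'} and to_delete == ['Stray']
    return to_set, to_delete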
def build_volume_spec(params):
volumes = params.get('volumes') or []
return [ec2_utils.snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes]
def add_or_update_instance_profile(instance, desired_profile_name):
instance_profile_setting = instance.get('IamInstanceProfile')
if instance_profile_setting and desired_profile_name:
if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')):
# great, the profile we asked for is what's there
return False
else:
desired_arn = determine_iam_role(desired_profile_name)
if instance_profile_setting.get('Arn') == desired_arn:
return False
# update association
ec2 = module.client('ec2')
try:
association = ec2.describe_iam_instance_profile_associations(Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}])
except botocore.exceptions.ClientError as e:
# check for InvalidAssociationID.NotFound
module.fail_json_aws(e, "Could not find instance profile association")
try:
resp = ec2.replace_iam_instance_profile_association(
AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'],
IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}
)
return True
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e, "Could not associate instance profile")
if not instance_profile_setting and desired_profile_name:
# create association
ec2 = module.client('ec2')
try:
resp = ec2.associate_iam_instance_profile(
IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)},
InstanceId=instance['InstanceId']
)
return True
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e, "Could not associate new instance profile")
return False
def build_network_spec(params, ec2=None):
"""
Returns list of interfaces [complex]
Interface type: {
'AssociatePublicIpAddress': True|False,
'DeleteOnTermination': True|False,
'Description': 'string',
'DeviceIndex': 123,
'Groups': [
'string',
],
'Ipv6AddressCount': 123,
'Ipv6Addresses': [
{
'Ipv6Address': 'string'
},
],
'NetworkInterfaceId': 'string',
'PrivateIpAddress': 'string',
'PrivateIpAddresses': [
{
'Primary': True|False,
'PrivateIpAddress': 'string'
},
],
'SecondaryPrivateIpAddressCount': 123,
'SubnetId': 'string'
},
"""
if ec2 is None:
ec2 = module.client('ec2')
interfaces = []
network = params.get('network') or {}
if not network.get('interfaces'):
# they only specified one interface
spec = {
'DeviceIndex': 0,
}
if network.get('assign_public_ip') is not None:
spec['AssociatePublicIpAddress'] = network['assign_public_ip']
if params.get('vpc_subnet_id'):
spec['SubnetId'] = params['vpc_subnet_id']
else:
default_vpc = get_default_vpc(ec2)
if default_vpc is None:
raise module.fail_json(
msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance")
else:
sub = get_default_subnet(ec2, default_vpc)
spec['SubnetId'] = sub['SubnetId']
if network.get('private_ip_address'):
spec['PrivateIpAddress'] = network['private_ip_address']
if params.get('security_group') or params.get('security_groups'):
groups = discover_security_groups(
group=params.get('security_group'),
groups=params.get('security_groups'),
subnet_id=spec['SubnetId'],
ec2=ec2
)
spec['Groups'] = [g['GroupId'] for g in groups]
# TODO more special snowflake network things
return [spec]
# handle list of `network.interfaces` options
for idx, interface_params in enumerate(network.get('interfaces', [])):
spec = {
'DeviceIndex': idx,
}
if isinstance(interface_params, string_types):
# naive case where user gave
# network_interfaces: [eni-1234, eni-4567, ....]
# put into normal data structure so we don't dupe code
interface_params = {'id': interface_params}
if interface_params.get('id') is not None:
# if an ID is provided, we don't want to set any other parameters.
spec['NetworkInterfaceId'] = interface_params['id']
interfaces.append(spec)
continue
spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True)
if interface_params.get('ipv6_addresses'):
spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])]
if interface_params.get('private_ip_address'):
spec['PrivateIpAddress'] = interface_params.get('private_ip_address')
if interface_params.get('description'):
spec['Description'] = interface_params.get('description')
if interface_params.get('subnet_id', params.get('vpc_subnet_id')):
spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id'))
        elif not spec.get('SubnetId') and not interface_params.get('id'):
# TODO grab a subnet from default VPC
raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params))
interfaces.append(spec)
return interfaces
def warn_if_public_ip_assignment_changed(instance):
# This is a non-modifiable attribute.
assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip')
if assign_public_ip is None:
return
# Check that public ip assignment is the same and warn if not
public_dns_name = instance.get('PublicDnsName')
if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name):
module.warn(
"Unable to modify public ip assignment to {0} for instance {1}. "
"Whether or not to assign a public IP is determined during instance creation.".format(
assign_public_ip, instance['InstanceId']))
def warn_if_cpu_options_changed(instance):
# This is a non-modifiable attribute.
cpu_options = module.params.get('cpu_options')
if cpu_options is None:
return
# Check that the CpuOptions set are the same and warn if not
core_count_curr = instance['CpuOptions'].get('CoreCount')
core_count = cpu_options.get('core_count')
threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore')
threads_per_core = cpu_options.get('threads_per_core')
if core_count_curr != core_count:
module.warn(
"Unable to modify core_count from {0} to {1}. "
"Assigning a number of core is determinted during instance creation".format(
core_count_curr, core_count))
if threads_per_core_curr != threads_per_core:
module.warn(
"Unable to modify threads_per_core from {0} to {1}. "
"Assigning a number of threads per core is determined during instance creation.".format(
threads_per_core_curr, threads_per_core))
def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None, ec2=None):
if ec2 is None:
ec2 = module.client('ec2')
if subnet_id is not None:
try:
sub = ec2.describe_subnets(SubnetIds=[subnet_id])
except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == 'InvalidSubnetID.NotFound':
module.fail_json(
"Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format(
subnet_id
)
)
module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
except botocore.exceptions.BotoCoreError as e:
module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id))
parent_vpc_id = sub['Subnets'][0]['VpcId']
vpc = {
'Name': 'vpc-id',
'Values': [parent_vpc_id]
}
    # because filter lists are ANDed together in the security groups API,
# make two separate requests for groups by ID and by name
id_filters = [vpc]
name_filters = [vpc]
if group:
name_filters.append(
dict(
Name='group-name',
Values=[group]
)
)
if group.startswith('sg-'):
id_filters.append(
dict(
Name='group-id',
Values=[group]
)
)
if groups:
name_filters.append(
dict(
Name='group-name',
Values=groups
)
)
if [g for g in groups if g.startswith('sg-')]:
id_filters.append(
dict(
Name='group-id',
Values=[g for g in groups if g.startswith('sg-')]
)
)
found_groups = []
for f_set in (id_filters, name_filters):
if len(f_set) > 1:
found_groups.extend(ec2.get_paginator(
'describe_security_groups'
).paginate(
Filters=f_set
).search('SecurityGroups[]'))
return list(dict((g['GroupId'], g) for g in found_groups).values())
def build_top_level_options(params):
spec = {}
if params.get('image_id'):
spec['ImageId'] = params['image_id']
elif isinstance(params.get('image'), dict):
image = params.get('image', {})
spec['ImageId'] = image.get('id')
if 'ramdisk' in image:
spec['RamdiskId'] = image['ramdisk']
if 'kernel' in image:
spec['KernelId'] = image['kernel']
if not spec.get('ImageId') and not params.get('launch_template'):
module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.")
if params.get('key_name') is not None:
spec['KeyName'] = params.get('key_name')
if params.get('user_data') is not None:
spec['UserData'] = to_native(params.get('user_data'))
elif params.get('tower_callback') is not None:
spec['UserData'] = tower_callback_script(
tower_conf=params.get('tower_callback'),
windows=params.get('tower_callback').get('windows', False),
passwd=params.get('tower_callback').get('set_password'),
)
if params.get('launch_template') is not None:
spec['LaunchTemplate'] = {}
        if not (params.get('launch_template').get('id') or params.get('launch_template').get('name')):
module.fail_json(msg="Could not create instance with launch template. Either launch_template.name or launch_template.id parameters are required")
if params.get('launch_template').get('id') is not None:
spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id')
if params.get('launch_template').get('name') is not None:
spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name')
if params.get('launch_template').get('version') is not None:
spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version'))
if params.get('detailed_monitoring', False):
spec['Monitoring'] = {'Enabled': True}
if params.get('cpu_credit_specification') is not None:
spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')}
if params.get('tenancy') is not None:
spec['Placement'] = {'Tenancy': params.get('tenancy')}
if (params.get('network') or {}).get('ebs_optimized') is not None:
spec['EbsOptimized'] = params['network'].get('ebs_optimized')
if params.get('instance_initiated_shutdown_behavior'):
spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior')
if params.get('termination_protection') is not None:
spec['DisableApiTermination'] = params.get('termination_protection')
if params.get('cpu_options') is not None:
spec['CpuOptions'] = {}
spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core')
spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count')
return spec
def build_instance_tags(params, propagate_tags_to_volumes=True):
tags = params.get('tags', {})
if params.get('name') is not None:
if tags is None:
tags = {}
tags['Name'] = params.get('name')
return [
{
'ResourceType': 'volume',
'Tags': ansible_dict_to_boto3_tag_list(tags),
},
{
'ResourceType': 'instance',
'Tags': ansible_dict_to_boto3_tag_list(tags),
},
]
def build_run_instance_spec(params, ec2=None):
if ec2 is None:
ec2 = module.client('ec2')
spec = dict(
ClientToken=uuid.uuid4().hex,
MaxCount=1,
MinCount=1,
)
# network parameters
spec['NetworkInterfaces'] = build_network_spec(params, ec2)
spec['BlockDeviceMappings'] = build_volume_spec(params)
spec.update(**build_top_level_options(params))
spec['TagSpecifications'] = build_instance_tags(params)
# IAM profile
if params.get('instance_role'):
spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('instance_role')))
spec['InstanceType'] = params['instance_type']
return spec
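# For orientation (the values below are made-up examples): build_run_instance_spec()
# returns a dict shaped roughly like the following, which is passed straight to
# ec2.run_instances():
#
#   {
#       'ClientToken': '9f2c...',              # random uuid4 hex
#       'MinCount': 1,
#       'MaxCount': 1,
#       'ImageId': 'ami-123456',
#       'InstanceType': 't2.micro',
#       'NetworkInterfaces': [{'DeviceIndex': 0, 'SubnetId': 'subnet-5ca1ab1e'}],
#       'BlockDeviceMappings': [],
#       'TagSpecifications': [
#           {'ResourceType': 'volume', 'Tags': [{'Key': 'Name', 'Value': 'web'}]},
#           {'ResourceType': 'instance', 'Tags': [{'Key': 'Name', 'Value': 'web'}]},
#       ],
#   }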
def await_instances(ids, state='OK'):
if not module.params.get('wait', True):
# the user asked not to wait for anything
return
state_opts = {
'OK': 'instance_status_ok',
'STOPPED': 'instance_stopped',
'TERMINATED': 'instance_terminated',
'EXISTS': 'instance_exists',
'RUNNING': 'instance_running',
}
if state not in state_opts:
module.fail_json(msg="Cannot wait for state {0}, invalid state".format(state))
waiter = module.client('ec2').get_waiter(state_opts[state])
try:
waiter.wait(
InstanceIds=ids,
WaiterConfig={
'Delay': 15,
'MaxAttempts': module.params.get('wait_timeout', 600) // 15,
}
)
except botocore.exceptions.WaiterConfigError as e:
module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format(
to_native(e), ', '.join(ids), state))
except botocore.exceptions.WaiterError as e:
module.warn("Instances {0} took too long to reach state {1}. {2}".format(
', '.join(ids), state, to_native(e)))
def diff_instance_and_params(instance, params, ec2=None, skip=None):
"""boto3 instance obj, module params"""
if ec2 is None:
ec2 = module.client('ec2')
if skip is None:
skip = []
changes_to_apply = []
id_ = instance['InstanceId']
ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value'])
def value_wrapper(v):
return {'Value': v}
param_mappings = [
ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper),
ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper),
# user data is an immutable property
# ParamMapper('user_data', 'UserData', 'userData', value_wrapper),
]
for mapping in param_mappings:
if params.get(mapping.param_key) is not None and mapping.instance_key not in skip:
value = ec2.describe_instance_attribute(Attribute=mapping.attribute_name, InstanceId=id_)
if params.get(mapping.param_key) is not None and value[mapping.instance_key]['Value'] != params.get(mapping.param_key):
arguments = dict(
InstanceId=instance['InstanceId'],
# Attribute=mapping.attribute_name,
)
arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key))
changes_to_apply.append(arguments)
if (params.get('network') or {}).get('source_dest_check') is not None:
# network.source_dest_check is nested, so needs to be treated separately
check = bool(params.get('network').get('source_dest_check'))
if instance['SourceDestCheck'] != check:
changes_to_apply.append(dict(
InstanceId=instance['InstanceId'],
SourceDestCheck={'Value': check},
))
return changes_to_apply
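# For orientation (the instance ID is a made-up example): diff_instance_and_params()
# returns a list of keyword-argument dicts, each consumable by
# ec2.modify_instance_attribute(), e.g.
#
#   [{'InstanceId': 'i-0123456789abcdef0', 'DisableApiTermination': {'Value': True}},
#    {'InstanceId': 'i-0123456789abcdef0', 'SourceDestCheck': {'Value': False}}]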
def change_network_attachments(instance, params, ec2):
if (params.get('network') or {}).get('interfaces') is not None:
new_ids = []
for inty in params.get('network').get('interfaces'):
if isinstance(inty, dict) and 'id' in inty:
new_ids.append(inty['id'])
elif isinstance(inty, string_types):
new_ids.append(inty)
# network.interfaces can create the need to attach new interfaces
old_ids = [inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']]
to_attach = set(new_ids) - set(old_ids)
for eni_id in to_attach:
ec2.attach_network_interface(
DeviceIndex=new_ids.index(eni_id),
InstanceId=instance['InstanceId'],
NetworkInterfaceId=eni_id,
)
return bool(len(to_attach))
return False
def find_instances(ec2, ids=None, filters=None):
paginator = ec2.get_paginator('describe_instances')
if ids:
return list(paginator.paginate(
InstanceIds=ids,
).search('Reservations[].Instances[]'))
elif filters is None:
module.fail_json(msg="No filters provided when they were required")
elif filters is not None:
        for key in list(filters.keys()):
if not key.startswith("tag:"):
filters[key.replace("_", "-")] = filters.pop(key)
return list(paginator.paginate(
Filters=ansible_dict_to_boto3_filter_list(filters)
).search('Reservations[].Instances[]'))
return []
@AWSRetry.jittered_backoff()
def get_default_vpc(ec2):
vpcs = ec2.describe_vpcs(Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'}))
if len(vpcs.get('Vpcs', [])):
return vpcs.get('Vpcs')[0]
return None
@AWSRetry.jittered_backoff()
def get_default_subnet(ec2, vpc, availability_zone=None):
subnets = ec2.describe_subnets(
Filters=ansible_dict_to_boto3_filter_list({
'vpc-id': vpc['VpcId'],
'state': 'available',
'default-for-az': 'true',
})
)
if len(subnets.get('Subnets', [])):
if availability_zone is not None:
subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets'))
if availability_zone in subs_by_az:
return subs_by_az[availability_zone]
# to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first
# there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list
by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone'])
return by_az[0]
return None
def ensure_instance_state(state, ec2=None):
if ec2 is None:
        ec2 = module.client('ec2')
if state in ('running', 'started'):
changed, failed, instances = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
if failed:
module.fail_json(
msg="Unable to start instances",
reboot_success=list(changed),
reboot_failed=failed)
module.exit_json(
msg='Instances started',
reboot_success=list(changed),
changed=bool(len(changed)),
reboot_failed=[],
instances=[pretty_instance(i) for i in instances],
)
elif state in ('restarted', 'rebooted'):
changed, failed, instances = change_instance_state(
filters=module.params.get('filters'),
desired_state='STOPPED')
changed, failed, instances = change_instance_state(
filters=module.params.get('filters'),
desired_state='RUNNING')
if failed:
module.fail_json(
msg="Unable to restart instances",
reboot_success=list(changed),
reboot_failed=failed)
module.exit_json(
msg='Instances restarted',
reboot_success=list(changed),
changed=bool(len(changed)),
reboot_failed=[],
instances=[pretty_instance(i) for i in instances],
)
elif state in ('stopped',):
changed, failed, instances = change_instance_state(
filters=module.params.get('filters'),
desired_state='STOPPED')
if failed:
module.fail_json(
msg="Unable to stop instances",
stop_success=list(changed),
stop_failed=failed)
module.exit_json(
msg='Instances stopped',
stop_success=list(changed),
changed=bool(len(changed)),
stop_failed=[],
instances=[pretty_instance(i) for i in instances],
)
elif state in ('absent', 'terminated'):
terminated, terminate_failed, instances = change_instance_state(
filters=module.params.get('filters'),
desired_state='TERMINATED')
if terminate_failed:
module.fail_json(
msg="Unable to terminate instances",
terminate_success=list(terminated),
terminate_failed=terminate_failed)
module.exit_json(
msg='Instances terminated',
terminate_success=list(terminated),
changed=bool(len(terminated)),
terminate_failed=[],
instances=[pretty_instance(i) for i in instances],
)
@AWSRetry.jittered_backoff()
def change_instance_state(filters, desired_state, ec2=None):
"""Takes STOPPED/RUNNING/TERMINATED"""
if ec2 is None:
ec2 = module.client('ec2')
changed = set()
instances = find_instances(ec2, filters=filters)
to_change = set(i['InstanceId'] for i in instances)
unchanged = set()
for inst in instances:
try:
if desired_state == 'TERMINATED':
# TODO use a client-token to prevent double-sends of these start/stop/terminate commands
# https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html
resp = ec2.terminate_instances(InstanceIds=[inst['InstanceId']])
[changed.add(i['InstanceId']) for i in resp['TerminatingInstances']]
if desired_state == 'STOPPED':
if inst['State']['Name'] == 'stopping':
unchanged.add(inst['InstanceId'])
continue
resp = ec2.stop_instances(InstanceIds=[inst['InstanceId']])
[changed.add(i['InstanceId']) for i in resp['StoppingInstances']]
if desired_state == 'RUNNING':
resp = ec2.start_instances(InstanceIds=[inst['InstanceId']])
[changed.add(i['InstanceId']) for i in resp['StartingInstances']]
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError):
# we don't care about exceptions here, as we'll fail out if any instances failed to terminate
pass
if changed:
await_instances(ids=list(changed) + list(unchanged), state=desired_state)
change_failed = list(to_change - changed)
instances = find_instances(ec2, ids=list(to_change))
return changed, change_failed, instances
def pretty_instance(i):
instance = camel_dict_to_snake_dict(i, ignore_list=['Tags'])
instance['tags'] = boto3_tag_list_to_ansible_dict(i['Tags'])
return instance
def determine_iam_role(name_or_arn):
if re.match(r'^arn:aws:iam::\d+:instance-profile/[\w+=/,.@-]+$', name_or_arn):
return name_or_arn
iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff())
try:
role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True)
return role['InstanceProfile']['Arn']
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn))
module.fail_json_aws(e, msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format(name_or_arn))
def handle_existing(existing_matches, changed, ec2, state):
if state in ('running', 'started') and [i for i in existing_matches if i['State']['Name'] != 'running']:
ins_changed, failed, instances = change_instance_state(filters=module.params.get('filters'), desired_state='RUNNING')
module.exit_json(
changed=bool(len(ins_changed)) or changed,
instances=[pretty_instance(i) for i in instances],
instance_ids=[i['InstanceId'] for i in instances],
)
changes = diff_instance_and_params(existing_matches[0], module.params)
for c in changes:
ec2.modify_instance_attribute(**c)
changed |= bool(changes)
changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('instance_role'))
changed |= change_network_attachments(existing_matches[0], module.params, ec2)
altered = find_instances(ec2, ids=[i['InstanceId'] for i in existing_matches])
module.exit_json(
changed=bool(len(changes)) or changed,
instances=[pretty_instance(i) for i in altered],
instance_ids=[i['InstanceId'] for i in altered],
changes=changes,
)
def ensure_present(existing_matches, changed, ec2, state):
if len(existing_matches):
try:
handle_existing(existing_matches, changed, ec2, state)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(
e, msg="Failed to handle existing instances {0}".format(', '.join([i['InstanceId'] for i in existing_matches])),
# instances=[pretty_instance(i) for i in existing_matches],
# instance_ids=[i['InstanceId'] for i in existing_matches],
)
try:
instance_spec = build_run_instance_spec(module.params)
instance_response = run_instances(ec2, **instance_spec)
instances = instance_response['Instances']
instance_ids = [i['InstanceId'] for i in instances]
for ins in instances:
changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized'])
for c in changes:
try:
AWSRetry.jittered_backoff()(ec2.modify_instance_attribute)(**c)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c)))
await_instances(instance_ids)
instances = ec2.get_paginator('describe_instances').paginate(
InstanceIds=instance_ids
).search('Reservations[].Instances[]')
module.exit_json(
changed=True,
instances=[pretty_instance(i) for i in instances],
instance_ids=instance_ids,
spec=instance_spec,
)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to create new EC2 instance")
@AWSRetry.jittered_backoff()
def run_instances(ec2, **instance_spec):
try:
return ec2.run_instances(**instance_spec)
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'InvalidParameterValue' and "Invalid IAM Instance Profile ARN" in e.response['Error']['Message']:
# If the instance profile has just been created, it takes some time to be visible by ec2
# So we wait 10 second and retry the run_instances
time.sleep(10)
return ec2.run_instances(**instance_spec)
else:
raise e
def main():
global module
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']),
wait=dict(default=True, type='bool'),
wait_timeout=dict(default=600, type='int'),
# count=dict(default=1, type='int'),
image=dict(type='dict'),
image_id=dict(type='str'),
instance_type=dict(default='t2.micro', type='str'),
user_data=dict(type='str'),
tower_callback=dict(type='dict'),
ebs_optimized=dict(type='bool'),
vpc_subnet_id=dict(type='str', aliases=['subnet_id']),
availability_zone=dict(type='str'),
security_groups=dict(default=[], type='list'),
security_group=dict(type='str'),
instance_role=dict(type='str'),
name=dict(type='str'),
tags=dict(type='dict'),
purge_tags=dict(type='bool', default=False),
filters=dict(type='dict', default=None),
launch_template=dict(type='dict'),
key_name=dict(type='str'),
cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']),
cpu_options=dict(type='dict', options=dict(
core_count=dict(type='int', required=True),
threads_per_core=dict(type='int', choices=[1, 2], required=True)
)),
tenancy=dict(type='str', choices=['dedicated', 'default']),
instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']),
termination_protection=dict(type='bool'),
detailed_monitoring=dict(type='bool'),
instance_ids=dict(default=[], type='list'),
network=dict(default=None, type='dict'),
volumes=dict(default=None, type='list'),
))
# running/present are synonyms
# as are terminated/absent
module = AnsibleAWSModule(
argument_spec=argument_spec,
mutually_exclusive=[
['security_groups', 'security_group'],
['availability_zone', 'vpc_subnet_id'],
['tower_callback', 'user_data'],
['image_id', 'image'],
],
supports_check_mode=True
)
if module.params.get('network'):
if module.params.get('network').get('interfaces'):
if module.params.get('security_group'):
module.fail_json(msg="Parameter network.interfaces can't be used with security_group")
if module.params.get('security_groups'):
module.fail_json(msg="Parameter network.interfaces can't be used with security_groups")
state = module.params.get('state')
ec2 = module.client('ec2')
if module.params.get('filters') is None:
filters = {
# all states except shutting-down and terminated
'instance-state-name': ['pending', 'running', 'stopping', 'stopped']
}
if state == 'stopped':
# only need to change instances that aren't already stopped
filters['instance-state-name'] = ['stopping', 'pending', 'running']
if isinstance(module.params.get('instance_ids'), string_types):
filters['instance-id'] = [module.params.get('instance_ids')]
elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')):
filters['instance-id'] = module.params.get('instance_ids')
else:
if not module.params.get('vpc_subnet_id'):
if module.params.get('network'):
# grab AZ from one of the ENIs
ints = module.params.get('network').get('interfaces')
if ints:
filters['network-interface.network-interface-id'] = []
for i in ints:
if isinstance(i, dict):
i = i['id']
filters['network-interface.network-interface-id'].append(i)
else:
sub = get_default_subnet(ec2, get_default_vpc(ec2), availability_zone=module.params.get('availability_zone'))
filters['subnet-id'] = sub['SubnetId']
else:
filters['subnet-id'] = [module.params.get('vpc_subnet_id')]
if module.params.get('name'):
filters['tag:Name'] = [module.params.get('name')]
if module.params.get('image_id'):
filters['image-id'] = [module.params.get('image_id')]
elif (module.params.get('image') or {}).get('id'):
filters['image-id'] = [module.params.get('image', {}).get('id')]
module.params['filters'] = filters
if module.params.get('cpu_options') and not module.botocore_at_least('1.10.16'):
module.fail_json(msg="cpu_options is only supported with botocore >= 1.10.16")
existing_matches = find_instances(ec2, filters=module.params.get('filters'))
changed = False
if state not in ('terminated', 'absent') and existing_matches:
for match in existing_matches:
warn_if_public_ip_assignment_changed(match)
warn_if_cpu_options_changed(match)
changed |= manage_tags(match, (module.params.get('tags') or {}), module.params.get('purge_tags', False), ec2)
if state in ('present', 'running', 'started'):
ensure_present(existing_matches=existing_matches, changed=changed, ec2=ec2, state=state)
elif state in ('restarted', 'rebooted', 'stopped', 'absent', 'terminated'):
if existing_matches:
ensure_instance_state(state, ec2)
else:
module.exit_json(
msg='No matching instances found',
changed=False,
instances=[],
)
else:
module.fail_json(msg="We don't handle the state {0}".format(state))
if __name__ == '__main__':
main()
|
gpl-3.0
| 1,229,012,916,601,802,200
| 41.253292
| 160
| 0.574932
| false
| 4.471174
| false
| false
| false
|
mblayman/markwiki
|
markwiki/storage/fs/user.py
|
1
|
4620
|
# Copyright (c) 2016, Matt Layman
import json
import hashlib
import os
from markwiki.exceptions import UserStorageError
from markwiki.models.user import User
from markwiki.storage.user import UserStorage
class FileUserStorage(UserStorage):
'''A file system based user storage'''
def __init__(self, config):
self._path = os.path.join(config['MARKWIKI_HOME'], 'users')
# An index of user ID to user file paths
self._id_index_file = os.path.join(self._path, 'id.index')
self._id_index = {}
# An index of user email to user file paths
self._email_index_file = os.path.join(self._path, 'email.index')
self._email_index = {}
def initialize(self):
if not os.path.exists(self._path):
os.mkdir(self._path)
self._write_json(self._id_index, self._id_index_file)
self._write_json(self._email_index, self._email_index_file)
else:
self._read_indices()
def create(self, user):
'''Create a new user by storing it as JSON on the file system.'''
user_file = self._get_user_file(user.name)
if os.path.exists(user_file):
raise UserStorageError('A user with that name already exists.')
if self.find_by_email(user.email) is not None:
raise UserStorageError('A user with that email already exists.')
# Everything looks good so get the user an ID and save it.
user.user_id = self._generate_user_id()
self._write_json(user.__dict__, user_file)
# Now that the user is saved, update the indices.
self._update_indices(user, user_file)
def find_by_email(self, email):
'''Find a user by their email or return ``None``.'''
user_file = self._email_index.get(email)
if user_file is None:
return None
return self._load_user(user_file)
def find_by_id(self, user_id):
'''Find a user by their ID or return ``None``.'''
user_file = self._id_index.get(user_id)
if user_file is None:
return None
return self._load_user(user_file)
def find_by_name(self, name):
'''Find a user by their name or return ``None``.'''
user_file = self._get_user_file(name)
return self._load_user(user_file)
def update(self, user):
'''Update an existing user.'''
user_file = self._get_user_file(user.name)
self._write_json(user.__dict__, user_file)
def _generate_user_id(self):
'''Generate a unique user ID.'''
# Because there might be multiple workers (like if running with
# gunicorn), refresh the in-memory indices to avoid ID clashes.
self._read_indices()
user_id = len(self._id_index)
while self.find_by_id(u'{0}'.format(user_id)) is not None:
user_id += 1
# The auth system will try to do lookups with unicode so the key might
# as well be unicode to be consistent.
return u'{0}'.format(user_id)
def _get_user_file(self, name):
'''Get the file path where the user's data will be stored.'''
m = hashlib.md5()
m.update(name.encode('utf-8'))
return os.path.join(self._path, m.hexdigest())
def _load_user(self, user_file):
'''Load a user from a file.'''
if not os.path.exists(user_file):
return None
with open(user_file, 'r') as f:
data = json.loads(f.read())
return User(data['name'], data['email'], data['login_type'],
data['password_digest'], data['user_id'])
def _read_indices(self):
'''Read the file indices into memory.'''
with open(self._id_index_file, 'r') as f:
self._id_index = json.loads(f.read())
with open(self._email_index_file, 'r') as f:
self._email_index = json.loads(f.read())
def _update_indices(self, user, user_file):
'''Update the file indices with the provided user information.'''
self._id_index[user.user_id] = user_file
self._write_json(self._id_index, self._id_index_file)
# Not every user has an associated email account.
if user.email:
self._email_index[user.email] = user_file
self._write_json(self._email_index, self._email_index_file)
def _write_json(self, data, out):
'''Write out JSON with common settings.'''
json_data = json.dumps(data, sort_keys=True, indent=2,
separators=(',', ': '))
with open(out, 'w') as f:
f.write(json_data)
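# --- Editor's addition: a minimal usage sketch, not part of the original
# module. It assumes MARKWIKI_HOME may point at any writable directory; the
# User constructor arguments mirror the order used in _load_user above, and
# the example name/email/digest values are hypothetical.
if __name__ == '__main__':
import tempfile
storage = FileUserStorage({'MARKWIKI_HOME': tempfile.mkdtemp()})
storage.initialize()
alice = User('alice', 'alice@example.com', 'single', 'digest', None)
storage.create(alice)
# Look the user up again through the name and ID indices to show the round trip.
print(storage.find_by_name('alice').email)
print(storage.find_by_id(u'0').name)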
|
bsd-2-clause
| 3,899,588,193,903,150,000
| 35.377953
| 78
| 0.588528
| false
| 3.719807
| false
| false
| false
|
RUB-NDS/PRET
|
fuzzer.py
|
1
|
1263
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class fuzzer():
vol = ["", ".", "\\", "/", "file:///", "C:/"]
var = ["~", "$HOME"]
win = ["%WINDIR%", "%SYSTEMROOT%", "%HOMEPATH%", "%PROGRAMFILES%"]
smb = ["\\\\127.0.0.1\\"]
web = ["http://127.0.0.1/"] # "http://hacking-printers.net/log.me"
dir = ["..", "...", "...."] # also combinations like "./.."
# sep = ["", "\\", "/", "\\\\", "//", "\\/"]
fhs = ["/etc", "/bin", "/sbin", "/home", "/proc", "/dev", "/lib",
"/opt", "/run", "/sys", "/tmp", "/usr", "/var", "/mnt",]
abs = [".profile", ["etc", "passwd"], ["bin", "sh"], ["bin", "ls"],
"boot.ini", ["windows", "win.ini"], ["windows", "cmd.exe"]]
rel = ["%WINDIR%\\win.ini",
"%WINDIR%\\repair\\sam",
"%WINDIR%\\repair\\system",
"%WINDIR%\\system32\\config\\system.sav",
"%WINDIR%\\System32\\drivers\\etc\\hosts",
"%SYSTEMDRIVE%\\boot.ini",
"%USERPROFILE%\\ntuser.dat",
"%SYSTEMDRIVE%\\pagefile.sys",
"%SYSTEMROOT%\\repair\\sam",
"%SYSTEMROOT%\\repair\\system"]
# define prefixes to use in fuzzing modes
path = vol+var+win+smb+web # path fuzzing
write = vol+var+win+smb+fhs # write fuzzing
blind = vol+var # blind fuzzing
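# --- Editor's addition: an illustrative sketch, not part of the original PRET
# module. It shows how a caller might combine the prefix lists with directory
# traversal sequences and a target path; the join logic here is an assumption,
# the real PRET fuzzing loop may build payloads differently.
if __name__ == '__main__':
f = fuzzer()
target = "etc/passwd"
payloads = [prefix + "../" * depth + target
for prefix in f.path for depth in range(3)]
print("%d candidate paths, e.g. %r" % (len(payloads), payloads[:3]))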
|
gpl-2.0
| 3,648,293,130,167,252,500
| 41.1
| 69
| 0.475851
| false
| 2.950935
| false
| false
| false
|
oxyum/python-tlogger
|
tlogger/logger.py
|
1
|
4372
|
# -*- mode: python; coding: utf-8; -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .action_binder import ActionBinder
from .action_stack import action_stack
from .actions import Action
from .constants import Level
from .decorators import wrap_descriptor_method, wrap_function
from .proxies import ContextManagerProxy, IterableProxy
from .utils import is_descriptor
try:
from django import VERSION # nopep8
DJANGO_AVAILABLE = True
except ImportError:
DJANGO_AVAILABLE = False
class Logger(object):
def __init__(self, name_or_logger, action_class=Action):
self.logger = name_or_logger
self.action_class = action_class
def __call__(self, func=None, **kwargs):
if func is None:
return self.parametrized_decorator(**kwargs)
return self._decorator(func, self.action_class, self.logger)
if DJANGO_AVAILABLE:
def view(self, func=None, **kwargs):
params = self._get_view_defaults()
if func is None:
params.update(kwargs)
return self.parametrized_decorator(**params)
return self._decorator(func, self.action_class, self.logger,
**params)
def _get_view_defaults(self):
return dict(hide_params=['result'])
def parametrized_decorator(self, **kwargs):
action_class = kwargs.pop('action_class', self.action_class)
def decorator(func):
return self._decorator(func, action_class, self.logger, **kwargs)
return decorator
def _decorator(self, func, action_class, logger, **kwargs):
if is_descriptor(func):
return wrap_descriptor_method(func, action_class, logger, **kwargs)
return wrap_function(func, action_class, logger, **kwargs)
def dump(self, **kwargs):
self.event(suffix='dump_variable', payload=kwargs)
def create_ad_hoc_action(self, context_object):
return Action.create_ad_hoc(logger=self.logger,
context_object=context_object)
def event(self, suffix, payload, action=None, **kwargs):
action = action or self.get_current_action()
if action is None:
with self.create_ad_hoc_action() as ad_hoc:
ad_hoc.emit_event(suffix, payload, **kwargs)
else:
action.emit_event(suffix, payload, **kwargs)
def get_current_action(self):
return action_stack.peek()
def start_action(self, name, **kwargs):
return self.action_class(name, self.logger, **kwargs)
def _raw(self, suffix, level, msg, *args, **kwargs):
self.event(suffix, {}, level=level,
raw_msg=msg, raw_args=args, raw_kwargs=kwargs)
def debug(self, msg, *args, **kwargs):
self._raw('debug', Level.debug, msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self._raw('info', Level.info, msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self._raw('warning', Level.warning, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
self._raw('error', Level.error, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
exc_info = kwargs.pop('exc_info', 1)
self._raw('exception', Level.error, msg, *args, exc_info=exc_info,
**kwargs)
def critical(self, msg, *args, **kwargs):
self._raw('critical', Level.critical, msg, *args, **kwargs)
def set_status(self, code, msg):
self.get_current_action().set_status(code, msg)
def action_for(self, func):
return ActionBinder.get_action(func)
def iter(self, iterable, steps=False, name=None, context_object=None,
**kwargs):
action = self.start_action(
name or 'iterations', context_object=context_object, **kwargs
)
return IterableProxy(iterable, steps=steps, action=action)
def context(self, context_manager, name=None, **kwargs):
action = self.start_action(
name or 'context', context_object=context_manager, **kwargs
)
return ContextManagerProxy(context_manager, action=action)
def get_logger(name, logger_class=Logger):
return logger_class(name)
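# --- Editor's addition: a commented usage sketch, not part of the original
# module. It assumes the default Action class emits events through the named
# logger; the decorated function is a hypothetical example.
#
#   logger = get_logger('myapp')
#
#   @logger
#   def add(a, b):
#       return a + b
#
#   add(1, 2)                       # the wrapped call is logged as an action
#   logger.info('result: %s', add(1, 2))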
|
mit
| 1,517,032,221,352,195,000
| 32.891473
| 79
| 0.623056
| false
| 3.845207
| false
| false
| false
|
pombreda/eggy
|
eggy/model/Model.py
|
1
|
94271
|
#!/usr/bin/env python
# eggy - a useful IDE
# Copyright (c) 2008 Mark Florisson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module provides the model extending the gui. The model forms the central
part of the application.
"""
__all__ = ['Model', 'NoSuchFileException']
import os
import re
import sys
import glob
import user
import time
import Queue
import shutil
import signal
import socket
import select
import atexit
import codecs
import textwrap
import traceback
import chardet
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4 import Qsci
from eggy.gui import MainWindow, EditorTabWidget, TextEdit
from eggy.network.Network import Network, PortTakenException
from eggy.compile.Compile import Compile
import eggy.compile.Compile as Compile_
from eggy.decorators import Decorators
from eggy.project import Project
from eggy.project import Find
class NoSuchFileException(Exception):
"""
Exception raised when actions requiring an open file are invoked
when no file is opened.
"""
class Model(MainWindow.MainWindow):
"""
This class contains most of the intelligence behind the whole application.
    Most actions, buttons, etc. in MainWindow.py are implemented here. It
    represents the model and controller.
"""
def __init__(self, base, appname, version, mymailaddress, website):
"""
Constructor
@param base the root of the program files
@param appname the name of the application
@param mymailaddress my mail address
@param website the project's website
"""
# our shared queue with Network
self._queue = Queue.Queue(0) # infinite
# network listen port
self._port = None
# name of the application
self._appname = appname
self.version = version
# mail address to send bugreports and other spam to
self.mymail = mymailaddress
# the project's website
self._website = website
# we must take care of not saving settings twice when we have an
# impatient user that keeps clicking on the cross
self._shuttingDown = False
# the directory of this program's files
# (for determing the path for icons in MainWindow)
self.base = base
# contains the filename as key, a list with editor (editable text) and
# whether filesaveas should be invoked as values
self._editors = {}
self._icons = (QIcon(self.base + "img/filenew.png"),
QIcon(self.base + "img/edit.png"))
# contains the list of opened files in order, used for order
self._openFileList = []
# tells the count for the name of a new file
self._count = 0
# A compile instance when the program is at the moment compiling
# useful for the "stop" button
self._compileObject = None
# name as key, Project object as value
self._projects = {}
# filename as key, widget indicating the file is being synced as value
self._syncingFiles = {}
# the directory containing all projects
self.projectDir = None
# the current chat browser widget (this is for hiding the current,
# and showing a new one when the user switches to a document in
# another project)
self._currentChatBrowser = None
# the nickname of the user
self._username = None
# contains the plugin name as key, the plugin module as value
self._plugins = {}
# some user-configurable settings
self.tabwidth = 4
self.useTabs = False
self.whiteSpaceVisible = False
self.boxedFolding = True
self.autoComplete = True
self.autoCompleteWords = True
# amount of characters to type before poping up a completion dialog
self.autoCompleteInvocationAmount = 3
# whether to show the eggy image in the project tree or not
self.stylesheet = True
# if the user removed .project.pk files, popup one dialog, not as
# many as there are directories
self.errorPoppedUp = False
self._restoreSettings()
# the filter to set for showing files in the project tree
self.fileExtensions = [".java", ".py", ".pyw", ".pyx", ".sh", ".pl",
".vhdl", ".html", ".xml", ".css", ".rb", ".cpp", ".h", ".d",
".inc", ".js", ".cs", ".c", ".sql", ".cgi", ".fcgi"]
# instance variables must be set before calling MainWindow.__init__()
super(Model, self).__init__()
self.setStylesheet()
# this must be after the call to the superclass, because we need
# connect functions from QObject
self._networkUp = False
self._networkRestart()
if self.projectCheckDir():
self.setProjectDir()
else:
self._projectSetProjectDir()
Compile_.loadCompilers()
self._loadPlugins()
try:
self.tabChanged(self.editorTabWidget.currentIndex())
except NoSuchFileException:
self._fileNew()
self.actionFileSaveAll.setEnabled(False)
self._setupSocket()
debug = Decorators.debug
network = Decorators.network
def _abspath(self, filename):
"""
Private method to determine the absolute path of a filename
@param filename the filename to get the path of
@return the path of filename or the user's home directory on failure
(str)
"""
filename = str(filename)
try:
path = filename[:filename.rindex("/")] + "/"
except ValueError:
path = user.home + "/"
return path
def _basename(self, filename):
"""
Private method to get the basename of a filename
@param filename the filename to get the basename of
@return the basename of filename or the user's home directory on
failure (str)
"""
filename = str(filename)
try:
base = filename[filename.rindex("/")+1:]
except ValueError:
base = user.home
return base
def errorMessage(self, text):
"""
Public method to display a warning to the user
@param text the message to display
"""
QMessageBox.warning(self, "Warning", text)
def infoMessage(self, text, title="Note: "):
"""
Public method to display an information message to the user
@param text the message to display
@param title the WindowText
"""
QMessageBox.information(self, title, text)
def systrayMessage(self, title, message):
if QSystemTrayIcon.supportsMessages():
self._systemtray.showMessage(title, message)
else:
self.infoMessage(message, title=title)
def _fileGetOpenFile(self, index=None):
"""
Private method to get the filename of an opened file by index
raises NoSuchFileException when there are no tabs open
@param index the index of the filename
@return the filename (str) or None on an invalid index
"""
if index is None:
index = self.editorTabWidget.currentIndex()
if -1 < index < len(self._openFileList):
return self._openFileList[index]
else:
raise NoSuchFileException("Muahaha")
def _fileGetIndex(self, filename):
"""
Private method to get the index of a filename
@param filename the filname
@return the index of filename (int)
"""
return self._openFileList.index(filename)
def _fileGetEditor(self, *args):
"""
Protected method to get the editor object by filename or by index
@param *args the filename or index of the editor to get
@return the editor object or None when *args is invalid
"""
retval = None
args = args[0]
if isinstance(args, str):
if args in self._editors:
retval = self._editors[args][0]
elif isinstance(args, int):
if args < len(self._openFileList):
retval = self._editors[self._fileGetOpenFile(args)][0]
return retval
def _getCurrentEditor(self):
"""
Private method for getting the currently selected editor object.
Raises NoSuchFileException when no documents are open
@return editor object
"""
index = self.editorTabWidget.currentIndex()
if -1 < index < len(self._openFileList):
filename = self._openFileList[index]
return self._editors[filename][0]
else:
raise NoSuchFileException()
def get(self, filename=None):
"""
Public method that makes it easy for plugins to obtain information
about the currently opened document
@return a tuple containing the filename, editor object and index of the
currently selected document
"""
index = self.editorTabWidget.currentIndex()
editor = None
if filename is None:
if -1 < index < len(self._openFileList):
filename = self._openFileList[index]
editor = self._editors[filename][0]
else:
if filename in self._openFileList:
index = self._openFileList.index(filename)
editor = self._editors[filename][0]
return (filename, editor, index)
def _fileRemoveOpenFile(self, index):
"""
Protected method to remove and close an opened file
@param index the index to remove the file at
"""
self.editorTabWidget.removeTab(index)
filename = self._fileGetOpenFile(index)
if not os.path.exists(filename) and filename.startswith("Untitled") \
and "Untitled%i" % (self._count - 1) not in self._openFileList:
self._count -= 1
self._openFileList.remove(filename)
self._editors.pop(filename)
self.emit(SIGNAL("fileClosed"), filename)
self.tabChanged(self.editorTabWidget.currentIndex())
def _fileAddOpenFile(self, fname, editor, fileSaveAs=False):
"""
Private method to add a file for opening
@param fname the name of the file
@param editor the editor object
@param fileSaveAs whether fileSaveAs should be invoked or not
"""
self._openFileList.append(fname)
self._editors[fname] = [editor, fileSaveAs]
editor.setModified(False)
self.emit(SIGNAL("fileOpened"), fname)
if os.path.exists(fname):
fname = self._basename(fname)
self.editorTabWidget.addTab(editor, self._icons[0], fname)
self.editorTabWidget.setCurrentWidget(editor)
if len(self._openFileList) == 2:
if self._openFileList[0].startswith("Untitled") and \
not self._fileGetEditor(self._openFileList[0]).isModified() and \
not fname.startswith("Untitled"):
self._fileRemoveOpenFile(0)
if len(self._openFileList) == 1:
self.tabChanged(self.editorTabWidget.currentIndex())
def _center(self, widget):
"""
Protected method to center a widget above the main window
@param widget the widget to center
"""
x = (self.width() / 2) - (widget.width() / 2)
y = (self.height() / 2) - (widget.height() / 2)
widget.move(self.pos() + QPoint(x,y))
# >>>>>>>>>>>>>>>>>>>>>> File menu actions <<<<<<<<<<<<<<<<<<<<<<
def _createEditor(self, filename=None):
"""
Private method for creating a QScintilla text editor
"""
editor = TextEdit.TextEdit(self, filename)
self.connect(editor, SIGNAL("modificationChanged(bool)"),
self._modificationChanged, Qt.QueuedConnection)
self.connect(editor, SIGNAL("modificationChanged(bool)"),
self._modificationChanged, Qt.QueuedConnection)
self.connect(editor, SIGNAL("copyAvailable(bool)"),
self.actionEditCopy.setEnabled, Qt.QueuedConnection)
self.connect(editor, SIGNAL("copyAvailable(bool)"),
self.actionEditCut.setEnabled)
return editor
def _modificationChanged(self, enable):
"""
        Private method invoked when a document's modification state changed
"""
self.actionFileSave.setEnabled(enable)
fileSaveAll = False
for number, filename in enumerate(self._openFileList):
if filename not in self._editors:
continue
editor, b = self._editors[filename]
modified = editor.isModified()
icon = self._icons[int(modified)]
self.editorTabWidget.setTabIcon(number, icon)
self.editorTabWidget.tabBar().setTabToolTip(number, filename)
if modified:
fileSaveAll = True
self.actionFileSave.setEnabled(enable)
self.actionFileSaveAll.setEnabled(fileSaveAll)
def _fileNew(self):
"""
Protected method to create a new (unsaved) file
"""
editor = self._createEditor()
name = "Untitled%i" % self._count
self._fileAddOpenFile(name, editor, True)
self._count += 1
def _fileOpen(self):
"""
Protected method to popup a dialog and load the selected files
"""
for filename in self._selectFiles():
self.loadFile(filename)
def _fileGetLastDir(self):
"""
Protected method to get the last accessed directory
@return last accessed directory or the user's home directory (str)
"""
settings = QSettings()
return str(settings.value("FileActions/LastDir", \
QVariant(QString(user.home))).toString())
def _fileSetLastDir(self, filename):
"""
        Protected method to set the last accessed directory in the settings
"""
settings = QSettings()
settings.setValue("FileActions/LastDir", \
QVariant(QString(self._abspath(filename))))
def _selectFiles(self, filter=None):
"""
Private method for letting the user select files
@param filter the filter allowing matching files to be selected
@return the selected files (QStringList)
"""
lastdir = self._fileGetLastDir()
if filter is None:
filenames = list(QFileDialog.getOpenFileNames(self, \
"Select files for opening", lastdir))
else:
filenames = list(QFileDialog.getOpenFileNames(self, \
"Select files for opening", lastdir, filter).toStringList())
if filenames:
self._fileSetLastDir(filenames[0])
return filenames
def loadFile(self, filename=None):
"""
Public method that loads a file and adds a tab for it
@param filename the file to open
"""
if filename is None:
action = self.sender()
if isinstance(action, QAction):
filename = action.data().toString()
filename = str(filename)
if filename in self._openFileList:
self.editorTabWidget.setCurrentIndex(self._fileGetIndex(filename))
elif os.path.exists(filename) and not os.path.isdir(filename):
editor = self._createEditor(filename)
try:
encoding = 'utf8'
try:
lines = codecs.open(filename, 'rU', encoding).readlines()
except UnicodeDecodeError:
encoding = chardet.detect(open(filename).read())['encoding']
lines = codecs.open(filename, 'rU', encoding).readlines()
for line, text in enumerate(lines):
editor.insertAt(text, line, 0)
except IOError, e:
self.errorMessage("Failed to open file %s " % (filename,) + \
"because it is read-only or does not exist.")
except UnicodeDecodeError, e:
self.errorMessage("Failed to determine file's encoding.")
else:
self._fileAddOpenFile(filename, editor)
self._fileAddRecentFile(filename)
def _fileAddRecentFile(self, filename):
"""
Private method used for updating the File -> "Open Recent" menu
@param filename the file to add to the menu
"""
filename = str(filename)
if filename not in self.recentlyOpenedFiles:
self.recentlyOpenedFiles.insert(0, filename)
self.recentlyOpenedFiles = self.recentlyOpenedFiles[:12]
def _fileOpenRecentMenu(self):
"""
Protected method that creates the File Open Recent menu
"""
self.actionOpenRecentMenu.clear()
for f in self.recentlyOpenedFiles:
basename = self._basename(f)
action = self.createAction("%s %s[ %s ]" % (basename, \
(15-len(basename))*" ", f), self.loadFile, \
tip="Open file %s" % f
)
action.setData(QVariant(QString(f)))
self.actionOpenRecentMenu.addAction(action)
def fileSave(self, index=-1, filename=None):
"""
Public method for saving a file
@param index save the file specified by index, if not specified,
the currently selected file will be saved
@return True on successful save
"""
if filename is not None and filename in self._openFileList:
index = self._openFileList.index(filename)
if index == -1:
index = self.editorTabWidget.currentIndex()
retval = True
try:
filename = self._fileGetOpenFile(index)
except NoSuchFileException:
retval = False
else:
if self._editors[filename][1]:
retval = self._fileSaveAs()
else:
editor = self._editors[filename][0]
file = None
try:
file = open(filename, "w")
file.write(unicode(editor.text()).encode('utf8'))
except (IOError, UnicodeEncodeError), e:
self.errorMessage("Unable to save file %s: \n%s" % \
(filename, e))
retval = False
else:
editor.setModified(False)
self.statusbar.showMessage("Saved %s" % filename, 1500)
if file is not None:
file.close()
# self.projectRefresh()
return retval
def _fileSaveAs(self):
"""
Protected method for saving the current file as
@return True on success
"""
lastdir = self._fileGetLastDir()
index = self.editorTabWidget.currentIndex()
oldfilename = self._fileGetOpenFile(index)
filename = QFileDialog.getSaveFileName(self, "Save File As - %s" % oldfilename, lastdir)
# check for cancel
retval = False
if not filename.isEmpty():
filename = str(filename)
editor = self._fileGetEditor(oldfilename)
# set the last accessed directory...
self._fileSetLastDir(filename)
self._editors[filename] = [editor, False]
self.editorTabWidget.setTabText(index, self._basename(filename))
del self._editors[oldfilename]
self._openFileList[index] = filename
self._fileAddRecentFile(filename)
retval = self.fileSave()
return retval
def _fileSaveAll(self):
"""
Protected method for saving all opened files
"""
for index in range(len(self._openFileList)):
self.fileSave(index)
# It's possible to create a document, modify it, and close it.
# We need to disable the actions because the signal won't be emitted
self.actionFileSave.setEnabled(False)
self.actionFileSaveAll.setEnabled(False)
def _filePrint(self):
"""
Protected method for printing a file
"""
try:
filename = self._fileGetOpenFile()
editor = self._fileGetEditor(filename)
except NoSuchFileException:
pass
else:
printer = Qsci.QsciPrinter()
p = QPrintDialog(printer, self)
if p.exec_() == QDialog.Accepted:
printer.setDocName(filename)
if printer.printRange(editor):
self.infoMessage("File %s successfully printed." % filename)
else:
self.infoMessage("Failed to print file %s." % filename)
def _fileQuit(self):
"""
Protected method that closes the application
"""
self.close()
# >>>>>>>>>>>>>>>>>>>>>> Edit menu actions <<<<<<<<<<<<<<<<<<<<<<
def _editUndo(self):
"""
Protected method undoing the last operation of the user
"""
try:
self._getCurrentEditor().undo()
except NoSuchFileException:
pass
def _editRedo(self):
"""
Protected method redoing the last operation of the user
"""
try:
self._getCurrentEditor().redo()
except NoSuchFileException:
pass
def _editCut(self):
"""
Protected method cutting selected text
"""
try:
self._getCurrentEditor().cut()
except NoSuchFileException:
pass
def _editCopy(self):
"""
Protected method copying selected text
"""
try:
self._getCurrentEditor().copy()
except NoSuchFileException:
pass
def _editPaste(self):
"""
Protected method pasting copied text
"""
try:
self._getCurrentEditor().paste()
except NoSuchFileException:
pass
@property
def indentationChar(self):
# return "\t" if self.useTabs else " "
if self.useTabs:
indentationChar = "\t"
else:
indentationChar = " "
return indentationChar
def _editUnindent(self):
"""
Protected method for unindenting a line or a block of selected lines
"""
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
if editor.hasSelectedText():
l1, i1, l2, i2 = editor.getSelection()
for linenumber in xrange(l1, l2 + 1):
self._unindentLine(editor, linenumber)
tabwidth = self.tabwidth
if self.useTabs:
tabwidth = 1
editor.setSelection(l1, i1, l2, i2 - tabwidth)
else:
line = editor.getCursorPosition()[0]
self._unindentLine(editor, line)
def _unindentLine(self, editor, line):
"""
Private method that unindents the given line
@param editor the editor to unindent the line on
@param line the line to unindent
"""
text = unicode(editor.text(line))
if self.useTabs:
if text[0] == "\t":
width = 1
else:
return
else:
spaces = 0
for spaces, char in enumerate(text):
if char != " ":
break
width = spaces % self.tabwidth
if width == 0 and spaces >= 4:
width = 4
editor.replaceLine(line, text[width:], send=True)
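    # Worked example (editor's note): with the default tabwidth of 4, a line
    # starting with 6 spaces gives width = 6 % 4 = 2, so only the two "ragged"
    # spaces are stripped; with 8 leading spaces width = 8 % 4 = 0 but
    # spaces >= 4, so a full level of 4 spaces is removed instead.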
def _editIndent(self):
"""
        Protected method for indenting a line or a block of selected lines
"""
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
if editor.hasSelectedText():
# indent a block
l1, i1, l2, i2 = editor.getSelection()
for linenumber in xrange(l1, l2 + 1):
self._indentLine(editor, linenumber)
editor.setSelection(l1, i1, l2, i2 + self.tabwidth)
else:
line = editor.getCursorPosition()[0]
self._indentLine(editor, line)
def _indentLine(self, editor, line):
"""
Private method that indents the given line
@param editor the editor to indent the line on
@param line the line to indent
"""
text = unicode(editor.text(line))
if self.useTabs:
editor.replaceLine(line, "\t" + text, send=True)
return
spaces = 0
for spaces, char in enumerate(text):
if char != " ":
break
width = self.tabwidth - (spaces % self.tabwidth)
editor.replaceLine(line, " "*width + text, send=True)
def _editComment(self):
"""
Protected method for commenting out a line or block
"""
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
editor.beginUndoAction()
if editor.hasSelectedText():
l1, i1, l2, i2 = editor.getSelection()
# comment each line
for linenumber in xrange(l1, l2 + 1):
self._commentLine(editor, linenumber)
# and re-set the selection
editor.setSelection(l1, i1, l2, i2 + len(editor.comment))
else:
line, index = editor.getCursorPosition()
self._commentLine(editor, line)
if re.match("^ *%s$" % editor.comment,
unicode(editor.text(line))):
# empty line comment, set cursor position after comment
editor.setCursorPosition(line,
editor.text(line).length() - 1)
editor.endUndoAction()
def _commentLine(self, editor, line):
"""
        Private method that comments out the given line on the given editor
@param editor the editor containing the line
@param line the line to comment
"""
text = unicode(editor.text(line))
spaces = 0
for spaces, char in enumerate(text):
if char != self.indentationChar:
break
text = "%s%s%s" % (self.indentationChar * spaces,
editor.comment, text[spaces:])
if editor.comment.startswith("<!--"):
# html comment
text = text[:-1] + " -->\n"
editor.replaceLine(line, text, send=True)
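    # Example (editor's note): assuming editor.comment is "<!-- " for an HTML
    # document, a line "  <p>hi</p>\n" becomes "  <!-- <p>hi</p> -->\n"; for a
    # Python editor with comment "# ", "  x = 1\n" becomes "  # x = 1\n".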
def _editUncomment(self):
"""
        Protected method for uncommenting a line or block
"""
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
# make the action undoable
editor.beginUndoAction()
if editor.hasSelectedText():
l1, i1, l2, i2 = editor.getSelection()
                # uncomment all selected lines
for linenumber in xrange(l1, l2 + 1):
self._uncommentLine(editor, linenumber)
# re-set the selection
editor.setSelection(l1, i1, l2, i2 - len(editor.comment))
else:
line = editor.getCursorPosition()[0]
self._uncommentLine(editor, line)
editor.endUndoAction()
def _uncommentLine(self, editor, line):
"""
Private method that uncomments line line on editor editor
@param editor the editor containing the line
@param line the line to uncomment
"""
text = unicode(editor.text(line))
if editor.comment.startswith("<!--"):
# undo html comment
text = text.replace("-->", "", 1)
editor.replaceLine(line, \
text.replace(editor.comment, "", 1), send=True)
def _editMoveBeginning(self):
"""
Protected method for setting the cursor to the beginning of the line
"""
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
line, index = editor.getCursorPosition()
text = unicode(editor.text(line))
if re.match("^ *$", text) is None:
# not an empty line
index = 0
for index, char in enumerate(text):
if char != self.indentationChar:
break
editor.setCursorPosition(line, index)
def _editMoveEnd(self):
"""
Protected method for setting the cursor to the end of the line
"""
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
line, index = editor.getCursorPosition()
# -1 for the newline character
index = editor.text(line).length()
if unicode(editor.text(line)).endswith("\n"):
index -= 1
editor.setCursorPosition(line, index)
def _editSelectAll(self):
"""
Protected method for selecting all text
"""
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
editor.selectAll()
def _editJump(self, line):
"""
Protected method for jumping to a user-specified line
"""
editor = None
if line > 1:
line -= 1
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
index = 0
text = unicode(editor.text(line))[:-1]
for index, char in enumerate(text):
if char != self.indentationChar:
break
editor.setLastLineJumpedFrom()
editor.setCursorPosition(line, index)
editor.setFocus()
def _editFind(self):
"""
        Protected method for popping up a find dialog
"""
self._findReplaceDlg.show()
self._findInput.selectAll()
self._findInput.setFocus()
def _editFindString(self, find, forward=True, line=-1):
"""
Private method for finding and selecting a string in a document
@param find the text to look for
@param forward whether to search forward or backward
@param line the line where the search should start from
"""
try:
self._getCurrentEditor().findFirst(find,
self._regexCheckBox.isChecked(), False, False,
True, forward, line)
except NoSuchFileException:
pass
def _editFindPrevious(self):
"""
Protected method for finding a previously found string in a document
"""
self._findReplaceDlg.show()
text = self._findInput.text()
if text:
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
self._editFindString(text, False, editor.getCursorPosition()[0])
editor.findNext()
else:
self._findInput.setFocus()
def _editFindNext(self):
"""
Protected method for finding a next occurrence of a string
"""
text = None
try:
text = self._findInput.text()
except AttributeError:
# find next invoked from menu without find dialog
self._editFind()
self._findReplaceDlg.show()
text = self._findInput.text()
if text:
self._editFindString(text)
else:
self._findInput.setFocus()
def _editReplace(self):
"""
Protected method for replacing a selected and found text
"""
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
if editor.hasSelectedText():
line, index = editor.getCursorPosition()
editor.removeSelectedText()
editor.insert(self._replaceInput.text())
editor.send(line, type=TextEdit.TextEdit.REPLACE)
else:
self.statusbar.showMessage("Find something first", 3000)
# >>>>>>>>>>>>>>>>>>>>>> View menu actions <<<<<<<<<<<<<<<<<<<<<<
def _viewIncreaseFont(self):
"""
Protected method increasing font size for all editors
"""
editors = self._editors.values()
for editor, boolean in editors:
editor.zoomIn()
if len(editors) > 0:
editors[0][0].increaseFontSize()
def _viewDecreaseFont(self):
"""
Protected method decreasing font size for all editors
"""
editors = self._editors.values()
for editor, boolean in editors:
editor.zoomOut()
if len(editors) > 0:
editors[0][0].decreaseFontSize()
def _hideInformationBar(self):
"""
        Protected method hiding or showing the information bar
"""
if self.actionHideInformationBar.isChecked():
self.toolbox.hide()
else:
self.toolbox.show()
def _hideContextTabWidget(self):
# hide = self.contextTabWidget.isHidden()
# self.contextTabWidget.setVisible(hide)
# self.buttonHide.setIcon(self.buttonHideIcons[hide])
# self.buttonHide.setText((hide and "->") or "<-") # "v" if hide else "^")
self.contextTabWidget.setVisible(self.contextTabWidget.isHidden())
def _viewSetHighlighting(self, hl=None):
"""
Protected method setting the highlighting of the current document
@param hl the highlighting to set (str). If this is omitted, the
method is probably invoked through an action, and the action's
text is used as hl
"""
if hl is None:
action = self.sender()
if isinstance(action, QAction):
hl = str(action.text())
if hl is not None:
try:
self._getCurrentEditor().setLanguage("", hl)
except NoSuchFileException:
pass
def _viewLeftTab(self):
self.editorTabWidget.previous()
def _viewRightTab(self):
self.editorTabWidget.next()
def _viewCloseTab(self):
index = self.editorTabWidget.currentIndex()
if index > -1:
self._confirmEditorClose(index)
# >>>>>>>>>>>>>>>>>>>>>> Project menu actions <<<<<<<<<<<<<<<<<<<<<<
@debug
def projectCheckDir(self):
"""
Private method checking if the project dir is properly set
@return whether the project dir is properly set (bool)
"""
return self.projectDir is not None and os.path.exists(self.projectDir)
@debug
def _projectEnsureDir(self):
"""
Protected method ensuring the projectDir is properly set
        @return false if the user doesn't want to set it (bool)
"""
if not self.projectCheckDir():
self._projectSetProjectDir()
if self.projectCheckDir():
self.projectRefresh()
return True
else:
return False
else:
return True
def _find(self, filename, widget):
"""
Protected method for finding a file in the project directory
@param filename the name of the file to find
@param widget the QTextBrowser object to display results in
"""
if self.projectCheckDir():
filename = filename.lower()
regex = re.compile(filename)
for f in Find.Find(self.projectDir).find(): #exclude=()):
if filename in f.lower() or regex.search(f.lower()):
widget.addItem(f)
def _confirmOverwrite(self, filename):
"""
Private method checking if the given filename exists and returning
whether it can be overwritten or not.
@param filename the name of the file to be checked
        @return whether to overwrite the file (bool)
"""
retval = True
if os.path.exists(filename):
if os.path.isdir(filename):
self.errorMessage("File exists and is a directory." + \
"Please pick another name")
retval = False
else:
retval = QMessageBox.question(self, "Overwrite %s" % filename, \
"Filename %s already exists. Overwrite it?" % (filename), \
QMessageBox.Yes|QMessageBox.No) == QMessageBox.Yes
return retval
@debug
def _projectNewFile(self, project, package, filename, send=True):
"""
Protected method creating a new file for in a project
@param project the project to put the file in
@param package the package of the file
@param filename the file to be created
        @param send whether we created the new file ourselves or it was created by some other host in the project
"""
if package is None:
path = os.path.join(self.projectDir, project, "")
else:
path = os.path.join(self.projectDir, project,
package.replace(".", os.sep), "")
if filename.endswith(".java"):
filename = filename.title()
fname = os.path.join(path, filename)
try:
if not os.path.isdir(path):
os.makedirs(path)
os.mknod(path + filename, 0644)
load = True
elif self._confirmOverwrite(fname):
if os.path.exists(path + filename):
# first see if it's opened, and if so, close it
try:
idx = self._fileGetIndex(fname)
except ValueError:
pass
else:
self._fileRemoveOpenFile(idx)
os.remove(fname)
os.mknod(fname, 0644)
load = True
else:
load = False
except OSError, e:
self.errorMessage("Unable to create file: %s" % e)
return
if send:
self._projectSendNewFile(project, package, filename)
if load and send:
self.loadFile(fname)
self._setRelevantText(project, package)
# set focus
self.editorTabWidget.setCurrentIndex(
self.editorTabWidget.currentIndex())
# self.projectRefresh()
def _setRelevantText(self, project, package=None):
"""
        Private method setting some code in the editor
        @param project the project the file is in
        @param package the package of the file
"""
filename = self._fileGetOpenFile(self.editorTabWidget.currentIndex())
editor = self._fileGetEditor(filename)
filename = self._basename(filename)
if filename.endswith(".py") or filename.endswith(".pyw"):
editor.insert("#!/usr/bin/env python\n\n")
elif filename.endswith(".sh"):
editor.insert("#!/bin/bash\n\n")
elif filename.endswith(".java"):
editor.insert( \
"public class %s%s {\n\n"%(filename[0].upper(), filename[1:-5]) + \
" public %s%s () {\n\n"%(filename[0].upper(),filename[1:-5]) + \
" }\n\n" + \
"}\n"
)
if package is not None:
editor.insertAt("package %s.%s;\n\n" % (project, package), 0, 0)
elif filename.endswith(".pl"):
editor.insert("#!/usr/bin/env perl\n\n")
elif filename.endswith(".rb"):
editor.insert("#!/usr/bin/env ruby\n\n")
elif filename.endswith(".vhdl"):
editor.insert(
"library ieee;\n" +
"use ieee.std_logic.1164.all;\n\n" +
"entity myentity is\n" +
" port ();\n" +
"end myentity\n\n" +
"architecture behaviour of myentity is\n" +
"begin\n" +
" -- \n"
"end behaviour;\n"
)
elif filename.endswith(".c"):
editor.insert(
"\n"
"\n"
"int main(int argc, char **argv) {\n"
"\n"
"}\n"
)
self.fileSave(self.editorTabWidget.currentIndex())
@debug
def removeFile(self, filename, project=True):
"""
Public method for removing a file from a project, or a whole project
@param filename the file to remove
        @param project whether the file is a file in a project (or a project)
(it could also be a template)
"""
directory = os.path.isdir(filename)
try:
if directory:
shutil.rmtree(filename)
else:
os.remove(filename)
except OSError, e:
self.errorMessage("Unable to delete file or directory: %s" % e)
return
if project:
if self._abspath(filename[:-1]) == self.projectDir and \
self._basename(filename) in self._projects:
self._projects[self._basename(filename)].close()
del self._projects[self._basename(filename)]
if directory:
# we need to check if it's a directory (e.g. when you remove
# /foo/bar/foo you don't want /foo/bar/foo.py to be closed)
filename += "/"
removed = 0
for x in xrange(len(self._openFileList)):
if self._openFileList[x - removed].startswith(filename):
self._fileRemoveOpenFile(x - removed)
removed += 1
elif filename in self._openFileList:
self._fileRemoveOpenFile(self._openFileList.index(filename))
# the timer thingy prevents a segfault, for a reason unknown
QTimer.singleShot(0, self.projectRefresh)
@debug
def renameFile(self, old, new, send=True):
"""
Public method for renaming a file or project.
@param old the old file to rename, including full path
@param new the new filename, without path
"""
newname = self._abspath(old) + new
if self._confirmOverwrite(newname):
if not os.path.exists(old):
return
if send:
project, package, filename = self._projectGetCurrentInfo(old)
self._projectSendRenameFile(project, package, filename, new)
os.rename(old, newname)
self.projectRefresh()
self._templateTree.refresh()
def updateFileList():
"""
                Function updating open files, if a directory along its path
                was renamed
"""
for x in xrange(len(self._openFileList)):
fname = self._openFileList[x]
if fname.startswith(old):
newfname = "".join((newname, fname[len(old):]))
self._openFileList[x] = newfname
self._editors[newfname] = self._editors.pop(fname)
# discard '/' or last letter to get the path
path = self._abspath(old[:-1])
if path == self.projectDir and self._basename(old) in self._projects:
# a project was renamed
self._projects[self._basename(old)].setName(new)
self._projects[new] = self._projects.pop(self._basename(old))
updateFileList()
elif old in self._openFileList:
# an open file was renamed
index = self._openFileList.index(old)
self._openFileList[index] = newname
self.editorTabWidget.setTabText(index, new)
self._editors[newname] = self._editors.pop(old)
elif os.path.isdir(newname):
# package renamed
updateFileList()
@debug
def projectAddFile(self, project, src):
"""
Public method for adding an existing file to the project.
@param project the project to add the selected file to
@param src the file to be added
"""
dest = "".join((self.projectDir, project, "/", self._basename(src)))
if self._confirmOverwrite(dest):
try:
shutil.copy(src, dest)
except IOError, e:
self.errorMessage("Failed to copy %s to %s:\n%s" %(src,dest,e))
return
# let other people know we added a new file (they can download it using
# the sync option)
project, package, filename = self._projectGetCurrentInfo(dest)
self._projectSendNewFile(project, package, filename)
self.loadFile(dest)
self.projectRefresh()
def projectRefresh(self):
"""
Public method that refreshes the project tree
"""
self.projectTree.projectRefresh()
def _projectSetProjectDir(self):
"""
        Private method popping up a dialog asking for the directory that will
contain all projects and project files.
"""
# self.infoMessage() does not work here (segfault)
def popup():
o = QWidget()
QMessageBox.information(o, "Set Project Directory",
"Please set the directory that will contain your source "
"and project files.")
QTimer.singleShot(0, popup)
d = QFileDialog.getExistingDirectory(None, \
"Set the source directory for all projects", self._fileGetLastDir())
self.projectDir = str(d)
if self.projectCheckDir():
self._fileSetLastDir(d)
self.setProjectDir()
def setProjectDir(self):
"""
        Public method used only for setting the project directory programmatically.
"""
# first time d will be an empty string, so check
d = str(self.projectDir)
if os.path.isdir(d):
self.projectDir = d
Project.PROJECTDIR = d
MainWindow.PROJECTDIR = d
self.projectTree.setModel_()
self.projectDirLabel.setText(QString("<b>%s</b>" % d))
self._loadProjects()
else:
# popup a dialog
self._projectSetProjectDir()
def _loadProjects(self):
"""
Private method creating project objects from all projects in the project
directory
"""
names = [name for name in os.listdir(self.projectDir) \
if os.path.isdir(self.projectDir + name) and not name == ".templates"]
for name in names:
self._projects[name] = Project.Project(self, name)
self._projects[name].load()
def _projectCreateProject(self):
"""
Protected method for creating a new project
"""
name = self.projectInput.text()
pw = self.passwordInput.text()
if name.isEmpty():
self.errorMessage("Please provide a name for the project")
self.projectNewWidget.raise_()
self.projectNewWidget.activateWindow()
return
if QFile.exists(self.projectDir + name):
self._repeatDlg(self.projectNewWidget, "File already exists. " + \
"Please remove it or pick another name")
return
if pw.isEmpty():
# self.infoMessage("You didn't provide a password. If you at " + \
# "some point decide otherwise, you can set it via " + \
# "\"Project\" -> \"Project Settings\"", "Password")
pw = None
name = str(name)
p = Project.Project(self, name, pw)
if p.create():
self._projects[name] = p
self.projectNewWidget.close()
self.toolbox.setCurrentIndex(0) # the project
self.projectRefresh()
else:
self.projectNewWidget.raise_()
self.projectNewWidget.activateWindow()
def _projectNew(self):
"""
Protected method popping up a dialog for creating a new project
"""
if self.projectCheckDir():
self.createProjectNewDlg()
else:
if self._projectEnsureDir():
self._projectNew()
@debug
def _projectSettings(self, oldname, newname, password, visible):
"""
Protected method for setting the newly decided project settings
@param oldname the old name of the project
@param newname the new name of the project
@param password the password for the project
@param visible the visibility of the project
"""
if oldname != newname:
self.renameFile(self.projectDir + oldname, newname)
password = password.strip() or None
self._projects[newname].setPassword(password)
self._projects[newname].setVisible(visible)
def _projectGetInfo(self, name):
"""
Protected method for retrieving project information
@param name the name of the project to retrieve information of
@return (project name (str), project password (str), project \
visibility (bool)) (tuple)
"""
if name in self._projects:
p = self._projects[name]
pw = p.password()
if pw is None:
pw = ""
return (p.getName(), pw, p.isVisible())
else:
return ("", "", "")
def _projectGetCurrentInfo(self, filename=None):
"""
Private method for obtaining information about the current file or
the one given. This method may raise NoSuchFileException.
@param filename the filename (str)
@return a tuple with project, package, filename
"""
if filename is None:
filename = self._fileGetOpenFile()
if self.projectDir is not None:
project = filename.replace(self.projectDir, "").split("/")[0]
f = self._basename(filename)
package = filename[\
len(self.projectDir) + len(project) +1 : len(filename) - len(f) -1]
package = package or Network.PASS
return (project, package, f)
else:
return ("", "", filename)
def isConnected(self, filename):
"""
Public method used by TextEdit to determine if the file is in a project
that is connected with other hosts. This is done for the 'undo' action,
since the action might be relatively resource intensive
"""
retval = False
project, package, filename = self._projectGetCurrentInfo(filename)
if project in self._projects:
retval = self._projects[project].isConnected()
return retval
def setStylesheet(self):
stylesheet = ""
if self.stylesheet:
icon = self.base + "img/eggy/eggy-tree-small.png"
stylesheet = ("QTreeView, QTextBrowser, QListWidget {"
"background-color: white;"
"background-image: url(%s); " % icon + \
"background-attachment: scroll;"
"background-repeat: vertical;"
"background-position: center;"
"}"
)
self.projectTree.setStyleSheet(stylesheet)
self._templateTree.setStyleSheet(stylesheet)
self._pluginList.setStyleSheet(stylesheet)
# for project in self._projects.itervalues():
# project.memberList().setStyleSheet(stylesheet)
self._currentMemberList.setStyleSheet(stylesheet)
def tabChanged(self, index):
"""
Public method that updates the chat widget and user list on tab change
according to the project of the newly selected tab
@param index the index of the current tab
"""
try:
            if not (0 <= index < len(self._openFileList)):
raise NoSuchFileException
project, package, filename = self._projectGetCurrentInfo()
editor = self._fileGetEditor(self._openFileList[index])
except NoSuchFileException:
pass
else:
self.emit(SIGNAL("tabchanged"))
editor.setFocus()
self.actionFileSave.setEnabled(editor.isModified())
self.actionEditCopy.setEnabled(editor.hasSelectedText())
self.actionEditCut.setEnabled(editor.hasSelectedText())
self.filenameLabel.filename = self._openFileList[index]
if project in self._projects:
project = self._projects[project]
self._currentChatBrowser.hide()
self._currentChatBrowser = project.browser()
self._currentChatBrowser.show()
self.chatLabel.setText("Project chat: <b>%s</b>" % project.getName())
self._currentMemberList.hide()
self._currentMemberList = project.memberList()
self._currentMemberList.show()
self._userLabel.setText("Members in project <b>%s</b>" % project.getName())
# >>>>>>>>>>>>>>>>>>>>>> Model->Network communication <<<<<<<<<<<<<<<<<<<<<<
@network
def _projectConnect(self, address, port, project):
"""
Protected method that lets the user connect to another project
@param address the address of the host
@param port the host's port number
"""
if project not in self._projects or not \
self._projects[project].isVisible():
# user might have removed the project or set it to invisible
# while having the dialog open
return
self._projects[project].server = address
self._projects[project].serverport = port
self._queue.put((Network.TYPE_CONNECT, address, int(port), project, \
self._projects[project].password()))
@network
def sendInsertedText(self, line, txt):
"""
Public method for sending text to the other project members
@param txt the text to be inserted (str)
"""
project, package, filename = self._projectGetCurrentInfo()
if project in self._projects:
p = self._projects[project]
timer = p.getTimer()
if p.isVisible():
self._queue.put((Network.TYPE_INSERTEDTEXT, timer.now(), \
project, package, filename, line, txt))
def projectSetVisible(self, project, add=True):
"""
Public method for setting the project visibility (and syncing this
with the network). We don't apply the network decorator, because
when the user decides to restart the network in the settings dialog,
we need our projects synced into the network.
@param project the project name
@param add whether to add or remove the project from the network
"""
project = self._projects[project]
if add:
self._queue.put((Network.TYPE_ADDPROJECT, project))
else:
self._queue.put((Network.TYPE_REMOVEPROJECT, project))
@network
def _projectSync(self, project, user):
"""
Protected method that lets the user sync all files in the project
@param project the project to be synced
@param user the victim to request all syncs from
"""
for f in Find.Find(self.projectDir).find(project, include_path=True):
project, package, filename = self._projectGetCurrentInfo(f)
w = self._syncingWidget(filename)
self._projectRequestSyncFile(w, user, project, package, filename)
@network
@debug
def _projectRequestSyncFile(self, widget, user, project, package, filename):
"""
Protected method for requesting the sync for a file
@param widget the widget that will temporarily replace the editor
@param user the user to send the request to
@param project the project the file is in
@param package the package of the file
@param filename the (base)name of the file
"""
fn = self._assemble(project, package, filename)
self._syncingFiles[fn] = widget
if fn not in self._openFileList:
self.loadFile(fn)
editor = self._fileGetEditor(fn)
assert editor is not None and fn in self._openFileList
index = self._openFileList.index(fn)
self.fileSave(index)
# hide the editor and display the "syncing widget"
self.editorTabWidget.removeTab(index)
self.editorTabWidget.insertTab(index, widget, filename)
self.editorTabWidget.setCurrentIndex(index)
editor.setState(TextEdit.TextEdit.SYNCING)
self._queue.put((Network.TYPE_REQUESTSYNC, user, project, \
package or Network.PASS, filename))
@debug
def _projectSyncCompleted(self, filename):
"""
Protected method called when the syncing was aborted or stopped
        @param filename the name of the file that sync was called on
"""
if filename in self._syncingFiles:
assert filename in self._openFileList
index = self._openFileList.index(filename)
editor = self._fileGetEditor(index)
editor.setState(TextEdit.TextEdit.NORMAL)
editor.processReceivedWhileSyncing()
# restore the tab with the editor
self.editorTabWidget.removeTab(index)
self.editorTabWidget.insertTab(index, editor, self._basename(filename))
self.editorTabWidget.setCurrentIndex(index)
del self._syncingFiles[filename]
@network
@debug
def replySync(self, args):
"""
Public method for replying to a request for the sync of a file
"""
username, project, package, f = [str(arg) for arg in args]
filename = self._assemble(project, package, f)
file = None
if filename in self._openFileList:
self.fileSave(self._openFileList.index(filename))
try:
file = open(filename, "rU")
except IOError:
self._queue.put((Network.TYPE_SYNC, username, project, package, f, None))
if file is not None:
file.close()
else:
self._queue.put((Network.TYPE_SYNC, username, project, package, f, file))
def synced(self, args):
"""
Public method for receiving the synced file
        @param args a QStringList of the form [project, package, filename, file_text]
"""
project, package, f, text = [unicode(arg) for arg in args]
filename = self._assemble(project, package, f)
if filename in self._syncingFiles and filename in self._openFileList:
editor = self._fileGetEditor(filename)
assert editor is not None
done = True
if text == Network.ERROR:
self.errorMessage("Unable to sync file, the person synced " + \
"from has probably set permissions to tight.")
elif text.startswith("insert"):
editor.setText(text[6:])
done = False # wait for append, Network.ERROR or |done| packets
elif text.startswith("append"):
editor.append(text[6:])
done = False
if done:
self._projectSyncCompleted(filename)
@network
def _projectSendNewFile(self, project, package, filename):
self._queue.put((Network.TYPE_PROJECTNEWFILE, project, \
package or Network.PASS, filename))
@network
def projectSendRemoveFile(self, filename):
project, package, filename = self._projectGetCurrentInfo(filename)
self._queue.put((Network.TYPE_PROJECTREMOVEFILE, project, package, filename))
@network
def _projectSendRenameFile(self, project, package, old, new):
if package is None:
package = Network.PASS
self._queue.put((Network.TYPE_PROJECTRENAMEFILE, project, package, old, new))
@debug
def sendProjectFiles(self, args):
"""
Public method that gets signalled from the network, after having
sent a list of addresses, that it needs
to send a list of project files to a specific user.
@param args a QStringList containing project and username
"""
project, username = [str(arg) for arg in list(args)]
text = ""
for f in Find.Find(self.projectDir).find(project):
text += "%s%s" % (Network.DELIM, f)
self._queue.put((Network.TYPE_PROJECTFILES, project, username, text))
@network
def _userChatSend(self):
"""
Protected method for sending chat text to other hosts in the project
"""
# retrieve the project from the chatlabel
project = str(self.chatLabel.text()).split(" ")[-1].replace("<b>", \
"").replace("</b>", "")
text = str(self._chatInput.text())
if text:
self._chatInput.clear()
if project in self._projects and \
self._projects[project].isVisible():
self._queue.put((Network.TYPE_SENDCHATTEXT, project, str(text)))
# let ourselves know we said something
l = QStringList()
l.append(project)
l.append(self._username)
l.append(text)
self.userChatInsertText(l)
else:
self._currentChatBrowser.setHtml(
"<b>File not in a project.</b><br><br>" + \
"You can set it to visible in Project -> Project Settings.<br>"
)
@network
def _chatChangeUsername(self, old, new):
"""
Protected method letter other users know we changed our name
@param old our old username
@param new our new username
"""
for project in self._projects:
l = QStringList()
l.append(project)
l.append(old)
l.append(new)
self.chatUsernameChanged(l, us=True)
if self._projects[project].isVisible():
self._queue.put(
(Network.TYPE_USERNAMECHANGED, project, old, new)
)
@network
def _projectsQuit(self):
"""
Private method for quitting all projects. Invoked on program shutdown
"""
for project in self._projects:
self.projectSetVisible(project, False)
def _networkRestart(self):
"""
Protected method called by the constructor or by the user from the
settings dialog to restart the network
"""
if self._settingsDlg is not None:
# save the potentially newly set port first
popup = True
self._port = int(self._settingsNetworkPort.text())
try:
self._network = Network(self, self._queue, self._username, \
port=self._port)
except PortTakenException:
self._networkUp = False
self.errorMessage("Unable to start the network, the port is " + \
"probably already taken. Please choose another in the " + \
"\"Settings\" -> \"Configure eggy\" dialog under the " + \
"\"Network\" tab or try again after some time. You will not " + \
"be able to connect or accept incoming connections until the " + \
"network is started.")
else:
Decorators.RUNNING = True
self._networkUp = True
self._network.start()
# disable the restart button
if self._settingsDlg is not None:
self._networkRestartButton.setEnabled(False)
if self._settingsDlg is not None:
self._settingsDlg.raise_()
# >>>>>>>>>>>>>>>>>>>>>> Network->Model communication <<<<<<<<<<<<<<<<<<<<<<
def _assemble(self, project, package, filename):
package = package.strip() or Network.PASS
f = "/".join((self.projectDir + project, package, filename))
return str(f.replace(Network.PASS + "/", ""))
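    # Worked example (editor's note): with projectDir == "/home/me/src/",
    # _assemble("eggy", "gui", "Main.py") gives "/home/me/src/eggy/gui/Main.py";
    # when the package is the Network.PASS placeholder, that placeholder plus
    # its trailing "/" is stripped back out, so a top-level file resolves to
    # "/home/me/src/eggy/Main.py".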
def networkError(self, text):
self.errorMessage(text)
def receiveInsertText(self, args):
"""
Public method invoked by the network when another connected host
inserted text
@param args a QStringList containing respectively project, package,
filename, line, text
"""
project, package, filename, line, text = \
[unicode(arg) for arg in list(args)]
f = self._assemble(project, package, filename)
editor = self._fileGetEditor(f)
if editor is not None:
try:
editor.receive(int(line), text)
except ValueError:
pass
@debug
def receiveProjectNewFile(self, args):
project, package, filename = [str(arg) for arg in list(args)]
if package == Network.PASS:
package = None
self._projectNewFile(project, package, filename, send=False)
@debug
def receiveProjectRemoveFile(self, args):
user, project, package, filename = [str(arg) for arg in list(args)]
filename = self._assemble(project, package, filename)
self.projectTree.projectRemoveFile(filename=filename, \
msg="User %s want to delete %s. " % (user, filename))
@debug
def receiveProjectRenameFile(self, args):
project, package, old, new = [str(arg) for arg in list(args)]
old = self._assemble(project, package, old)
self.renameFile(old, new, send=False)
@debug
def receiveProjectFiles(self, args):
project, text = [unicode(arg) for arg in list(args)]
project = str(project)
files = text.split("|||")
for f in Find.Find(self.projectDir).find(project):
if f in files:
files.remove(f)
for f in files:
if f:
if "/" in f:
self._projectNewFile(project, self._abspath(f), \
self._basename(f), send=False)
else:
self._projectNewFile(project, None, f, send=False)
self.projectRefresh()
def userChatInsertText(self, args):
"""
Public method that handles arrived chat text from another host
"""
project, username= [str(arg) for arg in list(args)[:2]]
text = unicode(list(args)[2])
if project in self._projects:
browser = self._projects[project].browser()
browser.insertHtml("%s < <b>%s</b> > " % \
(time.strftime("%H:%M"), username))
browser.insertPlainText(text + "\n")
# browser.verticalScrollBar().setSliderDown(True)
browser.verticalScrollBar().setValue(
browser.verticalScrollBar().maximum())
def chatUsernameChanged(self, args, us=False):
"""
        Public method that displays a change of username from someone
        in the right chat
@param args format: [project, old, new]
@param us if we are the ones changing our name, or some other host
"""
project, old, new = [str(arg) for arg in list(args)]
if project in self._projects:
p = self._projects[project]
p.browser().insertHtml(\
"%s -- %s is now known as <b>%s</b> --<br>\n" % \
(time.strftime("%H:%M"), old, new))
if not us:
p.removeMember(old)
p.addMember(new)
def userConnected(self, args):
"""
Public method that adds a newly connected user to the memberlist of
the project
@param args a QStringList of type [project, username]
"""
project, username = [str(arg) for arg in list(args)]
if project in self._projects:
self._projects[project].browser().insertHtml(\
"%s -- %s has <b>joined</b> the project<br>\n" % \
(time.strftime("%H:%M"), username))
self._projects[project].addMember(username)
def userQuit(self, args):
"""
Public method for removing a former participant
@param args QStringList of the format [project, username]
"""
project, username = [str(arg) for arg in list(args)]
if project in self._projects:
self._projects[project].browser().insertHtml(\
"%s -- %s has <b>left</b> the project<br>\n" % \
(time.strftime("%H:%M"), username))
self._projects[project].removeMember(username)
'''
# >>>>>>>>>>>>>>>>>>>>>> Compilation methods <<<<<<<<<<<<<<<<<<<<<<
def _compile(self):
"""
Protected method taking care of compiling and/or running the currently
selected file
"""
self.actionFileCompile.setEnabled(False)
try:
filename = self._fileGetOpenFile(self.editorTabWidget.currentIndex())
except NoSuchFileException:
return
if not os.path.exists(filename):
#self.errorMessage("Please save the file first.")
if self._fileSaveAs():
                # successfully saved
self._compile()
return
# save the file first
self.fileSave()
# compile
self._compileCode(filename, self._compileCheckBoxCompile.isChecked(),
self._compileCheckBoxRun.isChecked())
def _compileCode(self, filename, compile, run):
"""
Private method taking care of compiling and running the given file
@param filename the filename to compile/run
@param run whether to compile only, or compile and run (interpreted
languages are run either way)
"""
try:
if self._compileCheckBoxProgram.isChecked():
programargs = str(self._programArguments.text())
else:
programargs = ""
self._compileObject = (Compile(self, filename, compile, run,
str(self._compileArguments.text()),
str(self._runArguments.text()),
str(programargs)))
except NoCompilerAvailableException:
self.errorMessage("Failed to compile, unknown file type.")
else:
self._compileText.clear()
self._compileObject.start()
def setOutput(self, text):
"""
Public method called from a compilation thread
@param text the text to be inserted
"""
self._compileText.insertPlainText(text)
self._compileText.verticalScrollBar().setValue(
self._compileText.verticalScrollBar().maximum())
def setHtml(self, html):
"""
Public method called from a compilation thread
@param html the html text to be inserted
"""
self._compileText.insertHtml(html)
self._compileText.verticalScrollBar().setValue(
self._compileText.verticalScrollBar().maximum())
def compilationStarted(self, filename):
self._compileStopButton.setEnabled(True)
self._compileButton.setEnabled(False)
self.statusbar.showMessage("Started compiling/running %s" % filename,\
3000)
def compilationFinished(self, filename):
self.actionFileCompile.setEnabled(True)
self._compileStopButton.setEnabled(False)
self._compileButton.setEnabled(True)
self.statusbar.showMessage("Finished compiling/running %s" % filename,\
3000)
def compilationKilled(self, filename):
self._compileStopButton.setEnabled(False)
self._compileButton.setEnabled(True)
self.statusbar.showMessage("Killed compiling/running %s" % filename,\
3000)
def _stop(self):
"""
        Protected method used for stopping the current compilation
"""
if self._compileObject is not None and not self._compileObject.killed:
self._compileObject.kill()
'''
# >>>>>>>>>>>>>>>>>>>>>> Template methods <<<<<<<<<<<<<<<<<<<<<<
def templateCreate(self):
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
if not editor.hasSelectedText():
self.errorMessage("Select something first")
elif self._templateTree.templateDir() is None:
self.errorMessage("Set the project directory first")
else:
self._templateText = str(editor.selectedText())
self._templateCreateDlg()
def templateSave(self, d, filename):
if d is None:
filename = "%s%s" % (self._templateTree.templateDir(), filename)
else:
filename = "%s%s/%s" % (self._templateTree.templateDir(), d, \
filename)
if os.path.exists(filename):
if self._confirmOverwrite(filename):
self.removeFile(filename, False)
else:
return
try:
os.mknod(filename, 0774)
f = open(filename, "w")
f.write(self._templateText)
del self._templateText
self._templateTree.refresh()
except OSError, e:
self.errorMessage("Unable to save template %s: %s" % (filename, e))
return
def templatePaste(self, template):
try:
editor = self._getCurrentEditor()
except NoSuchFileException:
pass
else:
try:
f = open(template, "r")
except IOError, e:
self.errorMessage("Unable to read template: %s" % e)
return
# care for indentation
l, i = editor.getCursorPosition()
editor.beginUndoAction()
for number, line in enumerate(f):
editor.insertLine(" " * i + line, l + number)
editor.endUndoAction()
# editor.insertText(text)
def templateMkdir(self, name):
if self._projectEnsureDir():
filename = self._templateTree.templateDir() + name
if os.path.exists(filename):
if self._confirmOverwrite(filename):
self.removeFile(filename, False)
else:
return
try:
os.makedirs(filename)
except OSError, e:
self.errorMessage("Unable to create file %s: %s" % (filename, e))
self._templateTree.refresh()
# >>>>>>>>>>>>>>>>>>>>>> Settings menu actions <<<<<<<<<<<<<<<<<<<<<<
def _applySettings(self):
"""
Protected method that applies the user's configuration as set in "Settings -> Configure"
"""
settings = QSettings()
# editor
self.useTabs = self._settingsUseTabs.isChecked()
self.tabwidth = self._settingsTabwidth.value()
self.whiteSpaceVisible = self._settingsWhiteSpaceVisible.isChecked()
self.boxedFolding = self._settingsBoxedFolding.isChecked()
self.autoComplete = self._settingsAutoComplete.isChecked()
self.indentationGuides = self._settingsIndentationGuides.isChecked()
self.autoCompleteWords = self._settingsAutoCompleteWords.isChecked()
self.autoCompleteInvocationAmount = \
self._settingsAutoCompleteInvocation.value()
self.showAllFiles = self._settingsShowAllFiles.isChecked()
self.stylesheet = self._settingsShowEggyImage.isChecked()
self.setStylesheet()
self.projectTree.setFilters()
for editor, b in self._editors.itervalues():
editor.setAttrs()
for extension in self._settingsCompilers:
tpl = self._settingsCompilers[extension]
compiler = str(tpl[1].text())
interpreter = compiler
if tpl[0] is not None:
compiler = str(tpl[0].text())
Compile_.setCompiler(extension, (compiler, interpreter))
self._port = int(self._settingsNetworkPort.text())
self._settingsDlg.close()
# >>>>>>>>>>>>>>>>>>>>>> Plugins <<<<<<<<<<<<<<<<<<<<<<
def _loadPlugins(self, refresh=False):
"""
Private method loading all plugins
@param refresh if refresh is True no plugins are stopped or started
(this is used when refreshing the plugin list)
"""
plugindir = self.base + "plugins"
if not os.path.exists(plugindir):
return
# remove all .pyc files (because if we edited a plugin, reload() will
        # load the old .pyc file)
for fname in glob.glob("/".join((plugindir, "*.pyc"))):
try:
os.remove(fname)
except OSError:
pass
for name in self._plugins.keys():
try:
reload(self._plugins[name])
except:
self._plugins.pop(name)
self._pluginList.clear()
for fname in glob.glob("/".join((plugindir, "*.py"))):
name = self._basename(fname).split(".")[0]
if name == '__init__':
continue
if name not in self._plugins:
try:
# __import__ in 2.4 does not accept keyword arguments
plugin = __import__("%s.%s" % ("eggy.plugins", name), {}, {},
['']) # import rightmost
# check for validity
assert isinstance(plugin.author, str)
assert isinstance(plugin.version, (float, int, long))
assert isinstance(plugin.description, str)
# and for existence and callability
for function in (plugin.start, plugin.stop):
assert callable(function)
except:
print "Invalid plugin: %s" % name
import traceback
traceback.print_exc()
continue
self._plugins[name] = plugin
plugin.method = {}
plugin.widget = {}
plugin.method["load"] = self.loadFile
plugin.method["save"] = self.fileSave
plugin.method["get"] = self.get
plugin.method["close"] = self._confirmEditorClose
plugin.method["infoMessage"] = self.infoMessage
plugin.method["errorMessage"] = self.errorMessage
plugin.method["systrayMessage"] = self.systrayMessage
plugin.method["createAction"] = self.createAction
plugin.method["createButton"] = self._createButton
plugin.method["showDlg"] = self._showDlg
plugin.widget["right"] = self.toolbox
plugin.widget["bottom"] = self.contextTabWidget
self._pluginList.addItem(name)
if not refresh:
if self._autostartPlugin(name):
self._pluginStart(name)
def _pluginNew(self):
name = self.base + "Example.py"
self.loadFile(name)
self._editors[name][1] = True # invoke fileSaveAs
def _pluginStart(self, name):
try:
self._plugins[name].start(self)
except:
self.systrayMessage(name, "Unable to start plugin '%s': %s %s" % (
(name,) + sys.exc_info()[:2]))
def _pluginStop(self, name):
"""
        Private method calling 'stop' on a single plugin. Called when eggy
        is being closed
@param name the name of the plugin to stop
"""
try:
self._plugins[name].stop(self)
except:
self.systrayMessage(name, "Unable to stop plugin %s" % name)
def _pluginsStop(self):
"""
Private method stopping all plugins on eggy shutdown
"""
for name in self._plugins:
self._pluginStop(name)
def _autostartPlugin(self, name):
return QSettings().value("Plugins/" + name, QVariant(False)).toBool()
def _pluginShowInfo(self, name):
name = str(name)
if not name:
return
plugin = self._plugins[name]
desc = textwrap.wrap(textwrap.dedent(plugin.description), 40)
self._pluginInfo.setText(
"<br />".join((
"<b>Author:</b>",
" " + plugin.author,
"",
"<b>Version:</b>",
" " + str(plugin.version),
"",
"<b>Description:</b>",
" " + "<br /> ".join(desc),
# " " + plugin.description.replace("\n", "<br /> "),
)).replace(" ", " "*2)
)
check = self._autostartPlugin(name)
if check != self._pluginAutoStart.isChecked():
# ignore the state change
self._ignoreStateChange += 1
self._pluginAutoStart.setChecked(check)
# >>>>>>>>>>>>>>>>>>>>>> Methods for quitting <<<<<<<<<<<<<<<<<<<<<<
def editorClose(self):
"""
Public method called by the user from the context menu to close
the current editor
"""
self._confirmEditorClose(self.editorTabWidget.currentIndex())
def editorCloseAll(self):
"""
Public method closing all open editors
"""
        for index in range(len(self._openFileList)):
            if not self._confirmEditorClose():
                # user pressed cancel, stop closing further editors
                break
def _confirmEditorClose(self, index=0):
"""
Private method for confirming the closing of a tab
@param index the index of the editor/file to close
@return True if the user did not press cancel, else False
"""
try:
filename = self._fileGetOpenFile(index)
except NoSuchFileException:
# invalid index
return True
retval = True
editor = self._fileGetEditor(filename)
if editor is not None and editor.isModified():
self.editorTabWidget.setCurrentWidget(editor)
answer = QMessageBox.question(self, "%s - Save Unsaved Changes" % filename, \
"File \"%s\" has unsaved changes. Save them?" % filename, \
QMessageBox.Yes|QMessageBox.No|QMessageBox.Cancel)
if answer == QMessageBox.Yes:
self.fileSave(index)
self._fileRemoveOpenFile(index)
elif answer == QMessageBox.No:
self._fileRemoveOpenFile(index)
elif answer == QMessageBox.Cancel:
retval = False
else:
self._fileRemoveOpenFile(index)
return retval
def _saveSettings(self):
"""
Private method saving the user's settings
"""
settings = QSettings()
settings.setValue("File/RecentlyOpenedFiles",
QVariant(QStringList(self.recentlyOpenedFiles)))
if self.projectCheckDir():
settings.setValue("Project/SourceDirectory",
QVariant(QString(self.projectDir)))
settings.setValue("Editor/OpenFiles",
QVariant(QStringList(
[f for f in self._openFileList if os.path.exists(f)])))
settings.setValue("Editor/IndexSelectedFile",
QVariant(self.editorTabWidget.currentIndex()))
settings.setValue("Chat/Username", QVariant(QString(self._username)))
settings.setValue("Editor/UseTabs", QVariant(self.useTabs))
settings.setValue("Editor/TabStopWidth", QVariant(self.tabwidth))
settings.setValue("Editor/WhiteSpaceVisible",
QVariant(self.whiteSpaceVisible))
settings.setValue("Editor/BoxedFolding", QVariant(self.boxedFolding))
settings.setValue("Editor/AutoComplete", QVariant(self.autoComplete))
settings.setValue("Editor/IndentationGuides", QVariant(
self.indentationGuides))
settings.setValue("Editor/AutoCompleteWords",
QVariant(self.autoCompleteWords))
settings.setValue("Editor/AutoComleteInvocationAmount",
QVariant(self.autoCompleteInvocationAmount))
settings.setValue("ProjectTree/Image", QVariant(self.stylesheet))
settings.setValue("ProjectTree/ShowAllFiles",
QVariant(self.showAllFiles))
settings.setValue("Network/Port", QVariant(self._port))
self._pluginsStop()
@debug
def _restoreSettings(self):
"""
Private method restoring the saved user's settings
"""
settings = QSettings()
l = settings.value("File/RecentlyOpenedFiles", \
QVariant(QStringList())).toStringList()
self.recentlyOpenedFiles = []
for filename in l:
filename = str(filename)
if os.path.exists(filename):
self.recentlyOpenedFiles.append(filename)
d = settings.value("Project/SourceDirectory", QVariant(QString())).toString()
if d.isEmpty():
self.projectDir = None
else:
self.projectDir = str(d)
if "/" in user.home:
username = user.home.split("/")[-1]
else:
username = "No_username_is_set"
self._username = str(settings.value("Chat/Username", \
QVariant(QString(username))).toString())
self.useTabs = settings.value("Editor/UseTabs",
QVariant(False)).toBool()
self.tabwidth = settings.value("Editor/TabStopWidth",
QVariant(4)).toInt()[0]
self.whiteSpaceVisible = settings.value("Editor/WhiteSpaceVisible",
QVariant(False)).toBool()
self.boxedFolding = settings.value("Editor/BoxedFolding",
QVariant(True)).toBool()
self.autoComplete = settings.value("Editor/AutoComplete",
QVariant(True)).toBool()
self.indentationGuides = settings.value("Editor/IndentationGuides",
QVariant(True)).toBool()
self.autoCompleteWords = settings.value("Editor/AutoCompleteWords",
QVariant(True)).toBool()
self.autoCompleteInvocationAmount = settings.value(
"Editor/AutoComleteInvocationAmount", QVariant(3)
).toInt()[0]
self.showAllFiles = settings.value("ProjectTree/ShowAllFiles",
QVariant(False)).toBool()
self.stylesheet = settings.value("ProjectTree/Image",
QVariant(True)).toBool()
self._port = settings.value("Network/Port", QVariant(7068)).toInt()[0]
def closeEvent(self, event):
"""
Protected method called when the user attempts to close the
application. This is a reimplementation of the event
handler.
@param event the instance of the close event object
"""
if self._shuttingDown:
return
# save the files while they are still open
self._saveSettings()
# Close all projects first
for project in self._projects.itervalues():
project.save()
# cant change a list while looping over it, duh
for index in range(len(self._openFileList)):
# zero is fine since we keep removing the files
if not self._confirmEditorClose():
# user pressed cancel
event.ignore()
break
else:
# StopIteration was raised
# the user decided to shutdown (didnt press cancel on some
# unsaved file)
self._shuttingDown = True
self._saveGuiSettings()
Compile_.saveCompilers()
self._projectsQuit()
self._queue.put((Network.TYPE_QUIT, "discard"))
event.ignore()
if self._networkUp:
QTimer.singleShot(3000, self.quit)
else:
self.quit()
def quit(self):
"""
This method will be invoked from the network, when the network said
goodbye to everyone, or directly, when the network isn't running
"""
import __main__ as eggy
eggy.app.quit()
def killed(self):
"""
Public method called when the user tries to kill the program.
If the network is running, it will send emit a quit signal invoking
'quit'. If the network is not running, we should quit ourselves.
Settings will be lost.
"""
if not self._networkUp:
raise SystemExit(1)
else:
class EventFaker(object):
def ignore(self):
pass
self.closeEvent(EventFaker())
def _setupSocket(self):
"""
This method is called once on initialisation to setup a UNIX Domain
Socket for receiving filenames it must open, by another eggy process.
(it will be sent a SIGUSR1 to notify it of available data)
"""
sockfile = os.path.join(os.sep, 'tmp', 'eggy.socket')
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(sockfile)
s.listen(32)
atexit.register(s.close)
atexit.register(os.unlink, sockfile)
bufsize = os.fpathconf(s.fileno(), 'PC_PIPE_BUF')
        if bufsize < 0 or bufsize > 4096:
            # fall back to a sane default when fpathconf returns an unusable value
            bufsize = 4096
def sigusr_handler(signo, frame):
while select.select([s], [], [], 0)[0]:
client, addr = s.accept()
data = client.recv(bufsize)
for fname in data.split('\x00'):
self.loadFile(fname)
self.raise_()
self.activateWindow()
signal.signal(signal.SIGUSR1, sigusr_handler)
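# A client-side sketch of the protocol handled by _setupSocket() above; it is
# illustrative only and not part of eggy itself. Another eggy process is
# assumed to connect to /tmp/eggy.socket, send NUL-separated filenames and
# then signal the running instance with SIGUSR1 (eggy_pid is hypothetical):
#
#     import os, signal, socket
#     client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
#     client.connect('/tmp/eggy.socket')
#     client.send('\x00'.join(['/path/to/a.py', '/path/to/b.py']))
#     client.close()
#     os.kill(eggy_pid, signal.SIGUSR1)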
|
gpl-3.0
| 8,457,566,645,242,356,000
| 34.175746
| 97
| 0.546923
| false
| 4.733668
| false
| false
| false
|
jasondunsmore/heat
|
heat/engine/update.py
|
1
|
11487
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from heat.common import exception
from heat.common.i18n import _LI
from heat.common.i18n import repr_wraper
from heat.engine import dependencies
from heat.engine import scheduler
from heat.objects import resource as resource_objects
LOG = logging.getLogger(__name__)
@repr_wraper
class StackUpdate(object):
"""A Task to perform the update of an existing stack to a new template."""
def __init__(self, existing_stack, new_stack, previous_stack,
rollback=False, error_wait_time=None):
"""Initialise with the existing stack and the new stack."""
self.existing_stack = existing_stack
self.new_stack = new_stack
self.previous_stack = previous_stack
self.rollback = rollback
self.error_wait_time = error_wait_time
self.existing_snippets = dict((n, r.frozen_definition())
for n, r in self.existing_stack.items())
def __repr__(self):
if self.rollback:
return '%s Rollback' % str(self.existing_stack)
else:
return '%s Update' % str(self.existing_stack)
@scheduler.wrappertask
def __call__(self):
"""Return a co-routine that updates the stack."""
cleanup_prev = scheduler.DependencyTaskGroup(
self.previous_stack.dependencies,
self._remove_backup_resource,
reverse=True)
self.updater = scheduler.DependencyTaskGroup(
self.dependencies(),
self._resource_update,
error_wait_time=self.error_wait_time)
if not self.rollback:
yield cleanup_prev()
try:
yield self.updater()
finally:
self.previous_stack.reset_dependencies()
def _resource_update(self, res):
if res.name in self.new_stack and self.new_stack[res.name] is res:
return self._process_new_resource_update(res)
else:
return self._process_existing_resource_update(res)
@scheduler.wrappertask
def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
LOG.debug("Deleting backup resource %s" % prev_res.name)
yield prev_res.destroy()
@staticmethod
def _exchange_stacks(existing_res, prev_res):
resource_objects.Resource.exchange_stacks(existing_res.stack.context,
existing_res.id, prev_res.id)
prev_stack, existing_stack = prev_res.stack, existing_res.stack
prev_stack.add_resource(existing_res)
existing_stack.add_resource(prev_res)
@scheduler.wrappertask
def _create_resource(self, new_res):
res_name = new_res.name
# Clean up previous resource
if res_name in self.previous_stack:
prev_res = self.previous_stack[res_name]
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
# Swap in the backup resource if it is in a valid state,
# instead of creating a new resource
if prev_res.status == prev_res.COMPLETE:
LOG.debug("Swapping in backup Resource %s" % res_name)
self._exchange_stacks(self.existing_stack[res_name],
prev_res)
return
LOG.debug("Deleting backup Resource %s" % res_name)
yield prev_res.destroy()
# Back up existing resource
if res_name in self.existing_stack:
LOG.debug("Backing up existing Resource %s" % res_name)
existing_res = self.existing_stack[res_name]
self.previous_stack.add_resource(existing_res)
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
self.existing_stack.add_resource(new_res)
# Save new resource definition to backup stack if it is not
# present in backup stack template already
# it allows to resolve all dependencies that existing resource
# can have if it was copied to backup stack
if (res_name not in
self.previous_stack.t[self.previous_stack.t.RESOURCES]):
LOG.debug("Backing up new Resource %s" % res_name)
definition = new_res.t.reparse(self.previous_stack,
new_res.stack.t)
self.previous_stack.t.add_resource(definition)
self.previous_stack.t.store(self.previous_stack.context)
yield new_res.create()
def _check_replace_restricted(self, res):
registry = res.stack.env.registry
restricted_actions = registry.get_rsrc_restricted_actions(res.name)
existing_res = self.existing_stack[res.name]
if 'replace' in restricted_actions:
ex = exception.ResourceActionRestricted(action='replace')
failure = exception.ResourceFailure(ex, existing_res,
existing_res.UPDATE)
existing_res._add_event(existing_res.UPDATE, existing_res.FAILED,
six.text_type(ex))
raise failure
@scheduler.wrappertask
def _process_new_resource_update(self, new_res):
res_name = new_res.name
if res_name in self.existing_stack:
if type(self.existing_stack[res_name]) is type(new_res):
existing_res = self.existing_stack[res_name]
try:
yield self._update_in_place(existing_res,
new_res)
except exception.UpdateReplace:
pass
else:
# Save updated resource definition to backup stack
# cause it allows the backup stack resources to be
# synchronized
LOG.debug("Backing up updated Resource %s" % res_name)
definition = existing_res.t.reparse(self.previous_stack,
existing_res.stack.t)
self.previous_stack.t.add_resource(definition)
self.previous_stack.t.store(self.previous_stack.context)
LOG.info(_LI("Resource %(res_name)s for stack "
"%(stack_name)s updated"),
{'res_name': res_name,
'stack_name': self.existing_stack.name})
return
else:
self._check_replace_restricted(new_res)
yield self._create_resource(new_res)
def _update_in_place(self, existing_res, new_res):
existing_snippet = self.existing_snippets[existing_res.name]
prev_res = self.previous_stack.get(new_res.name)
# Note the new resource snippet is resolved in the context
# of the existing stack (which is the stack being updated)
# but with the template of the new stack (in case the update
# is switching template implementations)
new_snippet = new_res.t.reparse(self.existing_stack,
self.new_stack.t)
return existing_res.update(new_snippet, existing_snippet,
prev_resource=prev_res)
@scheduler.wrappertask
def _process_existing_resource_update(self, existing_res):
res_name = existing_res.name
if res_name in self.previous_stack:
yield self._remove_backup_resource(self.previous_stack[res_name])
if res_name in self.new_stack:
new_res = self.new_stack[res_name]
if new_res.state == (new_res.INIT, new_res.COMPLETE):
# Already updated in-place
return
if existing_res.stack is not self.previous_stack:
yield existing_res.destroy()
if res_name not in self.new_stack:
self.existing_stack.remove_resource(res_name)
def dependencies(self):
"""Return the Dependencies graph for the update.
Returns a Dependencies object representing the dependencies between
update operations to move from an existing stack definition to a new
one.
"""
existing_deps = self.existing_stack.dependencies
new_deps = self.new_stack.dependencies
def edges():
# Create/update the new stack's resources in create order
for e in new_deps.graph().edges():
yield e
# Destroy/cleanup the old stack's resources in delete order
for e in existing_deps.graph(reverse=True).edges():
yield e
# Don't cleanup old resources until after they have been replaced
for name, res in six.iteritems(self.existing_stack):
if name in self.new_stack:
yield (res, self.new_stack[name])
return dependencies.Dependencies(edges())
def preview(self):
upd_keys = set(self.new_stack.resources.keys())
cur_keys = set(self.existing_stack.resources.keys())
common_keys = cur_keys.intersection(upd_keys)
deleted_keys = cur_keys.difference(upd_keys)
added_keys = upd_keys.difference(cur_keys)
updated_keys = []
replaced_keys = []
for key in common_keys:
current_res = self.existing_stack.resources[key]
updated_res = self.new_stack.resources[key]
current_props = current_res.frozen_definition().properties(
current_res.properties_schema, current_res.context)
updated_props = updated_res.frozen_definition().properties(
updated_res.properties_schema, updated_res.context)
# type comparison must match that in _process_new_resource_update
if type(current_res) is not type(updated_res):
replaced_keys.append(key)
continue
try:
if current_res.preview_update(updated_res.frozen_definition(),
current_res.frozen_definition(),
updated_props, current_props,
None):
updated_keys.append(key)
except exception.UpdateReplace:
replaced_keys.append(key)
return {
'unchanged': list(set(common_keys).difference(
set(updated_keys + replaced_keys))),
'updated': updated_keys,
'replaced': replaced_keys,
'added': list(added_keys),
'deleted': list(deleted_keys),
}
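# Illustrative shape of the dictionary returned by StackUpdate.preview() above
# (the resource names are hypothetical):
#
#     {'unchanged': ['server_1'], 'updated': ['volume_1'],
#      'replaced': ['network_1'], 'added': ['floating_ip_1'], 'deleted': []}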
|
apache-2.0
| -289,319,108,590,892,350
| 40.172043
| 79
| 0.582746
| false
| 4.419777
| false
| false
| false
|
ray-project/ray
|
release/xgboost_tests/workloads/tune_32x4.py
|
1
|
1835
|
"""Moderate Ray Tune run (32 trials, 4 actors).
This training run will start 32 Ray Tune trials, each starting 4 actors.
The cluster comprises 32 nodes.
Test owner: krfricke
Acceptance criteria: Should run through and report final results, as well
as the Ray Tune results table. No trials should error. All trials should
run in parallel.
"""
from collections import Counter
import json
import os
import time
import ray
from ray import tune
from xgboost_ray import RayParams
from ray.util.xgboost.release_test_util import train_ray
def train_wrapper(config, ray_params):
train_ray(
path="/data/classification.parquet",
num_workers=4,
num_boost_rounds=100,
num_files=64,
regression=False,
use_gpu=False,
ray_params=ray_params,
xgboost_params=config,
)
if __name__ == "__main__":
search_space = {
"eta": tune.loguniform(1e-4, 1e-1),
"subsample": tune.uniform(0.5, 1.0),
"max_depth": tune.randint(1, 9)
}
ray.init(address="auto")
ray_params = RayParams(
elastic_training=False,
max_actor_restarts=2,
num_actors=4,
cpus_per_actor=1,
gpus_per_actor=0)
start = time.time()
analysis = tune.run(
tune.with_parameters(train_wrapper, ray_params=ray_params),
config=search_space,
num_samples=32,
resources_per_trial=ray_params.get_tune_resources())
taken = time.time() - start
result = {
"time_taken": taken,
"trial_states": dict(
Counter([trial.status for trial in analysis.trials]))
}
test_output_json = os.environ.get("TEST_OUTPUT_JSON",
"/tmp/tune_32x4.json")
with open(test_output_json, "wt") as f:
json.dump(result, f)
print("PASSED.")
|
apache-2.0
| 452,494,953,157,767,740
| 24.486111
| 73
| 0.621253
| false
| 3.410781
| false
| false
| false
|
cdapio/website
|
scripts/generate-videos/main.py
|
1
|
4086
|
#!/usr/bin/python
# Copyright © 2015-2019 Cask Data, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import json
from youtube import API
MAX_RESULTS = 50
CHANNEL_ID='UCfkRcekMTa5GA2DdNKba7Jg'
api = None
def search_videos(page_token):
return api.get('search', part='id,snippet', channelId=CHANNEL_ID, maxResults=MAX_RESULTS, pageToken=page_token, type='video')
def video_preview(video):
preview_size = ['maxres', 'high', 'medium', 'standard', 'default']
thumbnails = video['snippet']['thumbnails']
preview = ''
for size in preview_size:
if size in thumbnails:
preview = thumbnails[size]['url']
break
return preview
def extract_video_data(video):
preview = ''
video_data = {}
video_data['videoId'] = video['id']['videoId']
video_data['title'] = video['snippet']['title']
video_data['description'] = video['snippet']['description']
video_data['preview'] = video_preview(video)
return video_data
def fetch_videos():
all_videos = []
total_items = MAX_RESULTS
page_token = ''
while page_token is not None:
response = search_videos(page_token)
if 'nextPageToken' in response:
all_videos = all_videos + list(map(extract_video_data, response['items']))
page_token = response['nextPageToken']
else:
page_token = None
return all_videos
def get_original_videos(path):
try:
with open(path) as video_file:
return json.load(video_file)
except:
print('File not found: %s. Will create new one.' % path)
return {
'videos': []
}
def merge_videos(original_videos, youtube_videos, visible, update, update_props):
props = update_props.split(',')
marked = []
for video in youtube_videos:
matched_video = next((v for v in original_videos['videos'] if v['videoId'] == video['videoId']), None)
if matched_video is None:
marked = [video['videoId']] + marked
video['visible'] = visible
original_videos['videos'] = [video] + original_videos['videos']
print('Added new video:\n Link: https://www.youtube.com/watch?v=%s \n Title: %s \n' % (video['videoId'], video['title']) )
else:
marked = [video['videoId']] + marked
if update is not False:
for prop in props:
matched_video[prop] = video[prop]
original_videos['videos'] = list(filter(lambda v: v['videoId'] in marked, original_videos['videos']))
return original_videos
def save_videos(data, file_path):
with open(file_path, 'w') as outfile:
json.dump(data, outfile, indent=2)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output', help='Absolute path to output file. Output file can exist.', required=True)
parser.add_argument('-k', '--api-key', help='Youtube API key.', required=True)
parser.add_argument('-v', '--visible', help='Append new videos as visible.', default=False)
parser.add_argument('-u', '--update', help='Update video in output file if it exists.', default=False)
  parser.add_argument('--update-attributes', help='Comma separated list of attributes allowed to update. Works only when --update flag is true', default='description,title,preview')
args = parser.parse_args()
global api
api = API(api_key=args.api_key, client_secret='', client_id='')
original_videos = get_original_videos(args.output)
youtube_videos = fetch_videos()
merged_videos = merge_videos(original_videos, youtube_videos, args.visible, args.update, args.update_attributes)
save_videos(merged_videos, args.output)
if __name__ == '__main__':
main()
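# Example invocation (the output path and API key are hypothetical; the flags
# are the ones defined in main() above):
#
#     python main.py --output data/videos.json --api-key YOUR_YOUTUBE_API_KEY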
|
apache-2.0
| 3,500,132,186,289,563,600
| 35.150442
| 204
| 0.682742
| false
| 3.570804
| false
| false
| false
|
morgangalpin/duckomatic
|
tests/utils/test_subscriber.py
|
1
|
1612
|
# -*- coding: utf-8 -*-
from pytest import raises
# The parametrize function is generated, so this doesn't work:
#
# from pytest.mark import parametrize
#
import pytest
parametrize = pytest.mark.parametrize
# from duckomatic import metadata
from duckomatic.utils.subscriber import (Subscriber, NoDataException)
class TestSubscriber(object):
@parametrize('id_prefix', [
'',
'test123'
])
def test_init(self, id_prefix):
subscriber = Subscriber(id_prefix)
assert type(subscriber) == Subscriber
assert subscriber.get_id().startswith(id_prefix)
@parametrize('topic, data', [
('', {}),
('test123', {'test': 123})
])
def test_update_and_simple_get_update(self, topic, data):
subscriber = Subscriber()
subscriber.update(topic, data)
(actual_topic, actual_data) = subscriber.get_update()
assert actual_topic == topic
assert actual_data == data
@parametrize('timeout', [
(0)
])
def test_get_update_with_timeout(self, timeout):
subscriber = Subscriber()
with raises(NoDataException):
subscriber.get_update(timeout=timeout)
# Should not get here as an exception should be raised.
assert False
# Exception was raised correctly.
assert True
@parametrize('id_prefix', [
'',
'test123'
])
def test_get_id_is_unique(self, id_prefix):
subscriber1 = Subscriber(id_prefix)
subscriber2 = Subscriber(id_prefix)
assert subscriber1.get_id() != subscriber2.get_id()
|
gpl-3.0
| -1,296,764,209,724,166,700
| 27.785714
| 69
| 0.620347
| false
| 4.01995
| true
| false
| false
|
scottwittenburg/web-hpc-manager
|
python/webserver/SecureRemoteLauncher.py
|
1
|
5903
|
import paramiko
import select
import argparse
import sys
import threading
import uuid
import tempfile
import os
import getpass
from ForwardSshTunnel import ForwardSshTunnel
class SecureRemoteLauncher(object) :
#-------------------------------------------------------------------------
# SecureRemoteLauncher constructor
#-------------------------------------------------------------------------
def __init__(self, mapFilePath) :
self.mappingFilePath = mapFilePath
self.sessionMap = {}
#-------------------------------------------------------------------------
# Create a port forwarding ssh tunnel
#-------------------------------------------------------------------------
def createTunnelOnRemotePort(self, transport, host, port) :
print 'Create a tunnel on remote port ' + str(port)
try:
tunnel = ForwardSshTunnel(port, # local port
host, # remote host
port, # remote port
transport) # SSHClient Transport object
tunnel.establishForwardTunnel()
except KeyboardInterrupt:
print 'C-c: Port forwarding stopped.'
except Exception as inst :
print 'Encountered exception in forwarding'
print inst
print 'Returning from createTunnelOnRemotePort()'
return tunnel
#-------------------------------------------------------------------------
# Rewrite the mapping file with the current session map
#-------------------------------------------------------------------------
def updateMappingFile(self) :
with open(self.mappingFilePath, 'w') as outfile :
for session in self.sessionMap :
outfile.write(session + ' ' + self.sessionMap[session] + '\n')
#-------------------------------------------------------------------------
# Wait for process to exit so that when it does we can end the tunnel
# thread and then end this waiting thread by returning from this
# function
#-------------------------------------------------------------------------
def waitOnChannelExitStatus(self, channel, sessionId, tunnel) :
# This call will block until channel process has finished
processReturnVal = channel.recv_exit_status()
# Now make sure to kill the thread which is running the port
# forwarding ssh tunnel
print 'Channel exit status ready, process has terminated'
if tunnel is not None :
print 'Attempting to end tunnel request loop...'
tunnel.terminateRequestLoop()
# Next remove this session from the map
del self.sessionMap[sessionId]
# Finally rewrite the map file with the updated session info
self.updateMappingFile()
print 'Returning from wait thread'
#-------------------------------------------------------------------------
# Try to start pvweb on remote machine until we successfully start on a
# port.
#-------------------------------------------------------------------------
def startPvwebOnOpenPortInRange(self, transport, remoteHost, fileToLoad, portRange) :
#port = random.randrange(portRange[0], portRange[1], 1)
port = 9010
# Works on mayall
#cmdFirstPart = 'export LD_LIBRARY_PATH=/opt/python-2.7.3/lib ; export DISPLAY=:0.0 ; /home/kitware/projects/ParaView/build-make-gpu/bin/pvpython /home/kitware/projects/ParaView/build-make-gpu/lib/site-packages/paraview/web/pv_web_visualizer.py --data-dir /home/kitware/Documents/haloregions --port '
# Works on solaris
cmdFirstPart = 'export DISPLAY=:0.0 ; /home/scott/projects/ParaView/build-make-gpu/bin/pvpython /home/scott/projects/ParaView/build-make-gpu/lib/site-packages/paraview/web/pv_web_visualizer.py --data-dir /home/scott/Documents/cosmodata/haloregions --port '
started = False
while started == False :
cmd = cmdFirstPart + str(port) + ' --load-file ' + fileToLoad + ' -f'
channel = transport.open_session()
channel.exec_command(cmd)
characters = ''
while True:
if channel.exit_status_ready():
break
rl, wl, xl = select.select([channel],[],[],0.0)
if len(rl) > 0 :
characters = channel.recv(1024)
if 'CannotListenError' in characters or 'Address already in use' in characters :
print 'port ' + str(port) + ' is already being used'
elif ('tarting on ' + str(port)) in characters:
print 'Ahh, we have finally started on port ' + str(port)
# write the mapping file here
sessionId = str(uuid.uuid1())
connectStr = 'localhost:' + str(port)
self.sessionMap[sessionId] = connectStr
self.updateMappingFile()
tunnel = self.createTunnelOnRemotePort(transport, remoteHost, port)
print 'Have now returned from readyCallback() !!!!'
t = threading.Thread(target=self.waitOnChannelExitStatus,
args=[channel, sessionId, tunnel],
kwargs={})
t.start()
print 'wait thread started, returning from startPvwebOnOpenPortInRange()'
return (sessionId, port)
started = True
if started == False :
#port = random.randrange(portRange[0], portRange[1], 1)
port += 1
print 'Returning from startPvwebOnOpenPortInRange()'
|
bsd-3-clause
| 8,105,040,297,267,630,000
| 42.725926
| 308
| 0.509571
| false
| 5.058269
| false
| false
| false
|
RealTimeWeb/wikisite
|
MoinMoin/macro/OrphanedPages.py
|
1
|
1487
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - OrphanedPages Macro
@copyright: 2001 Juergen Hermann <jh@web.de>
@license: GNU GPL, see COPYING for details.
"""
Dependencies = ["pages"]
def macro_OrphanedPages(macro):
_ = macro.request.getText
if macro.request.mode_getpagelinks: # prevent recursion
return ''
if macro.request.isSpiderAgent: # reduce bot cpu usage
return ''
# delete all linked pages from a dict of all pages
pages = macro.request.rootpage.getPageDict()
orphaned = {}
orphaned.update(pages)
for page in pages.values():
links = page.getPageLinks(macro.request)
for link in links:
if link in orphaned:
del orphaned[link]
result = []
f = macro.formatter
if not orphaned:
result.append(f.paragraph(1))
result.append(f.text(_("No orphaned pages in this wiki.")))
result.append(f.paragraph(0))
else:
# return a list of page links
orphanednames = orphaned.keys()
orphanednames.sort()
result.append(f.number_list(1))
for name in orphanednames:
if not name:
continue
result.append(f.listitem(1))
result.append(f.pagelink(1, name, generated=1))
result.append(f.text(name))
result.append(f.pagelink(0, name))
result.append(f.listitem(0))
result.append(f.number_list(0))
return ''.join(result)
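# The macro is typically invoked from a wiki page with standard MoinMoin macro
# markup (an assumption about usage, not defined in this file):
#
#     <<OrphanedPages>>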
|
apache-2.0
| 3,464,022,933,834,179,600
| 28.156863
| 67
| 0.599193
| false
| 3.755051
| false
| false
| false
|
zepto/musio-python2
|
examples/musioencode.py
|
1
|
8751
|
#!/usr/bin/env python2
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# Test the vorbis encoder.
# Copyright (C) 2013 Josiah Gordon <josiahg@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Test the vorbis encoder.
"""
from __future__ import print_function
def main(args):
""" Encode args['filename'] times.
"""
from os.path import basename as os_basename
from os.path import isfile as os_isfile
from os.path import splitext as os_splitext
from sys import stdin as sys_stdin
from sys import stdout as sys_stdout
from select import select
from time import sleep as time_sleep
from termios import tcgetattr, tcsetattr, ECHO, ICANON, TCSANOW
from termios import VMIN, VTIME
from musio import open_file, open_device
if args['debug']:
from musio import io_util
io_util.DEBUG = True
filename = args['filename']
output = os_splitext(os_basename(filename))[0] + '.' + args['filetype']
output_bytes = output.encode('utf-8', 'surrogateescape')
output_printable = output_bytes.decode('utf-8', 'ignore')
if os_isfile(output):
if raw_input("Overwrite %s (y/n): " % output_printable).lower().startswith('n'):
return
# Save the current terminal state.
normal = tcgetattr(sys_stdin)
quiet = tcgetattr(sys_stdin)
# Do not wait for key press and don't echo.
quiet[3] &= ~(ECHO | ICANON)
quiet[6][VMIN] = 0
quiet[6][VTIME] = 0
# Set the new terminal state.
tcsetattr(sys_stdin, TCSANOW, quiet)
# Value returned to tell the calling function whether to quit or
# not.
quit_val = True
if args['filetype'].lower() == 'ogg':
        quality = args['quality'] / 10.0 if args['quality'] in range(-1, 11) else 0.5
elif args['filetype'].lower() == 'mp3':
quality = args['quality'] if args['quality'] in range(0, 10) else 2
try:
with open_file(**args) as in_file:
in_file_title = in_file._info_dict.get('title',
in_file._info_dict['name'])
comment_dict = {'title': in_file_title}
comment_dict.update(in_file._info_dict)
for i in ['title', 'artist', 'album', 'year', 'comment',
'track', 'genre']:
if args.get(i, ''):
comment_dict[i] = args[i]
with open_file(output, 'w', depth=in_file.depth, rate=in_file.rate,
channels=in_file.channels, quality=quality,
comment_dict=comment_dict) as out_file:
in_file.loops = 0
if args['show_position']:
filename_bytes = filename.encode('utf-8', 'surrogateescape')
filename_printable = filename_bytes.decode('utf-8', 'ignore')
print("Encoding: %s to %s" % (filename, output))
print(in_file)
for data in in_file:
if args['show_position']:
if in_file.length > 0:
# Calculate the percentage played.
pos = (in_file.position * 100) / float(in_file.length)
# Make the string.
pos_str = 'Position: %.2f%%' % pos
# Find the length of the string.
format_len = len(pos_str) + 2
# Print the string and after erasing the old
# one using ansi escapes.
print('\033[%dD\033[K%s' % (format_len, pos_str),
end='')
sys_stdout.flush()
out_file.write(data)
# Check for input.
r, _, _ = select([sys_stdin], [], [], 0)
# Get input if there was any otherwise continue.
if r:
command = r[0].readline().lower()
# Handle input commands.
if command.startswith('q'):
quit_val = False
break
elif command == '\n':
break
except Exception as err:
print("Error: %s" % err)
raise(err)
finally:
# Re-set the terminal state.
tcsetattr(sys_stdin, TCSANOW, normal)
if args['show_position']:
print("\nDone.")
return quit_val
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description="Musio encoder")
parser.add_argument('-e', '--quality', action='store', default=-10, type=int,
help='Encoding quality (1-10)', dest='quality')
parser.add_argument('-t', '--track', action='store', default=0, type=int,
help='Track to play', dest='track')
parser.add_argument('-tt', '--title', action='store', default='',
help='id3 Title tag', dest='title')
parser.add_argument('-ta', '--artist', action='store', default='',
help='id3 Artist tag', dest='artist')
parser.add_argument('-tl', '--album', action='store', default='',
help='id3 Album tag', dest='album')
parser.add_argument('-ty', '--year', action='store', default='',
help='id3 Year tag', dest='year')
parser.add_argument('-tc', '--comment', action='store', default='',
help='id3 Comment tag', dest='comment')
parser.add_argument('-tr', '--id3track', action='store', default='',
help='id3 Track tag', dest='track')
parser.add_argument('-tg', '--genre', action='store', default=0,
type=int, help='id3 Genre tag', dest='genre')
parser.add_argument('-p', '--path', action='store', default=[],
type=lambda a: a.split(','), help='Codec path',
dest='mod_path')
parser.add_argument('-b', '--blacklist', action='store', default=[],
type=lambda a: a.split(','), help='Blacklist a Codec',
dest='blacklist')
parser.add_argument('-s', '--soundfont', action='store',
default='/usr/share/soundfonts/fluidr3/FluidR3GM.SF2',
help='Soundfont to use when playing midis',
dest='soundfont')
parser.add_argument('-f', '--filetype', action='store',
default='ogg',
help='The output format',
dest='filetype')
parser.add_argument('-q', '--quiet', action='store_false', default=True,
help='Don\'t show playback percentage.',
dest='show_position')
parser.add_argument('-lg', '--list-genres', action='store_true',
default=False,
help='Print a list of valid genres and exit.',
dest='list_genres')
parser.add_argument('-d', '--debug', action='store_true', default=False,
help='Enable debug error messages.',
dest='debug')
parser.add_argument('-i', '--input', dest='input_filename', nargs='+')
args = parser.parse_args()
if args.list_genres:
# Print out valid genres.
from musio.mp3_file import get_genre_list
print("ID\tGenre")
for genre_id, genre in enumerate(get_genre_list()):
if genre:
print("%s\t%s" % (genre_id, genre))
elif args.input_filename:
# Copy the args dict to use later
args_dict = args.__dict__
# Pop the filenames list out of the args dict.
filenames = args_dict.pop('input_filename')
# Loop over all the filenames playing each one.
for filename in filenames:
# Pass only one filename to the main function.
args_dict['filename'] = filename
if not main(args_dict):
break
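# Example command line (the input file path is hypothetical; the flags are the
# ones defined by the argument parser above):
#
#     python2 musioencode.py -f ogg -e 5 -i /path/to/song.flac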
|
gpl-3.0
| 5,520,849,006,102,449,000
| 40.278302
| 88
| 0.531368
| false
| 4.225495
| false
| false
| false
|
locke105/mc-watchdog
|
watchdog.py
|
1
|
3354
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Mathew Odden <locke105@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import shlex
import socket
import subprocess
import time
import mc_info
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
SERVER_CMD = 'java -Xms512M -Xmx1G -jar ftbserver.jar'
# interval in seconds between server status checks
POLL_INTERVAL = 30
class Service(object):
def __init__(self, start_cmd):
self.start_cmd = start_cmd
self.process = None
def run(self):
"""Begin main monitoring loop of service.
Starts the service if not already running.
"""
try:
while True:
if not self.check_server():
if not self._process_dead():
LOG.warning("Server dead but process still around. "
"Attempting to kill process...")
self.stop()
LOG.warning("Server process dead. Restarting...")
self.start()
# wait awhile for next poll
time.sleep(POLL_INTERVAL)
except:
# catch keyboard interrupt
self.stop()
def start(self):
args = shlex.split(self.start_cmd)
LOG.info("Starting service with command: %s" %
' '.join(args))
self.process = subprocess.Popen(args)
def _process_dead(self):
if self.process is None:
return True
self.process.poll()
if self.process.returncode is not None:
return True
return False
def stop(self):
"""Stop the underlying service process."""
# no process running
if self.process is None:
return
self.process.poll()
if self.process.returncode is not None:
return self.process.returncode
# send first stop signal
LOG.warning("Sending SIGTERM...")
self.process.terminate()
time.sleep(15)
self.process.poll()
if self.process.returncode is not None:
return self.process.returncode
# send kill signal and wait
LOG.warning("Process still running. Sending SIGKILL...")
self.process.kill()
self.process.wait()
return self.process.returncode
def check_server(self):
try:
sinfo = mc_info.get_info(host='localhost', port=35565)
except socket.error:
LOG.warning("Couldn't get server info!")
return False
LOG.debug("Server info: %s" % sinfo)
return True
if __name__ == '__main__':
LOG = logging.getLogger('watchdog')
server = Service(SERVER_CMD)
server.run()
|
apache-2.0
| -536,500,030,523,389,630
| 27.666667
| 77
| 0.596899
| false
| 4.34456
| false
| false
| false
|
lino-framework/tera
|
lino_tera/lib/invoicing/models.py
|
1
|
1528
|
# -*- coding: UTF-8 -*-
# Copyright 2016-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""The :xfile:`models.py` module for :mod:`lino_voga.lib.invoicing`.
"""
from __future__ import unicode_literals
from lino_xl.lib.invoicing.models import *
from lino.api import _
class Plan(Plan):
"""An extended invoicing plan.
.. attribute:: course
If this field is nonempty, select only enrolments of that
given course.
"""
class Meta(Plan.Meta):
app_label = 'invoicing'
abstract = dd.is_abstract_model(__name__, 'Plan')
course = dd.ForeignKey('courses.Course', blank=True, null=True)
Plans.detail_layout = """user area today min_date max_date
partner course
invoicing.ItemsByPlan
"""
# from lino.modlib.users.mixins import StartPlan
from lino_xl.lib.invoicing.actions import StartInvoicing
class StartInvoicingForCourse(StartInvoicing):
"""Start an invoicing plan for this course.
This is installed onto the :class:`courses.Course
<lino_voga.lib.courses.models.Course>` model as `start_invoicing`.
"""
show_in_bbar = True
select_rows = True
def get_options(self, ar):
course = ar.selected_rows[0]
assert isinstance(course, rt.models.courses.Course)
return dict(course=course, partner=None)
@dd.receiver(dd.pre_analyze)
def install_start_action(sender=None, **kwargs):
rt.models.courses.Course.start_invoicing = StartInvoicingForCourse()
|
bsd-2-clause
| -857,604,811,259,924,500
| 23.645161
| 78
| 0.684555
| false
| 3.395556
| false
| false
| false
|
NORDUnet/opennsa
|
opennsa/config.py
|
1
|
11031
|
"""
Configuration reader and defaults.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2011)
"""
import os
import configparser
from opennsa import constants as cnt
# defaults
DEFAULT_CONFIG_FILE = '/etc/opennsa.conf'
DEFAULT_LOG_FILE = '/var/log/opennsa.log'
DEFAULT_TLS = 'true'
DEFAULT_TOPOLOGY_FILE = '/usr/local/share/nsi/topology.owl'
DEFAULT_TCP_PORT = 9080
DEFAULT_TLS_PORT = 9443
DEFAULT_VERIFY = True
DEFAULT_CERTIFICATE_DIR = '/etc/ssl/certs' # This will work on most modern linux distros
# config blocks and options
BLOCK_SERVICE = 'service'
BLOCK_DUD = 'dud'
BLOCK_JUNIPER_EX = 'juniperex'
BLOCK_JUNIPER_VPLS = 'junipervpls'
BLOCK_FORCE10 = 'force10'
BLOCK_BROCADE = 'brocade'
BLOCK_NCSVPN = 'ncsvpn'
BLOCK_PICA8OVS = 'pica8ovs'
BLOCK_JUNOSMX = 'junosmx'
BLOCK_JUNOSEX = 'junosex'
BLOCK_JUNOSSPACE = 'junosspace'
BLOCK_OESS = 'oess'
BLOCK_CUSTOM_BACKEND = 'custombackend'
# service block
DOMAIN = 'domain' # mandatory
NETWORK_NAME = 'network' # legacy, used to be mandatory
LOG_FILE = 'logfile'
HOST = 'host'
PORT = 'port'
TLS = 'tls'
REST = 'rest'
NRM_MAP_FILE = 'nrmmap'
PEERS = 'peers'
POLICY = 'policy'
PLUGIN = 'plugin'
SERVICE_ID_START = 'serviceid_start'
# database
DATABASE = 'database' # mandatory
DATABASE_USER = 'dbuser' # mandatory
DATABASE_PASSWORD = 'dbpassword' # can be none (os auth)
DATABASE_HOST = 'dbhost' # can be none (local db)
# tls
KEY = 'key' # mandatory, if tls is set
CERTIFICATE = 'certificate' # mandatory, if tls is set
CERTIFICATE_DIR = 'certdir' # mandatory (but dir can be empty)
VERIFY_CERT = 'verify'
ALLOWED_HOSTS = 'allowedhosts' # comma seperated list
# generic stuff
_SSH_HOST = 'host'
_SSH_PORT = 'port'
_SSH_HOST_FINGERPRINT = 'fingerprint'
_SSH_USER = 'user'
_SSH_PASSWORD = 'password'
_SSH_PUBLIC_KEY = 'publickey'
_SSH_PRIVATE_KEY = 'privatekey'
AS_NUMBER = 'asnumber'
# TODO: Don't do backend specifics for everything, it causes confusion, and doesn't really solve anything
# juniper block - same for mx / ex backends
JUNIPER_HOST = _SSH_HOST
JUNIPER_PORT = _SSH_PORT
JUNIPER_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
JUNIPER_USER = _SSH_USER
JUNIPER_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
JUNIPER_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
# force10 block
FORCE10_HOST = _SSH_HOST
FORCE10_PORT = _SSH_PORT
FORCE10_USER = _SSH_USER
FORCE10_PASSWORD = _SSH_PASSWORD
FORCE10_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
FORCE10_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
FORCE10_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
# Brocade block
BROCADE_HOST = _SSH_HOST
BROCADE_PORT = _SSH_PORT
BROCADE_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
BROCADE_USER = _SSH_USER
BROCADE_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
BROCADE_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
BROCADE_ENABLE_PASSWORD = 'enablepassword'
# Pica8 OVS
PICA8OVS_HOST = _SSH_HOST
PICA8OVS_PORT = _SSH_PORT
PICA8OVS_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
PICA8OVS_USER = _SSH_USER
PICA8OVS_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
PICA8OVS_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
PICA8OVS_DB_IP = 'dbip'
# NCS VPN Backend
NCS_SERVICES_URL = 'url'
NCS_USER = 'user'
NCS_PASSWORD = 'password'
# JUNOS block
JUNOS_HOST = _SSH_HOST
JUNOS_PORT = _SSH_PORT
JUNOS_HOST_FINGERPRINT = _SSH_HOST_FINGERPRINT
JUNOS_USER = _SSH_USER
JUNOS_SSH_PUBLIC_KEY = _SSH_PUBLIC_KEY
JUNOS_SSH_PRIVATE_KEY = _SSH_PRIVATE_KEY
JUNOS_ROUTERS = 'routers'
#Junosspace backend
SPACE_USER = 'space_user'
SPACE_PASSWORD = 'space_password'
SPACE_API_URL = 'space_api_url'
SPACE_ROUTERS = 'routers'
SPACE_CONFIGLET_ACTIVATE_LOCAL = 'configlet_activate_local'
SPACE_CONFIGLET_ACTIVATE_REMOTE = 'configlet_activate_remote'
SPACE_CONFIGLET_DEACTIVATE_LOCAL = 'configlet_deactivate_local'
SPACE_CONFIGLET_DEACTIVATE_REMOTE = 'configlet_deactivate_remote'
# OESS
OESS_URL = 'url'
OESS_USER = 'username'
OESS_PASSWORD = 'password'
OESS_WORKGROUP = 'workgroup'
class ConfigurationError(Exception):
"""
Raised in case of invalid/inconsistent configuration.
"""
class Peer(object):
def __init__(self, url, cost):
self.url = url
self.cost = cost
def readConfig(filename):
cfg = configparser.SafeConfigParser()
cfg.add_section(BLOCK_SERVICE)
cfg.read( [ filename ] )
return cfg
def readVerifyConfig(cfg):
"""
Read a config and verify that things are correct. Will also fill in
default values where applicable.
This is supposed to be used during application creation (before service
    start) to ensure that simple configuration errors do not pop up after
daemonization.
Returns a "verified" config, which is a dictionary.
"""
vc = {}
# Check for deprecated / old invalid stuff
try:
cfg.get(BLOCK_SERVICE, NRM_MAP_FILE)
raise ConfigurationError('NRM Map file should be specified under backend')
except configparser.NoOptionError:
pass
# check / extract
try:
vc[DOMAIN] = cfg.get(BLOCK_SERVICE, DOMAIN)
except configparser.NoOptionError:
raise ConfigurationError('No domain name specified in configuration file (mandatory, see docs/migration)')
try:
cfg.get(BLOCK_SERVICE, NETWORK_NAME)
raise ConfigurationError('Network name no longer used, use domain (see docs/migration)')
except configparser.NoOptionError:
pass
try:
vc[LOG_FILE] = cfg.get(BLOCK_SERVICE, LOG_FILE)
except configparser.NoOptionError:
vc[LOG_FILE] = DEFAULT_LOG_FILE
try:
nrm_map_file = cfg.get(BLOCK_SERVICE, NRM_MAP_FILE)
if not os.path.exists(nrm_map_file):
raise ConfigurationError('Specified NRM mapping file does not exist (%s)' % nrm_map_file)
vc[NRM_MAP_FILE] = nrm_map_file
except configparser.NoOptionError:
vc[NRM_MAP_FILE] = None
try:
vc[REST] = cfg.getboolean(BLOCK_SERVICE, REST)
except configparser.NoOptionError:
vc[REST] = False
try:
peers_raw = cfg.get(BLOCK_SERVICE, PEERS)
vc[PEERS] = [ Peer(purl.strip(), 1) for purl in peers_raw.split('\n') ]
except configparser.NoOptionError:
vc[PEERS] = None
try:
vc[HOST] = cfg.get(BLOCK_SERVICE, HOST)
except configparser.NoOptionError:
vc[HOST] = None
try:
vc[TLS] = cfg.getboolean(BLOCK_SERVICE, TLS)
except configparser.NoOptionError:
vc[TLS] = DEFAULT_TLS
try:
vc[PORT] = cfg.getint(BLOCK_SERVICE, PORT)
except configparser.NoOptionError:
vc[PORT] = DEFAULT_TLS_PORT if vc[TLS] else DEFAULT_TCP_PORT
try:
policies = cfg.get(BLOCK_SERVICE, POLICY).split(',')
for policy in policies:
if not policy in (cnt.REQUIRE_USER, cnt.REQUIRE_TRACE, cnt.AGGREGATOR, cnt.ALLOW_HAIRPIN):
raise ConfigurationError('Invalid policy: %s' % policy)
vc[POLICY] = policies
except configparser.NoOptionError:
vc[POLICY] = []
try:
vc[PLUGIN] = cfg.get(BLOCK_SERVICE, PLUGIN)
except configparser.NoOptionError:
vc[PLUGIN] = None
# database
try:
vc[DATABASE] = cfg.get(BLOCK_SERVICE, DATABASE)
except configparser.NoOptionError:
raise ConfigurationError('No database specified in configuration file (mandatory)')
try:
vc[DATABASE_USER] = cfg.get(BLOCK_SERVICE, DATABASE_USER)
except configparser.NoOptionError:
raise ConfigurationError('No database user specified in configuration file (mandatory)')
try:
vc[DATABASE_PASSWORD] = cfg.get(BLOCK_SERVICE, DATABASE_PASSWORD)
except configparser.NoOptionError:
vc[DATABASE_PASSWORD] = None
try:
vc[DATABASE_HOST] = cfg.get(BLOCK_SERVICE, DATABASE_HOST)
except configparser.NoOptionError:
vc[DATABASE_HOST] = None
try:
vc[SERVICE_ID_START] = cfg.get(BLOCK_SERVICE, SERVICE_ID_START)
except configparser.NoOptionError:
vc[SERVICE_ID_START] = None
# we always extract certdir and verify as we need that for performing https requests
try:
certdir = cfg.get(BLOCK_SERVICE, CERTIFICATE_DIR)
if not os.path.exists(certdir):
raise ConfigurationError('Specified certdir does not exist (%s)' % certdir)
vc[CERTIFICATE_DIR] = certdir
except configparser.NoOptionError:
vc[CERTIFICATE_DIR] = DEFAULT_CERTIFICATE_DIR
try:
vc[VERIFY_CERT] = cfg.getboolean(BLOCK_SERVICE, VERIFY_CERT)
except configparser.NoOptionError:
vc[VERIFY_CERT] = DEFAULT_VERIFY
# tls
if vc[TLS]:
try:
hostkey = cfg.get(BLOCK_SERVICE, KEY)
hostcert = cfg.get(BLOCK_SERVICE, CERTIFICATE)
if not os.path.exists(hostkey):
raise ConfigurationError('Specified hostkey does not exist (%s)' % hostkey)
if not os.path.exists(hostcert):
raise ConfigurationError('Specified hostcert does not exist (%s)' % hostcert)
vc[KEY] = hostkey
vc[CERTIFICATE] = hostcert
try:
allowed_hosts_cfg = cfg.get(BLOCK_SERVICE, ALLOWED_HOSTS)
vc[ALLOWED_HOSTS] = allowed_hosts_cfg.split(',')
except:
pass
except configparser.NoOptionError as e:
# Not enough options for configuring tls context
raise ConfigurationError('Missing TLS option: %s' % str(e))
# backends
backends = {}
for section in cfg.sections():
if section == 'service':
continue
if ':' in section:
backend_type, name = section.split(':',2)
else:
backend_type = section
name = ''
if name in backends:
raise ConfigurationError('Can only have one backend named "%s"' % name)
if backend_type in (BLOCK_DUD, BLOCK_JUNIPER_EX, BLOCK_JUNIPER_VPLS, BLOCK_JUNOSMX, BLOCK_FORCE10, BLOCK_BROCADE,
BLOCK_NCSVPN, BLOCK_PICA8OVS, BLOCK_OESS, BLOCK_JUNOSSPACE, BLOCK_JUNOSEX,
BLOCK_CUSTOM_BACKEND, 'asyncfail'):
backend_conf = dict( cfg.items(section) )
backend_conf['_backend_type'] = backend_type
backends[name] = backend_conf
vc['backend'] = backends
return vc
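# Example (added sketch, not part of the original module): typical startup code
# would parse the configuration file and verify it in one step; the path below
# is hypothetical.
#
#   cfg = readConfig('/etc/nsi/service.conf')
#   vc = readVerifyConfig(cfg)      # raises ConfigurationError on invalid input
#   domain, port, backends = vc[DOMAIN], vc[PORT], vc['backend']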
|
bsd-3-clause
| -2,675,240,323,979,953,000
| 30.42735
| 121
| 0.619164
| false
| 3.516417
| true
| false
| false
|
NoBodyCam/TftpPxeBootBareMetal
|
nova/network/api.py
|
1
|
16995
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
from nova.db import base
from nova import flags
from nova.network import model as network_model
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
def refresh_cache(f):
"""
Decorator to update the instance_info_cache
Requires context and instance as function args
"""
argspec = inspect.getargspec(f)
@functools.wraps(f)
def wrapper(self, context, *args, **kwargs):
res = f(self, context, *args, **kwargs)
try:
# get the instance from arguments (or raise ValueError)
instance = kwargs.get('instance')
if not instance:
instance = args[argspec.args.index('instance') - 2]
except ValueError:
msg = _('instance is a required argument to use @refresh_cache')
raise Exception(msg)
# get nw_info from return if possible, otherwise call for it
nw_info = res if isinstance(res, network_model.NetworkInfo) else None
update_instance_cache_with_nw_info(self, context, instance, nw_info,
*args, **kwargs)
# return the original function's return value
return res
return wrapper
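# Example (added sketch, not part of the original module): @refresh_cache is
# meant to decorate API methods that receive context and instance; the class and
# method below are hypothetical, real uses appear further down (for example
# associate_floating_ip and get_instance_nw_info).
#
#   class ExampleAPI(base.Base):
#       @refresh_cache
#       def example_call(self, context, instance):
#           return self._get_instance_nw_info(context, instance)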
def update_instance_cache_with_nw_info(api, context, instance,
nw_info=None,
*args,
**kwargs):
try:
nw_info = nw_info or api._get_instance_nw_info(context, instance)
# update cache
cache = {'network_info': nw_info.json()}
api.db.instance_info_cache_update(context, instance['uuid'], cache)
except Exception as e:
LOG.exception('Failed storing info cache', instance=instance)
LOG.debug(_('args: %s') % (args or {}))
LOG.debug(_('kwargs: %s') % (kwargs or {}))
class API(base.Base):
"""API for interacting with the network manager."""
def get_all(self, context):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_all_networks'})
def get(self, context, network_uuid):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_network',
'args': {'network_uuid': network_uuid}})
def create(self, context, **kwargs):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'create_networks',
'args': kwargs})
def delete(self, context, network_uuid):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'delete_network',
'args': {'fixed_range': None,
'uuid': network_uuid}})
def disassociate(self, context, network_uuid):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'disassociate_network',
'args': {'network_uuid': network_uuid}})
def get_fixed_ip(self, context, id):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_fixed_ip',
'args': {'id': id}})
def get_fixed_ip_by_address(self, context, address):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_fixed_ip_by_address',
'args': {'address': address}})
def get_floating_ip(self, context, id):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_floating_ip',
'args': {'id': id}})
def get_floating_ip_pools(self, context):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_floating_pools'})
def get_floating_ip_by_address(self, context, address):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_floating_ip_by_address',
'args': {'address': address}})
def get_floating_ips_by_project(self, context):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_floating_ips_by_project'})
def get_floating_ips_by_fixed_address(self, context, fixed_address):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_floating_ips_by_fixed_address',
'args': {'fixed_address': fixed_address}})
def get_instance_id_by_floating_address(self, context, address):
# NOTE(tr3buchet): i hate this
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_instance_id_by_floating_address',
'args': {'address': address}})
def get_vifs_by_instance(self, context, instance):
# NOTE(vish): When the db calls are converted to store network
# data by instance_uuid, this should pass uuid instead.
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_vifs_by_instance',
'args': {'instance_id': instance['id']}})
def get_vif_by_mac_address(self, context, mac_address):
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_vif_by_mac_address',
'args': {'mac_address': mac_address}})
def allocate_floating_ip(self, context, pool=None):
"""Adds a floating ip to a project from a pool. (allocates)"""
# NOTE(vish): We don't know which network host should get the ip
# when we allocate, so just send it to any one. This
# will probably need to move into a network supervisor
# at some point.
return rpc.call(context,
FLAGS.network_topic,
{'method': 'allocate_floating_ip',
'args': {'project_id': context.project_id,
'pool': pool}})
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
"""Removes floating ip with address from a project. (deallocates)"""
rpc.call(context,
FLAGS.network_topic,
{'method': 'deallocate_floating_ip',
'args': {'address': address,
'affect_auto_assigned': affect_auto_assigned}})
@refresh_cache
def associate_floating_ip(self, context, instance,
floating_address, fixed_address,
affect_auto_assigned=False):
"""Associates a floating ip with a fixed ip.
ensures floating ip is allocated to the project in context
"""
orig_instance_uuid = rpc.call(context,
FLAGS.network_topic,
{'method': 'associate_floating_ip',
'args': {'floating_address': floating_address,
'fixed_address': fixed_address,
'affect_auto_assigned': affect_auto_assigned}})
if orig_instance_uuid:
msg_dict = dict(address=floating_address,
instance_id=orig_instance_uuid)
LOG.info(_('re-assign floating IP %(address)s from '
'instance %(instance_id)s') % msg_dict)
orig_instance = self.db.instance_get_by_uuid(context,
orig_instance_uuid)
# purge cached nw info for the original instance
update_instance_cache_with_nw_info(self, context, orig_instance)
@refresh_cache
def disassociate_floating_ip(self, context, instance, address,
affect_auto_assigned=False):
"""Disassociates a floating ip from fixed ip it is associated with."""
rpc.call(context,
FLAGS.network_topic,
{'method': 'disassociate_floating_ip',
'args': {'address': address}})
@refresh_cache
def allocate_for_instance(self, context, instance, **kwargs):
"""Allocates all network structures for an instance.
:returns: network info as from get_instance_nw_info() below
"""
args = kwargs
args['instance_id'] = instance['id']
args['instance_uuid'] = instance['uuid']
args['project_id'] = instance['project_id']
args['host'] = instance['host']
args['rxtx_factor'] = instance['instance_type']['rxtx_factor']
nw_info = rpc.call(context, FLAGS.network_topic,
{'method': 'allocate_for_instance',
'args': args})
return network_model.NetworkInfo.hydrate(nw_info)
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocates all network structures related to instance."""
args = kwargs
args['instance_id'] = instance['id']
args['project_id'] = instance['project_id']
args['host'] = instance['host']
rpc.call(context, FLAGS.network_topic,
{'method': 'deallocate_for_instance',
'args': args})
def add_fixed_ip_to_instance(self, context, instance, network_id):
"""Adds a fixed ip to instance from specified network."""
args = {'instance_id': instance['id'],
'host': instance['host'],
'network_id': network_id}
rpc.call(context, FLAGS.network_topic,
{'method': 'add_fixed_ip_to_instance',
'args': args})
def remove_fixed_ip_from_instance(self, context, instance, address):
"""Removes a fixed ip from instance from specified network."""
args = {'instance_id': instance['id'],
'host': instance['host'],
'address': address}
rpc.call(context, FLAGS.network_topic,
{'method': 'remove_fixed_ip_from_instance',
'args': args})
def add_network_to_project(self, context, project_id):
"""Force adds another network to a project."""
rpc.call(context, FLAGS.network_topic,
{'method': 'add_network_to_project',
'args': {'project_id': project_id}})
@refresh_cache
def get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
return self._get_instance_nw_info(context, instance)
def _get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
args = {'instance_id': instance['id'],
'instance_uuid': instance['uuid'],
'rxtx_factor': instance['instance_type']['rxtx_factor'],
'host': instance['host'],
'project_id': instance['project_id']}
nw_info = rpc.call(context, FLAGS.network_topic,
{'method': 'get_instance_nw_info',
'args': args})
return network_model.NetworkInfo.hydrate(nw_info)
def validate_networks(self, context, requested_networks):
"""validate the networks passed at the time of creating
the server
"""
args = {'networks': requested_networks}
return rpc.call(context, FLAGS.network_topic,
{'method': 'validate_networks',
'args': args})
def get_instance_uuids_by_ip_filter(self, context, filters):
"""Returns a list of dicts in the form of
{'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
"""
args = {'filters': filters}
return rpc.call(context, FLAGS.network_topic,
{'method': 'get_instance_uuids_by_ip_filter',
'args': args})
def get_dns_domains(self, context):
"""Returns a list of available dns domains.
These can be used to create DNS entries for floating ips.
"""
return rpc.call(context,
FLAGS.network_topic,
{'method': 'get_dns_domains'})
def add_dns_entry(self, context, address, name, dns_type, domain):
"""Create specified DNS entry for address"""
args = {'address': address,
'name': name,
'dns_type': dns_type,
'domain': domain}
return rpc.call(context, FLAGS.network_topic,
{'method': 'add_dns_entry',
'args': args})
def modify_dns_entry(self, context, name, address, domain):
"""Create specified DNS entry for address"""
args = {'address': address,
'name': name,
'domain': domain}
return rpc.call(context, FLAGS.network_topic,
{'method': 'modify_dns_entry',
'args': args})
def delete_dns_entry(self, context, name, domain):
"""Delete the specified dns entry."""
args = {'name': name, 'domain': domain}
return rpc.call(context, FLAGS.network_topic,
{'method': 'delete_dns_entry',
'args': args})
def delete_dns_domain(self, context, domain):
"""Delete the specified dns domain."""
args = {'domain': domain}
return rpc.call(context, FLAGS.network_topic,
{'method': 'delete_dns_domain',
'args': args})
def get_dns_entries_by_address(self, context, address, domain):
"""Get entries for address and domain"""
args = {'address': address, 'domain': domain}
return rpc.call(context, FLAGS.network_topic,
{'method': 'get_dns_entries_by_address',
'args': args})
def get_dns_entries_by_name(self, context, name, domain):
"""Get entries for name and domain"""
args = {'name': name, 'domain': domain}
return rpc.call(context, FLAGS.network_topic,
{'method': 'get_dns_entries_by_name',
'args': args})
def create_private_dns_domain(self, context, domain, availability_zone):
"""Create a private DNS domain with nova availability zone."""
args = {'domain': domain, 'av_zone': availability_zone}
return rpc.call(context, FLAGS.network_topic,
{'method': 'create_private_dns_domain',
'args': args})
def create_public_dns_domain(self, context, domain, project=None):
"""Create a private DNS domain with optional nova project."""
args = {'domain': domain, 'project': project}
return rpc.call(context, FLAGS.network_topic,
{'method': 'create_public_dns_domain',
'args': args})
def setup_networks_on_host(self, context, instance, host=None,
teardown=False):
"""Setup or teardown the network structures on hosts related to
instance"""
host = host or instance['host']
# NOTE(tr3buchet): host is passed in cases where we need to setup
# or teardown the networks on a host which has been migrated to/from
# and instance['host'] is not yet or is no longer equal to
args = {'instance_id': instance['id'],
'host': host,
'teardown': teardown}
# NOTE(tr3buchet): the call is just to wait for completion
rpc.call(context, FLAGS.network_topic,
{'method': 'setup_networks_on_host',
'args': args})
|
apache-2.0
| 8,339,819,754,926,506,000
| 40.756757
| 78
| 0.540983
| false
| 4.474724
| false
| false
| false
|
electrumalt/electrum-ixc
|
lib/blockchain.py
|
1
|
20235
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@ecdsa.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading, time, Queue, os, sys, shutil, traceback, json, auxpow
import zlib
from util import user_dir, appdata_dir, print_error
from bitcoin import *
from transaction import BCDataStream
import pprint
pp = pprint.PrettyPrinter(indent=4)
max_target = 0x00000000FFFF0000000000000000000000000000000000000000000000000000
class Blockchain(threading.Thread):
def __init__(self, config, network):
threading.Thread.__init__(self)
self.daemon = True
self.config = config
self.network = network
self.lock = threading.Lock()
self.local_height = 0
self.running = False
self.headers_url = 'http://electrum-alt.org/ixcoin/blockchain_headers'
self.set_local_height()
self.queue = Queue.Queue()
def height(self):
return self.local_height
def stop(self):
with self.lock: self.running = False
def is_running(self):
with self.lock: return self.running
def run(self):
self.init_headers_file()
self.set_local_height()
print_error( "blocks:", self.local_height )
with self.lock:
self.running = True
while self.is_running():
try:
result = self.queue.get()
except Queue.Empty:
continue
if not result: continue
i, header = result
if not header: continue
height = header.get('block_height')
if height <= self.local_height:
continue
if height > self.local_height + 50:
if not self.get_and_verify_chunks(i, header, height):
continue
if height > self.local_height:
# get missing parts from interface (until it connects to my chain)
chain = self.get_chain( i, header )
# skip that server if the result is not consistent
if not chain:
print_error('e')
continue
# verify the chain
if self.verify_chain( chain ):
print_error("height:", height, i.server)
for header in chain:
self.save_header(header)
else:
print_error("error", i.server)
# todo: dismiss that server
continue
self.network.new_blockchain_height(height, i)
def verify_chain(self, chain):
first_header = chain[0]
prev_header = self.read_header(first_header.get('block_height') -1)
for header in chain:
height = header.get('block_height')
prev_hash = self.hash_header(prev_header)
bits, target = self.get_target(height, chain)
_hash = self.hash_header(header)
pow_hash = _hash
try:
if height >= 45000 and header['version'] == 196865:
assert auxpow.verify(_hash, auxpow.get_our_chain_id(), header['auxpow'])
pow_hash = self.hash_header(header['auxpow']['parent_block'])
assert prev_hash == header.get('prev_block_hash')
assert bits == header.get('bits')
assert int('0x'+pow_hash,16) < target
except Exception:
print traceback.format_exc()
print 'error validating chain at height ', height
print 'block ', height, '(',_hash,') failed validation'
pprint.pprint(header)
return False
prev_header = header
return True
def verify_chunk(self, index, hexdata):
hex_to_int = lambda s: int('0x' + s[::-1].encode('hex'), 16)
data = hexdata.decode('hex')
disk_data = ''
height = index * 2016
num = hex_to_int(data[0:4])
data = data[4:]
auxpowdata = data[num*88:]
auxpowbaseoffset = 0
if index == 0:
previous_hash = ("0"*64)
else:
prev_header = self.read_header(index*2016-1)
            if prev_header is None: raise Exception('missing header at height %d' % (index * 2016 - 1))
previous_hash = self.hash_header(prev_header)
bits, target = self.get_target(height)
chain = []
for i in range(num):
height = index * 2016 + i
raw_header = data[i*88:(i+1)*88]
disk_data += raw_header[0:80] # strip auxpow data
header = self.header_from_string(raw_header)
_hash = self.hash_header(header)
_prev_hash = _hash
header['block_height'] = height
if (i == 0):
auxpowbaseoffset = header['auxpow_offset']
start = header['auxpow_offset'] - auxpowbaseoffset
end = start + header['auxpow_length']
if (end > start):
header['auxpow'] = self.auxpow_from_string(auxpowdata[start:end].decode('hex'))
#print header['auxpow']
if height >= 20160 and (height % 144) == 0:
#print height , '%', 144 , '=', height % 144
bits, target = self.get_target(height, chain)
if height >= 45000 and header['version'] == 196865: #TODO getAuxPowVersion()
#todo: check that auxpow.get_chain_id(header) == auxpow.get_our_chain_id?
#print header['auxpow']
try:
assert auxpow.verify(_hash, auxpow.get_our_chain_id(), header['auxpow'])
except Exception as e:
print traceback.format_exc()
print 'block ', height, '(',_hash,') failed validation'
print 'auxpow failed verification'
pp.pprint(header['auxpow'])
raise e
#pp.pprint(header)
#pp.pprint(parent_header)
_hash = self.hash_header(header['auxpow']['parent_block'])
#print _hash
# todo: verify auxpow data
#_hash = '' # auxpow.getHash()
try:
assert previous_hash == header.get('prev_block_hash')
assert bits == header.get('bits')
assert int('0x'+_hash,16) < target
except Exception as e:
print 'block ', height, ' failed validation'
raise e
if height % 144 == 0:
print 'block ', height, ' validated'
chain.append(header)
previous_header = header
previous_hash = _prev_hash
self.save_chunk(index, disk_data)
print_error("validated chunk %d"%height)
#def parent_block_to_header(self, parent_block):
#h = {}
#h['version'] = parent_block['version']
#h['prev_block_hash'] = parent_block['previousblockhash']
#h['merkle_root'] = parent_block['merkleroot']
#h['timestamp'] = parent_block['time']
#h['bits'] = int(parent_block['bits'], 16) #not sure
#h['nonce'] = parent_block['nonce']
#return h
def header_to_string(self, res):
s = int_to_hex(res.get('version'),4) \
+ rev_hex(res.get('prev_block_hash')) \
+ rev_hex(res.get('merkle_root')) \
+ int_to_hex(int(res.get('timestamp')),4) \
+ int_to_hex(int(res.get('bits')),4) \
+ int_to_hex(int(res.get('nonce')),4)
return s
def auxpow_from_string(self, s):
res = {}
res['coinbasetx'], s = tx_from_string(s)
res['coinbaseMerkleBranch'], res['coinbaseIndex'], s = merkle_branch_from_string(s)
res['chainMerkleBranch'], res['chainIndex'], s = merkle_branch_from_string(s)
res['parent_block'] = header_from_string(s)
return res
def header_from_string(self, s):
# hmmm why specify 0x at beginning if 16 is already specified??
hex_to_int = lambda s: int('0x' + s[::-1].encode('hex'), 16)
h = {}
h['version'] = hex_to_int(s[0:4])
h['prev_block_hash'] = hash_encode(s[4:36])
h['merkle_root'] = hash_encode(s[36:68])
h['timestamp'] = hex_to_int(s[68:72])
h['bits'] = hex_to_int(s[72:76])
h['nonce'] = hex_to_int(s[76:80])
if (len(s) > 80):
h['auxpow_offset'] = hex_to_int(s[80:84])
h['auxpow_length'] = hex_to_int(s[84:88])
return h
def hash_header(self, header):
return rev_hex(Hash(self.header_to_string(header).decode('hex')).encode('hex'))
def path(self):
return os.path.join( self.config.path, 'blockchain_headers')
# the file hosted on the server has extra data to index auxpow data
# we need to remove that data to have 80 byte block headers instead of 88
def remove_auxpow_indexes(self, filename):
size = os.path.getsize(filename)
f = open(self.path(), 'wb+')
fa = open(filename, 'rb')
i = 0
j = 0
while (i < size):
fa.seek(i)
f.seek(j)
chunk = fa.read(80)
f.write(chunk)
j += 80
i += 88
f.close()
fa.close()
os.remove(filename)
def init_headers_file(self):
filename = self.path()
if os.path.exists(filename):
return
try:
import urllib, socket
socket.setdefaulttimeout(30)
print_error('downloading ', self.headers_url )
urllib.urlretrieve(self.headers_url, filename + '_auxpow')
self.remove_auxpow_indexes(filename + '_auxpow')
print_error("done.")
except Exception:
print_error( 'download failed. creating file', filename + '_auxpow' )
open(filename,'wb+').close()
def save_chunk(self, index, chunk):
filename = self.path()
f = open(filename,'rb+')
f.seek(index*2016*80)
h = f.write(chunk)
f.close()
self.set_local_height()
def save_header(self, header):
data = self.header_to_string(header).decode('hex')
assert len(data) == 80
height = header.get('block_height')
filename = self.path()
f = open(filename,'rb+')
f.seek(height*80)
h = f.write(data)
f.close()
self.set_local_height()
def set_local_height(self):
name = self.path()
if os.path.exists(name):
h = os.path.getsize(name)/80 - 1
if self.local_height != h:
self.local_height = h
def read_header(self, block_height):
name = self.path()
if os.path.exists(name):
f = open(name,'rb')
f.seek(block_height*80)
h = f.read(80)
f.close()
if len(h) == 80:
h = self.header_from_string(h)
return h
def get_ixcoin_target(self, height, chain=None):
if chain is None:
chain = [] # Do not use mutables as default values!
        nTargetTimespan = 24 * 60 * 60 #ixcoin: 144 blocks every 24 hours
nInterval = 144
blockstogoback = nInterval
if (height >= 43000):
blockstogoback = nInterval + 1
last_height = (height / 144) * 144 - 1
first_height = (height / 144) * 144 - blockstogoback
#print 'new target at... ' , height
#print 'first height: '
#print first_height
#print 'last height: '
#print last_height
first = self.read_header(first_height)
last = self.read_header(last_height)
if first is None:
for h in chain:
if h.get('block_height') == first_height:
first = h
if last is None:
for h in chain:
if h.get('block_height') == last_height:
last = h
nActualTimespan = last.get('timestamp') - first.get('timestamp')
# https://github.com/FrictionlessCoin/iXcoin/blob/master/src/main.cpp#L1240
nTwoPercent = nTargetTimespan / 50
if nActualTimespan < nTargetTimespan:
#print 'smaller actual timespan'
if nActualTimespan < (nTwoPercent * 16):
#print 'a'
nActualTimespan = nTwoPercent * 45
elif nActualTimespan < (nTwoPercent * 32):
#print 'b'
nActualTimespan = nTwoPercent * 47
else:
#print 'c'
nActualTimespan = nTwoPercent * 49
elif nActualTimespan > (nTargetTimespan * 4):
#print 'd'
nActualTimespan = nTargetTimespan * 4
return self.get_target_from_timespans(last.get('bits'), nActualTimespan, nTargetTimespan)
def get_target_from_timespans(self, bits, nActualTimespan, nTargetTimespan):
# convert to bignum
MM = 256*256*256
a = bits%MM
if a < 0x8000:
a *= 256
target = (a) * pow(2, 8 * (bits/MM - 3))
# new target
new_target = min( max_target, (target * nActualTimespan)/nTargetTimespan )
# convert it to bits
c = ("%064X"%new_target)[2:]
i = 31
while c[0:2]=="00":
c = c[2:]
i -= 1
c = int('0x'+c[0:6],16)
if c >= 0x800000:
c /= 256
i += 1
new_bits = c + MM * i
#print 'new bits: ', hex(new_bits)
#print 'new target: ', hex(new_target)
return new_bits, new_target
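    # Worked example (added sketch, not in the original file): with the easiest
    # difficulty encoded as compact bits 0x1d00ffff and equal actual/target
    # timespans, both the bits and the target are left unchanged ("blockchain"
    # here stands for any Blockchain instance):
    #
    #   MM = 256 * 256 * 256
    #   a = 0x1d00ffff % MM                     # 0x00ffff, already >= 0x8000
    #   target = a * pow(2, 8 * (0x1d - 3))     # == max_target
    #   blockchain.get_target_from_timespans(0x1d00ffff, 600, 600)
    #   # -> (0x1d00ffff, max_target)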
def get_target(self, height, chain=None):
if chain is None:
chain = [] # Do not use mutables as default values!
# Ixcoin: target changes every 144 blocks after block 20160
# https://github.com/FrictionlessCoin/iXcoin/blob/master/src/main.cpp#L1196
if height >= 20160:
#print height , '%', 144 , '=', height % 144
return self.get_ixcoin_target(height, chain)
index = height / 2016
if index == 0: return 0x1d00ffff, max_target
first = self.read_header((index-1)*2016)
last = self.read_header(index*2016-1)
if last is None:
for h in chain:
if h.get('block_height') == index*2016-1:
last = h
nActualTimespan = last.get('timestamp') - first.get('timestamp')
nTargetTimespan = 14*24*60*60
nActualTimespan = max(nActualTimespan, nTargetTimespan/4)
nActualTimespan = min(nActualTimespan, nTargetTimespan*4)
return self.get_target_from_timespans(last.get('bits'), nActualTimespan, nTargetTimespan)
def request_header(self, i, h, queue):
print_error("requesting header %d from %s"%(h, i.server))
i.send_request({'method':'blockchain.block.get_header', 'params':[h]}, queue)
def retrieve_request(self, queue):
while True:
try:
ir = queue.get(timeout=1)
except Queue.Empty:
print_error('blockchain: request timeout')
continue
i, r = ir
result = r['result']
return result
def get_chain(self, interface, final_header):
header = final_header
chain = [ final_header ]
requested_header = False
queue = Queue.Queue()
while self.is_running():
if requested_header:
header = self.retrieve_request(queue)
if not header: return
chain = [ header ] + chain
requested_header = False
height = header.get('block_height')
previous_header = self.read_header(height -1)
if not previous_header:
self.request_header(interface, height - 1, queue)
requested_header = True
continue
# verify that it connects to my chain
prev_hash = self.hash_header(previous_header)
if prev_hash != header.get('prev_block_hash'):
print_error("reorg")
self.request_header(interface, height - 1, queue)
requested_header = True
continue
else:
# the chain is complete
return chain
def get_and_verify_chunks(self, i, header, height):
queue = Queue.Queue()
min_index = (self.local_height + 1)/2016
max_index = (height + 1)/2016
n = min_index
while n < max_index + 1:
print_error( "Requesting chunk:", n )
# todo: ixcoin get_auxblock_chunk after block 45000...?
# todo: call blockchain.block.get_auxblock from verify_chunk instead?
i.send_request({'method':'blockchain.block.get_chunk', 'params':[n]}, queue)
r = self.retrieve_request(queue)
#print 'chunk compressed length : ', len(r)
r = zlib.decompress(r.decode('hex'))
#print 'chunk uncompressed length : ', len(r)
try:
self.verify_chunk(n, r)
n = n + 1
except Exception:
print traceback.format_exc()
print_error('Verify chunk failed!')
n = n - 1
if n < 0:
return False
return True
# START electrum-ixc-server
# the following code was copied from the server's utils.py file
def tx_from_string(s):
vds = BCDataStream()
vds.write(s)
#vds.write(raw.decode('hex'))
d = {}
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['vin'] = []
for i in xrange(n_vin):
txin = {}
# dirty hack: add outpoint structure to get correct txid later
outpoint_pos = vds.read_cursor
txin['coinbase'] = vds.read_bytes(vds.read_compact_size()).encode('hex')
txin['sequence'] = vds.read_uint32()
d['vin'].append(txin)
n_vout = vds.read_compact_size()
d['vout'] = []
for i in xrange(n_vout):
txout = {}
txout['value'] = vds.read_int64()
txout['scriptPubKey'] = vds.read_bytes(vds.read_compact_size()).encode('hex')
d['vout'].append(txout)
d['lockTime'] = vds.read_uint32()
# compute txid
# dirty hack to insert coinbase outpoint structure before hashing
raw = s[0:outpoint_pos]
COINBASE_OP = '0' * 64 + 'F' * 8
raw += (COINBASE_OP).decode('hex')
raw += s[outpoint_pos:vds.read_cursor]
d['txid'] = Hash(raw)[::-1].encode('hex')
return d, s[vds.read_cursor:] # +1?
def merkle_branch_from_string(s):
vds = BCDataStream()
vds.write(s)
#vds.write(raw.decode('hex'))
hashes = []
n_hashes = vds.read_compact_size()
for i in xrange(n_hashes):
_hash = vds.read_bytes(32)
hashes.append(hash_encode(_hash))
index = vds.read_int32()
return hashes, index, s[vds.read_cursor:]
def hex_to_int(s):
return int('0x' + s[::-1].encode('hex'), 16)
def header_from_string(s):
#OK ixcoin todo: include auxpow position in auxpow file (offset(s))
res = {
'version': hex_to_int(s[0:4]),
'prev_block_hash': hash_encode(s[4:36]),
'merkle_root': hash_encode(s[36:68]),
'timestamp': hex_to_int(s[68:72]),
'bits': hex_to_int(s[72:76]),
'nonce': hex_to_int(s[76:80]),
}
if (len(s) > 80):
res['auxpow_offset'] = hex_to_int(s[80:84])
res['auxpow_length'] = hex_to_int(s[84:88])
return res
# END electrum-ixc-server
|
gpl-3.0
| 960,714,723,618,996,600
| 31.584541
| 97
| 0.540252
| false
| 3.792877
| false
| false
| false
|
sebgoa/client-python
|
kubernetes/client/models/v1_container_state.py
|
2
|
4601
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ContainerState(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, running=None, terminated=None, waiting=None):
"""
V1ContainerState - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'running': 'V1ContainerStateRunning',
'terminated': 'V1ContainerStateTerminated',
'waiting': 'V1ContainerStateWaiting'
}
self.attribute_map = {
'running': 'running',
'terminated': 'terminated',
'waiting': 'waiting'
}
self._running = running
self._terminated = terminated
self._waiting = waiting
@property
def running(self):
"""
Gets the running of this V1ContainerState.
Details about a running container
:return: The running of this V1ContainerState.
:rtype: V1ContainerStateRunning
"""
return self._running
@running.setter
def running(self, running):
"""
Sets the running of this V1ContainerState.
Details about a running container
:param running: The running of this V1ContainerState.
:type: V1ContainerStateRunning
"""
self._running = running
@property
def terminated(self):
"""
Gets the terminated of this V1ContainerState.
Details about a terminated container
:return: The terminated of this V1ContainerState.
:rtype: V1ContainerStateTerminated
"""
return self._terminated
@terminated.setter
def terminated(self, terminated):
"""
Sets the terminated of this V1ContainerState.
Details about a terminated container
:param terminated: The terminated of this V1ContainerState.
:type: V1ContainerStateTerminated
"""
self._terminated = terminated
@property
def waiting(self):
"""
Gets the waiting of this V1ContainerState.
Details about a waiting container
:return: The waiting of this V1ContainerState.
:rtype: V1ContainerStateWaiting
"""
return self._waiting
@waiting.setter
def waiting(self, waiting):
"""
Sets the waiting of this V1ContainerState.
Details about a waiting container
:param waiting: The waiting of this V1ContainerState.
:type: V1ContainerStateWaiting
"""
self._waiting = waiting
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ContainerState):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
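# Example usage (added sketch, not part of the generated client): all three
# nested states are optional, so a freshly constructed object serializes to
# all-None fields.
#
#   state = V1ContainerState()
#   state.to_dict()               # {'running': None, 'terminated': None, 'waiting': None}
#   state == V1ContainerState()   # True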
|
apache-2.0
| -1,741,430,488,379,125,000
| 26.224852
| 105
| 0.562921
| false
| 4.714139
| false
| false
| false
|
manparvesh/manparvesh.github.io
|
oldsitejekyll/markdown_generator/professional.py
|
1
|
3829
|
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts it for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either one from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
professional = pd.read_csv("professional.tsv", sep="\t", header=0)
professional
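# A minimal sketch of what professional.tsv is expected to look like. The column
# names are the ones referenced further down in this script (note they differ
# from the publications columns described above); the values are hypothetical
# and fields are separated by tabs:
#
#   course_name    provider    provider_url    certificate_link
#   Machine Learning    Coursera    https://www.coursera.org    https://example.com/certificate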
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them less readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation")
# In[5]:
import os
SPACE = ' '
STAR = '*'
TAB = SPACE + SPACE
TAB_BULLET = SPACE + STAR + SPACE
ENDL = '\n'
WIP = '*[ WIP ]*'
TODO = '*[TODO]*'
def is_not_NaN(num):
return num == num
def is_not_empty(s):
return is_not_NaN(s) and len(str(s)) > 0
def bold(s):
return STAR + STAR + str(s) + STAR + STAR
def italicize(s):
return STAR + str(s) + STAR
def coursera_icon_link(s):
return '<a href="' + str(s) + '" target="_blank"><i class="ai ai-courser"></i></a>'
def github_icon_link(s):
return '<a href="' + str(s) + '" target="_blank"><i class="fa fa-github" aria-hidden="true"></i> </a>'
def certificate_icon_link(s):
return '<a href="' + str(s) + '" target="_blank"><i class="fa fa-certificate" aria-hidden="true"></i> </a>'
with open("../_pages/professional.md", 'w') as f:
for row, item in professional.iterrows():
md = ''
md += TAB_BULLET
md += str(item.course_name)
md += SPACE
md += "by "
md += '[' + str(item.provider) + '](' + str(item.provider_url) + ')'
md += SPACE
if is_not_empty(item.certificate_link):
md += certificate_icon_link(item.certificate_link)
md += ENDL
f.write(md)
|
mit
| 1,995,675,648,712,673,500
| 34.453704
| 346
| 0.665709
| false
| 3.26428
| false
| false
| false
|
qxf2/qxf2-page-object-model
|
utils/excel_compare.py
|
1
|
2729
|
"""
Qxf2 Services: Utility script to compare two excel files using openxl module
"""
import openpyxl
import os
class Excel_Compare():
def is_equal(self,xl_actual,xl_expected):
"Method to compare the Actual and Expected xl file"
result_flag = True
if not os.path.exists(xl_actual):
result_flag = False
print('Could not locate the excel file: %s'%xl_actual)
if not os.path.exists(xl_expected):
result_flag = False
print('Could not locate the excel file %s'%xl_expected)
if os.path.exists(xl_actual) and os.path.exists(xl_expected):
#Open the xl file and put the content to list
actual_xlfile = openpyxl.load_workbook(xl_actual)
xl_sheet = actual_xlfile.active
actual_file = []
for row in xl_sheet.iter_rows(min_row=1, max_col=xl_sheet.max_column, max_row=xl_sheet.max_row):
for cell in row:
actual_file.append(cell.value)
exp_xlfile = openpyxl.load_workbook(xl_expected)
xl_sheet = exp_xlfile.active
exp_file = []
for row in xl_sheet.iter_rows(min_row=1, max_col=xl_sheet.max_column, max_row=xl_sheet.max_row):
for cell in row:
exp_file.append(cell.value)
#If there is row and column mismatch result_flag = False
if (len(actual_file)!= len(exp_file)):
result_flag = False
print("Mismatch in number of rows or columns. The actual row or column count didn't match with expected row or column count")
else:
for actual_row, actual_col in zip(actual_file,exp_file):
if actual_row == actual_col:
pass
else:
print("Mismatch between actual and expected file at position(each row consists of 23 coordinates):",actual_file.index(actual_row))
print("Data present only in Actual file: %s"%actual_row)
print("Data present only in Expected file: %s"%actual_col)
result_flag = False
return result_flag
#---USAGE EXAMPLES
if __name__=='__main__':
print("Start of %s"%__file__)
# Enter the path details of the xl files here
file1 = 'Add path to the first xl file'
file2 = 'Add path to the second xl file'
#Initialize the excel object
xl_obj = Excel_Compare()
#Sample code to compare excel files
if xl_obj.is_equal(file1,file2) is True:
print("Data matched in both the excel files\n")
else:
print("Data mismatch between the actual and expected excel files")
|
mit
| 4,930,645,045,380,420,000
| 39.746269
| 154
| 0.585562
| false
| 3.983942
| false
| false
| false
|
HackatONG-ProgramAR/cordoba-aprende
|
aulalibre/aulavirtual/models.py
|
1
|
4354
|
# -*- coding: utf-8 -*-
from django.db import models
from educar import get_descripciones_ebooks
# Create your models here.
class Alumno(models.Model):
apellido = models.CharField(max_length=100)
nombre = models.CharField(max_length=100)
colegio = models.ForeignKey('Colegio')
curso = models.ForeignKey('Curso')
email = models.EmailField(blank=True)
class Meta:
verbose_name = ('Alumno')
verbose_name_plural = ('Alumnos')
def __unicode__(self):
return "%s, %s" % (self.apellido, self.nombre)
class Profesor(models.Model):
apellido = models.CharField(max_length=100)
nombre = models.CharField(max_length=100)
colegio = models.ForeignKey('Colegio')
email = models.EmailField(blank=True)
class Meta:
verbose_name = ('Profesor')
verbose_name_plural = ('Profesores')
def __unicode__(self):
return "%s, %s" % (self.apellido, self.nombre)
class Colegio(models.Model):
nombre = models.CharField(max_length=100)
ciudad = models.CharField(max_length=100)
class Meta:
verbose_name = ('Colegio')
verbose_name_plural = ('Colegios')
def __unicode__(self):
return self.nombre
class Curso(models.Model):
colegio = models.ForeignKey('Colegio')
anio = models.IntegerField(verbose_name=u'Año')
division = models.CharField(max_length=100)
class Meta:
verbose_name = ('Curso')
verbose_name_plural = ('Cursos')
def __unicode__(self):
return u'%s "%s" - %s' % (self.anio, self.division, self.colegio)
class CursoMateria(models.Model):
curso = models.ForeignKey('Curso')
profesor = models.ForeignKey('Profesor')
anio_materia = models.ForeignKey('AnioMateria')
class Meta:
verbose_name = ('Curso Materia')
verbose_name_plural = ('Curso Materias')
def __unicode__(self):
return u"%s - %s - %s" % (self.curso, self.anio_materia, self.profesor)
class AnioMateria(models.Model):
materia = models.ForeignKey('Materia')
anio = models.IntegerField(verbose_name=u'Año')
class Meta:
verbose_name = (u'Año Materia')
verbose_name_plural = (u'Año Materias')
def __unicode__(self):
return u"%s - %s" % (self.materia, self.anio)
class Materia(models.Model):
nombre = models.CharField(max_length=100)
area_tematica = models.ForeignKey('AreaTematica')
class Meta:
verbose_name = ('Materia')
verbose_name_plural = ('Materias')
def __unicode__(self):
return self.nombre
class AreaTematica(models.Model):
nombre = models.CharField(max_length=100)
class Meta:
verbose_name = ('Área Temática')
verbose_name_plural = ('Áreas Temáticas')
def __unicode__(self):
return self.nombre
@classmethod
def crear_areas(cls):
areas = ["Matemática", "Lengua", "Ciencias"]
for n in areas:
cls.objects.create(nombre=n)
class Eje(models.Model):
nombre = models.CharField(max_length=100)
anio_materia = models.ForeignKey('AnioMateria')
# contenidos
class Meta:
verbose_name = ('Eje')
verbose_name_plural = ('Ejes')
def __unicode__(self):
return self.nombre
class Recurso(models.Model):
tipo = models.CharField(max_length=100)
nombre = models.CharField(max_length=100)
descripcion = models.TextField()
enlace = models.TextField()
adjunto = models.FileField(upload_to='recursos')
area_tematica = models.ForeignKey('AreaTematica')
anio = models.IntegerField(verbose_name=u'Año')
class Meta:
verbose_name = ('Recurso')
verbose_name_plural = ('Recursos')
@classmethod
def cargar_ebooks(cls, descripciones=None):
if descripciones is None:
descripciones = get_descripciones_ebooks()
# TODO: traer el area posta
area = AreaTematica.objects.get(nombre="Ciencias")
for desc in descripciones:
cls.objects.create(
tipo="ebook",
nombre=desc[u'titulo'],
descripcion=desc['descripcion'],
area_tematica=area,
anio=3,
enlace=desc['visualizacion_educar']
)
def __unicode__(self):
return self.nombre
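# Example (added sketch, not part of the original app): cargar_ebooks expects an
# iterable of dicts containing at least the keys used above; the values below
# are hypothetical and an AreaTematica named "Ciencias" must already exist.
#
#   Recurso.cargar_ebooks([{
#       u'titulo': u'Título de ejemplo',
#       'descripcion': u'Descripción breve del e-book',
#       'visualizacion_educar': 'http://www.educ.ar/recurso/ejemplo',
#   }])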
|
gpl-2.0
| 926,056,478,757,999,900
| 26.320755
| 79
| 0.617173
| false
| 3.323642
| false
| false
| false
|
zsjohny/jumpserver
|
apps/users/views/login.py
|
1
|
6958
|
# ~*~ coding: utf-8 ~*~
from __future__ import unicode_literals
from django.shortcuts import render
from django.views.generic import RedirectView
from django.core.files.storage import default_storage
from django.shortcuts import reverse, redirect
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateView
from django.conf import settings
from django.urls import reverse_lazy
from formtools.wizard.views import SessionWizardView
from django.views.generic import FormView
from common.utils import get_object_or_none
from common.permissions import PermissionsMixin, IsValidUser
from ..models import User
from ..utils import (
send_reset_password_mail, get_password_check_rules, check_password_rules
)
from .. import forms
__all__ = [
'UserLoginView', 'UserForgotPasswordSendmailSuccessView',
'UserResetPasswordSuccessView', 'UserResetPasswordSuccessView',
'UserResetPasswordView', 'UserForgotPasswordView', 'UserFirstLoginView',
]
class UserLoginView(RedirectView):
url = reverse_lazy('authentication:login')
query_string = True
class UserForgotPasswordView(FormView):
template_name = 'users/forgot_password.html'
form_class = forms.UserForgotPasswordForm
def form_valid(self, form):
request = self.request
email = form.cleaned_data['email']
user = get_object_or_none(User, email=email)
if not user:
error = _('Email address invalid, please input again')
form.add_error('email', error)
return self.form_invalid(form)
elif not user.can_update_password():
error = _('User auth from {}, go there change password')
form.add_error('email', error.format(user.get_source_display()))
return self.form_invalid(form)
else:
send_reset_password_mail(user)
return redirect('users:forgot-password-sendmail-success')
class UserForgotPasswordSendmailSuccessView(TemplateView):
template_name = 'flash_message_standalone.html'
def get_context_data(self, **kwargs):
context = {
'title': _('Send reset password message'),
'messages': _('Send reset password mail success, '
'login your mail box and follow it '),
'redirect_url': reverse('authentication:login'),
}
kwargs.update(context)
return super().get_context_data(**kwargs)
class UserResetPasswordSuccessView(TemplateView):
template_name = 'flash_message_standalone.html'
def get_context_data(self, **kwargs):
context = {
'title': _('Reset password success'),
'messages': _('Reset password success, return to login page'),
'redirect_url': reverse('authentication:login'),
'auto_redirect': True,
}
kwargs.update(context)
return super().get_context_data(**kwargs)
class UserResetPasswordView(FormView):
template_name = 'users/reset_password.html'
form_class = forms.UserTokenResetPasswordForm
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
errors = kwargs.get('errors')
if errors:
context['errors'] = errors
return self.render_to_response(context)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
token = self.request.GET.get('token', '')
user = User.validate_reset_password_token(token)
if not user:
context['errors'] = _('Token invalid or expired')
context['token_invalid'] = True
check_rules = get_password_check_rules()
context['password_check_rules'] = check_rules
return context
def form_valid(self, form):
token = self.request.GET.get('token')
user = User.validate_reset_password_token(token)
if not user:
error = _('Token invalid or expired')
form.add_error('new_password', error)
return self.form_invalid(form)
if not user.can_update_password():
error = _('User auth from {}, go there change password')
form.add_error('new_password', error.format(user.get_source_display()))
return self.form_invalid(form)
password = form.cleaned_data['new_password']
is_ok = check_password_rules(password)
if not is_ok:
error = _('* Your password does not meet the requirements')
form.add_error('new_password', error)
return self.form_invalid(form)
user.reset_password(password)
User.expired_reset_password_token(token)
return redirect('users:reset-password-success')
class UserFirstLoginView(PermissionsMixin, SessionWizardView):
template_name = 'users/first_login.html'
permission_classes = [IsValidUser]
form_list = [
forms.UserProfileForm,
forms.UserPublicKeyForm,
forms.UserMFAForm,
forms.UserFirstLoginFinishForm
]
file_storage = default_storage
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated and not request.user.is_first_login:
return redirect(reverse('index'))
return super().dispatch(request, *args, **kwargs)
def done(self, form_list, **kwargs):
user = self.request.user
for form in form_list:
for field in form:
if field.value():
setattr(user, field.name, field.value())
user.is_first_login = False
user.save()
context = {
'user_guide_url': settings.USER_GUIDE_URL
}
return render(self.request, 'users/first_login_done.html', context)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update({'app': _('Users'), 'action': _('First login')})
return context
def get_form_initial(self, step):
user = self.request.user
if step == '0':
return {
'username': user.username or '',
'name': user.name or user.username,
'email': user.email or '',
'wechat': user.wechat or '',
'phone': user.phone or ''
}
return super().get_form_initial(step)
def get_form(self, step=None, data=None, files=None):
form = super().get_form(step, data, files)
form.instance = self.request.user
if isinstance(form, forms.UserMFAForm):
choices = form.fields["mfa_level"].choices
if self.request.user.mfa_force_enabled:
choices = [(k, v) for k, v in choices if k == 2]
else:
choices = [(k, v) for k, v in choices if k in [0, 1]]
form.fields["mfa_level"].choices = choices
form.fields["mfa_level"].initial = self.request.user.mfa_level
return form
|
gpl-2.0
| 1,119,947,825,107,735,700
| 35.621053
| 83
| 0.624174
| false
| 4.07377
| false
| false
| false
|
centaurialpha/ninja-ide
|
ninja_ide/core/file_handling/file_manager.py
|
1
|
10239
|
# -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import sys
import os
import re
import threading
import shutil
from PyQt5 import QtCore
from ninja_ide.core import settings
if sys.version_info.major == 3:
python3 = True
else:
python3 = False
# Lock to protect the file's writing operation
file_store_content_lock = threading.Lock()
class NinjaIOException(Exception):
"""
IO operation's exception
"""
pass
class NinjaNoFileNameException(Exception):
"""
Tried to write a file but I lack a file name
"""
pass
class NinjaFileExistsException(Exception):
"""
Try to override existing file without confirmation exception.
"""
def __init__(self, filename=''):
Exception.__init__(self, 'The file already exists.')
self.filename = filename
def create_init_file(folderName):
"""Create a __init__.py file in the folder received."""
if not os.path.isdir(folderName):
raise NinjaIOException("The destination folder does not exist")
name = os.path.join(folderName, '__init__.py')
if file_exists(name):
raise NinjaFileExistsException(name)
f = open(name, 'w')
f.flush()
f.close()
def create_init_file_complete(folderName):
"""Create a __init__.py file in the folder received.
This __init__.py will contain the information of the files inside
this folder."""
if not os.path.isdir(folderName):
raise NinjaIOException("The destination folder does not exist")
patDef = re.compile('^def .+')
patClass = re.compile('^class .+')
patExt = re.compile('.+\\.py')
files = os.listdir(folderName)
files = list(filter(patExt.match, files))
files.sort()
imports_ = []
for f in files:
read = open(os.path.join(folderName, f), 'r')
imp = [re.split('\\s|\\(', line)[1] for line in read.readlines()
if patDef.match(line) or patClass.match(line)]
imports_ += ['from ' + f[:-3] + ' import ' + i for i in imp]
name = os.path.join(folderName, '__init__.py')
fi = open(name, 'w')
for import_ in imports_:
fi.write(import_ + '\n')
fi.flush()
fi.close()
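# Example (added sketch, not part of the original module): for a folder that
# contains only foo.py defining, in this order, `class Foo` and `def bar()`,
# create_init_file_complete would write an __init__.py containing:
#
#   from foo import Foo
#   from foo import bar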
def create_folder(folderName, add_init_file=True):
"""Create a new Folder inside the one received as a param."""
if os.path.exists(folderName):
raise NinjaIOException("The folder already exist")
os.makedirs(folderName)
if add_init_file:
create_init_file(folderName)
def create_tree_folders(folderName):
"""Create a group of folders, one inside the other."""
if os.path.exists(folderName):
raise NinjaIOException("The folder already exist")
os.makedirs(folderName)
def folder_exists(folderName):
"""Check if a folder already exists."""
return os.path.isdir(folderName)
def file_exists(path, fileName=''):
"""Check if a file already exists."""
if fileName != '':
path = os.path.join(path, fileName)
return os.path.isfile(path)
def _search_coding_line(txt):
"""Search a pattern like this: # -*- coding: utf-8 -*-."""
coding_pattern = "coding[:=]\s*([-\w.]+)"
pat_coding = re.search(coding_pattern, txt)
if pat_coding and pat_coding.groups()[0] != 'None':
return pat_coding.groups()[0]
return None
def get_file_encoding(content):
"""Try to get the encoding of the file using the PEP 0263 rules
search the first or the second line of the file
Returns the encoding or the default UTF-8
"""
encoding = None
try:
lines_to_check = content.split("\n", 2)
for index in range(2):
if len(lines_to_check) > index:
line_encoding = _search_coding_line(lines_to_check[index])
if line_encoding:
encoding = line_encoding
break
except UnicodeDecodeError as error:
# add logger
print(error)
# if not encoding is set then use UTF-8 as default
if encoding is None:
encoding = "UTF-8"
return encoding
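# Example (added sketch, not part of the original module): a PEP 263 coding line
# on the first or second line wins, otherwise the default is returned.
#
#   get_file_encoding("# -*- coding: latin-1 -*-\nprint('hi')")   # 'latin-1'
#   get_file_encoding("print('hi')")                              # 'UTF-8'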
def read_file_content(fileName):
"""Read a file content, this function is used to load Editor content."""
try:
with open(fileName, 'rU') as f:
content = f.read()
except IOError as reason:
raise NinjaIOException(reason)
except:
raise
return content
def get_basename(fileName):
"""Get the name of a file or folder specified in a path."""
if fileName.endswith(os.path.sep):
fileName = fileName[:-1]
return os.path.basename(fileName)
def get_folder(fileName):
"""Get the name of the folder containing the file or folder received."""
return os.path.dirname(fileName)
def store_file_content(fileName, content, addExtension=True, newFile=False):
"""Save content on disk with the given file name."""
if fileName == '':
        raise NinjaNoFileNameException('The file name is empty')
ext = (os.path.splitext(fileName)[-1])[1:]
if ext == '' and addExtension:
fileName += '.py'
if newFile and file_exists(fileName):
raise NinjaFileExistsException(fileName)
try:
flags = QtCore.QIODevice.WriteOnly | QtCore.QIODevice.Truncate
f = QtCore.QFile(fileName)
if settings.use_platform_specific_eol():
flags |= QtCore.QIODevice.Text
if not f.open(flags):
raise NinjaIOException(f.errorString())
stream = QtCore.QTextStream(f)
encoding = get_file_encoding(content)
if encoding:
stream.setCodec(encoding)
encoded_stream = stream.codec().fromUnicode(content)
f.write(encoded_stream)
f.flush()
f.close()
except:
raise
return os.path.abspath(fileName)
def open_project(path):
"""Return a dict structure containing the info inside a folder."""
    return open_project_with_extensions(path, settings.SUPPORTED_EXTENSIONS)
def open_project_with_extensions(path, extensions):
"""Return a dict structure containing the info inside a folder.
This function uses the extensions specified by each project."""
if not os.path.exists(path):
raise NinjaIOException("The folder does not exist")
valid_extensions = [ext.lower() for ext in extensions
if not ext.startswith('-')]
d = {}
for root, dirs, files in os.walk(path, followlinks=True):
for f in files:
ext = os.path.splitext(f.lower())[-1]
if ext in valid_extensions or '.*' in valid_extensions:
d[root] = [f, dirs]
elif ext == '' and '*' in valid_extensions:
d[root] = [f, dirs]
return d
def delete_file(path, fileName=None):
"""Delete the proper file.
    If fileName is given, path and fileName are joined to create the
    complete path; otherwise path alone is used to delete the file."""
if fileName:
path = os.path.join(path, fileName)
if os.path.isfile(path):
os.remove(path)
def delete_folder(path, fileName=None):
"""Delete the proper folder."""
if fileName:
path = os.path.join(path, fileName)
if os.path.isdir(path):
shutil.rmtree(path)
def rename_file(old, new):
"""Rename a file, changing its name from 'old' to 'new'."""
if os.path.isfile(old):
if file_exists(new):
raise NinjaFileExistsException(new)
os.rename(old, new)
return new
return ''
def get_file_extension(fileName):
"""Get the file extension in the form of: 'py'"""
return os.path.splitext(fileName.lower())[-1][1:]
def get_file_name(fileName):
"""Get the file name, without the extension."""
return os.path.splitext(fileName)[0]
def get_module_name(fileName):
"""Get the name of the file without the extension."""
module = os.path.basename(fileName)
return (os.path.splitext(module)[0])
def convert_to_relative(basePath, fileName):
"""Convert a absolut path to relative based on its start with basePath."""
if fileName.startswith(basePath):
fileName = fileName.replace(basePath, '')
if fileName.startswith(os.path.sep):
fileName = fileName[1:]
return fileName
def create_path(*args):
"""Join the paths provided in order to create an absolut path."""
return os.path.join(*args)
def belongs_to_folder(path, fileName):
"""Determine if fileName is located under path structure."""
if not path.endswith(os.path.sep):
path += os.path.sep
return fileName.startswith(path)
def get_last_modification(fileName):
"""Get the last time the file was modified."""
return QtCore.QFileInfo(fileName).lastModified()
def has_write_permission(fileName):
"""Check if the file has writing permissions."""
return os.access(fileName, os.W_OK)
def check_for_external_modification(fileName, old_mtime):
"""Check if the file was modified outside ninja."""
new_modification_time = get_last_modification(fileName)
# check the file mtime attribute calling os.stat()
if new_modification_time > old_mtime:
return True
return False
def get_files_from_folder(folder, ext):
"""Get the files in folder with the specified extension."""
try:
filesExt = os.listdir(folder)
except:
filesExt = []
filesExt = [f for f in filesExt if f.endswith(ext)]
return filesExt
def is_supported_extension(filename, extensions=None):
if extensions is None:
extensions = settings.SUPPORTED_EXTENSIONS
if os.path.splitext(filename.lower())[-1] in extensions:
return True
return False
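# Illustrative usage of the path helpers above (editorial sketch; the values shown
# assume a POSIX-style os.path):
#     path = create_path('/tmp', 'demo_project')   # '/tmp/demo_project'
#     get_folder(path)                              # '/tmp'
#     get_basename(path)                            # 'demo_project'
#     belongs_to_folder('/tmp', path)               # True
#     convert_to_relative('/tmp', path)             # 'demo_project'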
|
gpl-3.0
| 8,059,544,523,464,693,000
| 28.938596
| 78
| 0.64518
| false
| 3.876941
| false
| false
| false
|
pkimber/checkout
|
checkout/migrations/0002_auto_20150625_1159.py
|
1
|
1640
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def _init_state(model, name, slug):
try:
model.objects.get(slug=slug)
except model.DoesNotExist:
instance = model(**dict(name=name, slug=slug))
instance.save()
instance.full_clean()
def _init_state_action(model, name, slug, payment):
try:
obj = model.objects.get(slug=slug)
except model.DoesNotExist:
instance = model(**dict(name=name, slug=slug, payment=payment))
instance.save()
instance.full_clean()
def default_state(apps, schema_editor):
"""Create default states.
We can't import a model directly as it may be a newer version than this
migration expects. We use the historical version.
"""
model = apps.get_model('checkout', 'CheckoutAction')
_init_state_action(model, 'Card Refresh', 'card_refresh', False)
_init_state_action(model, 'Charge', 'charge', True)
_init_state_action(model, 'Invoice', 'invoice', False)
_init_state_action(model, 'Manual', 'manual', False)
_init_state_action(model, 'Payment', 'payment', True)
_init_state_action(model, 'Payment Plan', 'payment_plan', False)
model = apps.get_model('checkout', 'CheckoutState')
_init_state(model, 'Fail', 'fail')
_init_state(model, 'Pending', 'pending')
_init_state(model, 'Request', 'request')
_init_state(model, 'Success', 'success')
class Migration(migrations.Migration):
dependencies = [
('checkout', '0001_initial'),
]
operations = [
migrations.RunPython(default_state),
]
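    # Editorial note: RunPython above is forward-only; if reversibility were
    # wanted, migrations.RunPython(default_state, migrations.RunPython.noop)
    # would let the migration be unapplied without undoing the created rows.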
|
apache-2.0
| -5,201,387,871,116,438,000
| 28.818182
| 75
| 0.643902
| false
| 3.636364
| false
| false
| false
|
dials/dials
|
algorithms/background/gmodel/algorithm.py
|
1
|
2792
|
import pickle
class ModelCache:
"""
A class to cache the model
"""
def __init__(self):
"""
Create a model dictionary
"""
self.model = dict()
def get(self, name):
"""
Get the model
"""
if name is None:
raise RuntimeError("Model is not specified")
try:
model = self.model[name]
except KeyError:
with open(name, "rb") as infile:
model = pickle.load(infile)
self.model[name] = model
return model
# Instance of the model cache
global_model_cache = ModelCache()
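# Illustrative usage (editorial; "background_model.pickle" is a hypothetical path):
# the first call unpickles the file, later calls with the same name return the
# cached object.
#     model = global_model_cache.get("background_model.pickle")
#     same = global_model_cache.get("background_model.pickle")  # served from cache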
class BackgroundAlgorithm:
"""Class to do background subtraction."""
def __init__(
self,
experiments,
model=None,
robust=False,
tuning_constant=1.345,
min_pixels=10,
):
"""
Initialise the algorithm.
:param experiments: The list of experiments
:param model: The background model
:param robust: Use the robust background algorithm
:param tuning_constant: The robust tuning constant
"""
from dials.algorithms.background.gmodel import Creator
# Get the model
model = global_model_cache.get(model)
# Create the background creator
self._create = Creator(model=model, robust=robust, min_pixels=min_pixels)
def compute_background(self, reflections, image_volume=None):
"""
Compute the background.
:param reflections: The list of reflections
"""
# Do the background subtraction
if image_volume is None:
success = self._create(reflections)
reflections["background.mean"] = reflections[
"shoebox"
].mean_modelled_background()
else:
success = self._create(reflections, image_volume)
reflections.set_flags(~success, reflections.flags.dont_integrate)
return success
class GModelBackgroundCalculatorFactory:
"""Class to do background subtraction."""
@staticmethod
def create(experiments, model=None, robust=False, min_pixels=10):
"""
Initialise the algorithm.
:param experiments: The list of experiments
:param model: The background model
:param robust: Use the robust background algorithm
:param tuning_constant: The robust tuning constant
"""
from dials.algorithms.integration.parallel_integrator import (
GModelBackgroundCalculator,
)
# Get the model
model = global_model_cache.get(model)
# Create the background creator
return GModelBackgroundCalculator(
model=model, robust=robust, min_pixels=min_pixels
)
|
bsd-3-clause
| -6,660,291,910,596,019,000
| 26.372549
| 81
| 0.59563
| false
| 4.584565
| false
| false
| false
|
solackerman/sqlalchemy-redshift
|
sqlalchemy_redshift/dialect.py
|
1
|
30445
|
import re
from collections import defaultdict
import pkg_resources
import sqlalchemy as sa
from sqlalchemy import Column, exc, inspect, schema
from sqlalchemy.dialects.postgresql.base import PGCompiler, PGDDLCompiler
from sqlalchemy.dialects.postgresql.psycopg2 import PGDialect_psycopg2
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import (
BinaryExpression, BooleanClauseList, Delete
)
from sqlalchemy.types import VARCHAR, NullType
from .commands import CopyCommand, UnloadFromSelect
from .compat import string_types
try:
from alembic.ddl import postgresql
except ImportError:
pass
else:
from alembic.ddl.base import RenameTable
compiles(RenameTable, 'redshift')(postgresql.visit_rename_table)
    class RedshiftImpl(postgresql.PostgresqlImpl):
        __dialect__ = 'redshift'
__all__ = ['CopyCommand', 'UnloadFromSelect', 'RedshiftDialect']
# Regex for parsing and identity constraint out of adsrc, e.g.:
# "identity"(445178, 0, '1,1'::text)
IDENTITY_RE = re.compile(r"""
"identity" \(
(?P<current>-?\d+)
,\s
(?P<base>-?\d+)
,\s
'(?P<seed>-?\d+),(?P<step>-?\d+)'
.*
\)
""", re.VERBOSE)
# Regex for SQL identifiers (valid table and column names)
SQL_IDENTIFIER_RE = re.compile(r"""
[_a-zA-Z][\w$]* # SQL standard identifier
| # or
(?:"[^"]+")+ # SQL delimited (quoted) identifier
""", re.VERBOSE)
# Regex for foreign key constraints, e.g.:
# FOREIGN KEY(col1) REFERENCES othertable (col2)
# See https://docs.aws.amazon.com/redshift/latest/dg/r_names.html
# for a definition of valid SQL identifiers.
FOREIGN_KEY_RE = re.compile(r"""
^FOREIGN\ KEY \s* \( # FOREIGN KEY, arbitrary whitespace, literal '('
(?P<columns> # Start a group to capture the referring columns
(?: # Start a non-capturing group
\s* # Arbitrary whitespace
([_a-zA-Z][\w$]* | ("[^"]+")+) # SQL identifier
\s* # Arbitrary whitespace
        ,?          # There will be a comma if this isn't the last one
)+ # Close the non-capturing group; require at least one
) # Close the 'columns' group
\s* \) # Arbitrary whitespace and literal ')'
\s* REFERENCES \s*
((?P<referred_schema>([_a-zA-Z][\w$]* | ("[^"]*")+))\.)? # SQL identifier
(?P<referred_table>[_a-zA-Z][\w$]* | ("[^"]*")+) # SQL identifier
    \s* \( # Arbitrary whitespace and literal '('
(?P<referred_columns> # Start a group to capture the referring columns
(?: # Start a non-capturing group
\s* # Arbitrary whitespace
([_a-zA-Z][\w$]* | ("[^"]+")+) # SQL identifier
\s* # Arbitrary whitespace
        ,?          # There will be a comma if this isn't the last one
)+ # Close the non-capturing group; require at least one
) # Close the 'columns' group
\s* \) # Arbitrary whitespace and literal ')'
""", re.VERBOSE)
# Regex for primary key constraints, e.g.:
# PRIMARY KEY (col1, col2)
PRIMARY_KEY_RE = re.compile(r"""
    ^PRIMARY \s* KEY \s* \( # PRIMARY KEY, arbitrary whitespace, literal '('
(?P<columns> # Start a group to capture column names
(?:
\s* # Arbitrary whitespace
# SQL identifier or delimited identifier
( [_a-zA-Z][\w$]* | ("[^"]*")+ )
\s* # Arbitrary whitespace
        ,?       # There will be a comma if this isn't the last one
)+ # Close the non-capturing group; require at least one
)
\s* \) \s* # Arbitrary whitespace and literal ')'
""", re.VERBOSE)
def _get_relation_key(name, schema):
if schema is None:
return name
else:
return schema + "." + name
def _get_schema_and_relation(key):
if '.' not in key:
return (None, key)
identifiers = SQL_IDENTIFIER_RE.findall(key)
if len(identifiers) == 1:
return (None, key)
elif len(identifiers) == 2:
return identifiers
    raise ValueError("%s does not look like a valid relation identifier" % key)
def unquoted(key):
"""
Return *key* with one level of double quotes removed.
Redshift stores some identifiers without quotes in internal tables,
even though the name must be quoted elsewhere.
In particular, this happens for tables named as a keyword.
"""
if key.startswith('"') and key.endswith('"'):
return key[1:-1]
return key
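# Editorial example: unquoted('"select"') returns 'select', while an already
# unquoted name such as 'users' is returned unchanged.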
class RedshiftCompiler(PGCompiler):
def visit_now_func(self, fn, **kw):
return "SYSDATE"
class RedshiftDDLCompiler(PGDDLCompiler):
"""
Handles Redshift-specific ``CREATE TABLE`` syntax.
Users can specify the `diststyle`, `distkey`, `sortkey` and `encode`
properties per table and per column.
Table level properties can be set using the dialect specific syntax. For
example, to specify a distribution key and style you apply the following:
>>> import sqlalchemy as sa
>>> from sqlalchemy.schema import CreateTable
>>> engine = sa.create_engine('redshift+psycopg2://example')
>>> metadata = sa.MetaData()
>>> user = sa.Table(
... 'user',
... metadata,
... sa.Column('id', sa.Integer, primary_key=True),
... sa.Column('name', sa.String),
... redshift_diststyle='KEY',
... redshift_distkey='id',
... redshift_interleaved_sortkey=['id', 'name'],
... )
>>> print(CreateTable(user).compile(engine))
<BLANKLINE>
CREATE TABLE "user" (
id INTEGER NOT NULL,
name VARCHAR,
PRIMARY KEY (id)
) DISTSTYLE KEY DISTKEY (id) INTERLEAVED SORTKEY (id, name)
<BLANKLINE>
<BLANKLINE>
A single sort key can be applied without a wrapping list:
>>> customer = sa.Table(
... 'customer',
... metadata,
... sa.Column('id', sa.Integer, primary_key=True),
... sa.Column('name', sa.String),
... redshift_sortkey='id',
... )
>>> print(CreateTable(customer).compile(engine))
<BLANKLINE>
CREATE TABLE customer (
id INTEGER NOT NULL,
name VARCHAR,
PRIMARY KEY (id)
) SORTKEY (id)
<BLANKLINE>
<BLANKLINE>
Column-level special syntax can also be applied using the column info
dictionary. For example, we can specify the ENCODE for a column:
>>> product = sa.Table(
... 'product',
... metadata,
... sa.Column('id', sa.Integer, primary_key=True),
... sa.Column('name', sa.String, info={'encode': 'lzo'})
... )
>>> print(CreateTable(product).compile(engine))
<BLANKLINE>
CREATE TABLE product (
id INTEGER NOT NULL,
name VARCHAR ENCODE lzo,
PRIMARY KEY (id)
)
<BLANKLINE>
<BLANKLINE>
We can also specify the distkey and sortkey options:
>>> sku = sa.Table(
... 'sku',
... metadata,
... sa.Column('id', sa.Integer, primary_key=True),
... sa.Column(
... 'name', sa.String, info={'distkey': True, 'sortkey': True}
... )
... )
>>> print(CreateTable(sku).compile(engine))
<BLANKLINE>
CREATE TABLE sku (
id INTEGER NOT NULL,
name VARCHAR DISTKEY SORTKEY,
PRIMARY KEY (id)
)
<BLANKLINE>
<BLANKLINE>
"""
def post_create_table(self, table):
text = ""
info = table.dialect_options['redshift']
diststyle = info.get('diststyle')
if diststyle:
diststyle = diststyle.upper()
if diststyle not in ('EVEN', 'KEY', 'ALL'):
raise exc.CompileError(
u"diststyle {0} is invalid".format(diststyle)
)
text += " DISTSTYLE " + diststyle
distkey = info.get('distkey')
if distkey:
text += " DISTKEY ({0})".format(self.preparer.quote(distkey))
sortkey = info.get('sortkey')
interleaved_sortkey = info.get('interleaved_sortkey')
if sortkey and interleaved_sortkey:
raise exc.ArgumentError(
"Parameters sortkey and interleaved_sortkey are "
"mutually exclusive; you may not specify both."
)
if sortkey or interleaved_sortkey:
if isinstance(sortkey, string_types):
keys = [sortkey]
else:
keys = sortkey or interleaved_sortkey
keys = [key.name if isinstance(key, Column) else key
for key in keys]
if interleaved_sortkey:
text += " INTERLEAVED"
sortkey_string = ", ".join(self.preparer.quote(key)
for key in keys)
text += " SORTKEY ({0})".format(sortkey_string)
return text
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
colspec += " " + self.dialect.type_compiler.process(column.type)
default = self.get_column_default_string(column)
if default is not None:
# Identity constraints show up as *default* when reflected.
m = IDENTITY_RE.match(default)
if m:
colspec += " IDENTITY({seed},{step})".format(**m.groupdict())
else:
colspec += " DEFAULT " + default
colspec += self._fetch_redshift_column_attributes(column)
if not column.nullable:
colspec += " NOT NULL"
return colspec
def _fetch_redshift_column_attributes(self, column):
text = ""
if not hasattr(column, 'info'):
return text
info = column.info
identity = info.get('identity')
if identity:
text += " IDENTITY({0},{1})".format(identity[0], identity[1])
encode = info.get('encode')
if encode:
text += " ENCODE " + encode
distkey = info.get('distkey')
if distkey:
text += " DISTKEY"
sortkey = info.get('sortkey')
if sortkey:
text += " SORTKEY"
return text
class RedshiftDialect(PGDialect_psycopg2):
"""
Define Redshift-specific behavior.
Most public methods are overrides of the underlying interfaces defined in
:class:`~sqlalchemy.engine.interfaces.Dialect` and
:class:`~sqlalchemy.engine.Inspector`.
"""
name = 'redshift'
statement_compiler = RedshiftCompiler
ddl_compiler = RedshiftDDLCompiler
max_identifier_length = 127
construct_arguments = [
(schema.Index, {
"using": False,
"where": None,
"ops": {}
}),
(schema.Table, {
"ignore_search_path": False,
"diststyle": None,
"distkey": None,
"sortkey": None,
"interleaved_sortkey": None,
}),
]
def __init__(self, *args, **kw):
super(RedshiftDialect, self).__init__(*args, **kw)
# Cache domains, as these will be static;
# Redshift does not support user-created domains.
self._domains = None
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
"""
Return information about columns in `table_name`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_columns`.
"""
cols = self._get_redshift_columns(connection, table_name, schema, **kw)
if not self._domains:
self._domains = self._load_domains(connection)
domains = self._domains
columns = []
for col in cols:
column_info = self._get_column_info(
name=col.name, format_type=col.format_type,
default=col.default, notnull=col.notnull, domains=domains,
enums=[], schema=col.schema, encode=col.encode)
columns.append(column_info)
return columns
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
"""
Return information about the primary key constraint on `table_name`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_pk_constraint`.
"""
constraints = self._get_redshift_constraints(connection, table_name,
schema)
pk_constraints = [c for c in constraints if c.contype == 'p']
if not pk_constraints:
return {'constrained_columns': [], 'name': ''}
pk_constraint = pk_constraints[0]
m = PRIMARY_KEY_RE.match(pk_constraint.condef)
colstring = m.group('columns')
constrained_columns = SQL_IDENTIFIER_RE.findall(colstring)
return {
'constrained_columns': constrained_columns,
'name': None,
}
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
"""
Return information about foreign keys in `table_name`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_pk_constraint`.
"""
constraints = self._get_redshift_constraints(connection, table_name,
schema)
fk_constraints = [c for c in constraints if c.contype == 'f']
uniques = defaultdict(lambda: defaultdict(dict))
for con in fk_constraints:
uniques[con.conname]["key"] = con.conkey
uniques[con.conname]["condef"] = con.condef
fkeys = []
for conname, attrs in uniques.items():
m = FOREIGN_KEY_RE.match(attrs['condef'])
colstring = m.group('referred_columns')
referred_columns = SQL_IDENTIFIER_RE.findall(colstring)
referred_table = m.group('referred_table')
referred_schema = m.group('referred_schema')
colstring = m.group('columns')
constrained_columns = SQL_IDENTIFIER_RE.findall(colstring)
fkey_d = {
'name': None,
'constrained_columns': constrained_columns,
'referred_schema': referred_schema,
'referred_table': referred_table,
'referred_columns': referred_columns,
}
fkeys.append(fkey_d)
return fkeys
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
"""
Return a list of table names for `schema`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_table_names`.
"""
return self._get_table_or_view_names('r', connection, schema, **kw)
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
"""
Return a list of view names for `schema`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_view_names`.
"""
return self._get_table_or_view_names('v', connection, schema, **kw)
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
"""Return view definition.
Given a :class:`.Connection`, a string `view_name`,
and an optional string `schema`, return the view definition.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_view_definition`.
"""
view = self._get_redshift_relation(connection, view_name, schema, **kw)
return sa.text(view.view_definition)
def get_indexes(self, connection, table_name, schema, **kw):
"""
Return information about indexes in `table_name`.
Because Redshift does not support traditional indexes,
this always returns an empty list.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_indexes`.
"""
return []
@reflection.cache
def get_unique_constraints(self, connection, table_name,
schema=None, **kw):
"""
Return information about unique constraints in `table_name`.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.get_unique_constraints`.
"""
constraints = self._get_redshift_constraints(connection,
table_name, schema)
constraints = [c for c in constraints if c.contype == 'u']
uniques = defaultdict(lambda: defaultdict(dict))
for con in constraints:
uniques[con.conname]["key"] = con.conkey
uniques[con.conname]["cols"][con.attnum] = con.attname
return [
{'name': None,
'column_names': [uc["cols"][i] for i in uc["key"]]}
for name, uc in uniques.items()
]
@reflection.cache
def get_table_options(self, connection, table_name, schema, **kw):
"""
Return a dictionary of options specified when the table of the
given name was created.
Overrides interface
:meth:`~sqlalchemy.engine.Inspector.get_table_options`.
"""
def keyfunc(column):
num = int(column.sortkey)
# If sortkey is interleaved, column numbers alternate
# negative values, so take abs.
return abs(num)
table = self._get_redshift_relation(connection, table_name,
schema, **kw)
columns = self._get_redshift_columns(connection, table_name,
schema, **kw)
sortkey_cols = sorted([col for col in columns if col.sortkey],
key=keyfunc)
interleaved = any([int(col.sortkey) < 0 for col in sortkey_cols])
sortkey = [col.name for col in sortkey_cols]
interleaved_sortkey = None
if interleaved:
interleaved_sortkey = sortkey
sortkey = None
distkeys = [col.name for col in columns if col.distkey]
distkey = distkeys[0] if distkeys else None
return {
'redshift_diststyle': table.diststyle,
'redshift_distkey': distkey,
'redshift_sortkey': sortkey,
'redshift_interleaved_sortkey': interleaved_sortkey,
}
def create_connect_args(self, *args, **kwargs):
"""
Build DB-API compatible connection arguments.
Overrides interface
:meth:`~sqlalchemy.engine.interfaces.Dialect.create_connect_args`.
"""
default_args = {
'sslmode': 'verify-full',
'sslrootcert': pkg_resources.resource_filename(
__name__,
'redshift-ssl-ca-cert.pem'
),
}
cargs, cparams = super(RedshiftDialect, self).create_connect_args(
*args, **kwargs
)
default_args.update(cparams)
return cargs, default_args
def _get_table_or_view_names(self, relkind, connection, schema=None, **kw):
default_schema = inspect(connection).default_schema_name
if not schema:
schema = default_schema
info_cache = kw.get('info_cache')
all_relations = self._get_all_relation_info(connection,
info_cache=info_cache)
relation_names = []
for key, relation in all_relations.items():
this_schema, this_relation = _get_schema_and_relation(key)
if this_schema is None:
this_schema = default_schema
if this_schema == schema and relation.relkind == relkind:
relation_names.append(this_relation)
return relation_names
def _get_column_info(self, *args, **kwargs):
kw = kwargs.copy()
encode = kw.pop('encode', None)
column_info = super(RedshiftDialect, self)._get_column_info(
*args,
**kw
)
if isinstance(column_info['type'], VARCHAR):
if column_info['type'].length is None:
column_info['type'] = NullType()
if 'info' not in column_info:
column_info['info'] = {}
if encode and encode != 'none':
column_info['info']['encode'] = encode
return column_info
def _get_redshift_relation(self, connection, table_name,
schema=None, **kw):
info_cache = kw.get('info_cache')
all_relations = self._get_all_relation_info(connection,
info_cache=info_cache)
key = _get_relation_key(table_name, schema)
if key not in all_relations.keys():
key = unquoted(key)
try:
return all_relations[key]
except KeyError:
raise sa.exc.NoSuchTableError(key)
def _get_redshift_columns(self, connection, table_name, schema=None, **kw):
info_cache = kw.get('info_cache')
all_columns = self._get_all_column_info(connection,
info_cache=info_cache)
key = _get_relation_key(table_name, schema)
if key not in all_columns.keys():
key = unquoted(key)
return all_columns[key]
def _get_redshift_constraints(self, connection, table_name,
schema=None, **kw):
info_cache = kw.get('info_cache')
all_constraints = self._get_all_constraint_info(connection,
info_cache=info_cache)
key = _get_relation_key(table_name, schema)
if key not in all_constraints.keys():
key = unquoted(key)
return all_constraints[key]
@reflection.cache
def _get_all_relation_info(self, connection, **kw):
result = connection.execute("""
SELECT
c.relkind,
n.oid as "schema_oid",
n.nspname as "schema",
c.oid as "rel_oid",
c.relname,
CASE c.reldiststyle
WHEN 0 THEN 'EVEN' WHEN 1 THEN 'KEY' WHEN 8 THEN 'ALL' END
AS "diststyle",
c.relowner AS "owner_id",
u.usename AS "owner_name",
TRIM(TRAILING ';' FROM pg_catalog.pg_get_viewdef(c.oid, true))
AS "view_definition",
pg_catalog.array_to_string(c.relacl, '\n') AS "privileges"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
JOIN pg_catalog.pg_user u ON u.usesysid = c.relowner
WHERE c.relkind IN ('r', 'v', 'm', 'S', 'f')
AND n.nspname !~ '^pg_'
ORDER BY c.relkind, n.oid, n.nspname;
""")
relations = {}
for rel in result:
schema = rel.schema
if schema == inspect(connection).default_schema_name:
schema = None
key = _get_relation_key(rel.relname, schema)
relations[key] = rel
return relations
@reflection.cache
def _get_all_column_info(self, connection, **kw):
all_columns = defaultdict(list)
with connection.contextual_connect() as cc:
# We fetch the current search_path, which may or may not quote
# '$user' depending on whether other schemas need quoting.
search_path = cc.execute("SHOW search_path").scalar()
if '$user' in search_path and '"$user"' not in search_path:
search_path = search_path.replace('$user', '"$user"')
# Because pg_table_def only shows results for schemas on the
# search_path, we explicitly include all non-system schemas, then
# replace the original value for search_path.
schema_names = ['"%s"' % r.name for r in cc.execute("""
SELECT nspname AS "name"
FROM pg_catalog.pg_namespace
WHERE nspname !~ '^pg_' AND nspname <> 'information_schema'
ORDER BY 1
""")]
modified_search_path = ','.join(schema_names)
cc.execute("SET LOCAL search_path TO %s" % modified_search_path)
result = cc.execute("""
SELECT
n.nspname as "schema",
c.relname as "table_name",
d.column as "name",
encoding as "encode",
type, distkey, sortkey, "notnull", adsrc, attnum,
pg_catalog.format_type(att.atttypid, att.atttypmod),
pg_catalog.pg_get_expr(ad.adbin, ad.adrelid) AS DEFAULT,
n.oid as "schema_oid",
c.oid as "table_oid"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n
ON n.oid = c.relnamespace
JOIN pg_catalog.pg_table_def d
ON (d.schemaname, d.tablename) = (n.nspname, c.relname)
JOIN pg_catalog.pg_attribute att
ON (att.attrelid, att.attname) = (c.oid, d.column)
LEFT JOIN pg_catalog.pg_attrdef ad
ON (att.attrelid, att.attnum) = (ad.adrelid, ad.adnum)
WHERE n.nspname !~ '^pg_'
ORDER BY n.nspname, c.relname, att.attnum
""")
for col in result:
schema = col.schema
if schema == inspect(connection).default_schema_name:
schema = None
key = _get_relation_key(col.table_name, schema)
all_columns[key].append(col)
cc.execute("SET LOCAL search_path TO %s" % search_path)
return dict(all_columns)
@reflection.cache
def _get_all_constraint_info(self, connection, **kw):
result = connection.execute("""
SELECT
n.nspname as "schema",
c.relname as "table_name",
t.contype,
t.conname,
t.conkey,
a.attnum,
a.attname,
pg_catalog.pg_get_constraintdef(t.oid, true) as condef,
n.oid as "schema_oid",
c.oid as "rel_oid"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n
ON n.oid = c.relnamespace
JOIN pg_catalog.pg_constraint t
ON t.conrelid = c.oid
JOIN pg_catalog.pg_attribute a
ON t.conrelid = a.attrelid AND a.attnum = ANY(t.conkey)
WHERE n.nspname !~ '^pg_'
ORDER BY n.nspname, c.relname
""")
all_constraints = defaultdict(list)
for con in result:
schema = con.schema
if schema == inspect(connection).default_schema_name:
schema = None
key = _get_relation_key(con.table_name, schema)
all_constraints[key].append(con)
return all_constraints
def gen_columns_from_children(root):
"""
Generates columns that are being used in child elements of the delete query
this will be used to determine tables for the using clause.
:param root: the delete query
:return: a generator of columns
"""
if isinstance(root, (Delete, BinaryExpression, BooleanClauseList)):
for child in root.get_children():
yc = gen_columns_from_children(child)
for it in yc:
yield it
elif isinstance(root, sa.Column):
yield root
@compiles(Delete, 'redshift')
def visit_delete_stmt(element, compiler, **kwargs):
"""
Adds redshift-dialect specific compilation rule for the
delete statement.
Redshift DELETE syntax can be found here:
https://docs.aws.amazon.com/redshift/latest/dg/r_DELETE.html
.. :code-block: sql
DELETE [ FROM ] table_name
[ { USING } table_name, ...]
[ WHERE condition ]
By default, SqlAlchemy compiles DELETE statements with the
syntax:
.. :code-block: sql
DELETE [ FROM ] table_name
[ WHERE condition ]
problem illustration:
>>> from sqlalchemy import Table, Column, Integer, MetaData, delete
>>> from sqlalchemy_redshift.dialect import RedshiftDialect
>>> meta = MetaData()
>>> table1 = Table(
... 'table_1',
... meta,
... Column('pk', Integer, primary_key=True)
... )
...
>>> table2 = Table(
... 'table_2',
... meta,
... Column('pk', Integer, primary_key=True)
... )
...
>>> del_stmt = delete(table1).where(table1.c.pk==table2.c.pk)
>>> str(del_stmt.compile(dialect=RedshiftDialect()))
'DELETE FROM table_1 USING table_2 WHERE table_1.pk = table_2.pk'
>>> str(del_stmt)
'DELETE FROM table_1 WHERE table_1.pk = table_2.pk'
>>> del_stmt2 = delete(table1)
>>> str(del_stmt2)
'DELETE FROM table_1'
>>> del_stmt3 = delete(table1).where(table1.c.pk > 1000)
>>> str(del_stmt3)
'DELETE FROM table_1 WHERE table_1.pk > :pk_1'
>>> str(del_stmt3.compile(dialect=RedshiftDialect()))
'DELETE FROM table_1 WHERE table_1.pk > %(pk_1)s'
"""
# Set empty strings for the default where clause and using clause
whereclause = ''
usingclause = ''
# determine if the delete query needs a ``USING`` injected
# by inspecting the whereclause's children & their children...
    # first, the where clause text is built, if applicable
# then, the using clause text is built, if applicable
# note:
# the tables in the using clause are sorted in the order in
# which they first appear in the where clause.
delete_stmt_table = compiler.process(element.table, asfrom=True, **kwargs)
whereclause_tuple = element.get_children()
if whereclause_tuple:
usingclause_tables = []
whereclause = ' WHERE {clause}'.format(
clause=compiler.process(*whereclause_tuple, **kwargs)
)
whereclause_columns = gen_columns_from_children(element)
for col in whereclause_columns:
table = compiler.process(col.table, asfrom=True, **kwargs)
if table != delete_stmt_table and table not in usingclause_tables:
usingclause_tables.append(table)
if usingclause_tables:
usingclause = ' USING {clause}'.format(
clause=', '.join(usingclause_tables)
)
return 'DELETE FROM {table}{using}{where}'.format(
table=delete_stmt_table,
using=usingclause,
where=whereclause)
|
mit
| 1,824,985,647,472,262,000
| 35.461078
| 79
| 0.570636
| false
| 4.021797
| false
| false
| false
|
wgwoods/blivet
|
blivet/flags.py
|
1
|
3889
|
# flags.py
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): David Lehman <dlehman@redhat.com>
#
import shlex
import selinux
class Flags(object):
def __init__(self):
#
# mode of operation
#
self.testing = False
self.installer_mode = False
#
# minor modes (installer-specific)
#
self.automated_install = False
self.live_install = False
self.image_install = False
#
# enable/disable functionality
#
self.selinux = selinux.is_selinux_enabled()
self.multipath = True
self.dmraid = True
self.ibft = True
self.noiswmd = False
self.gfs2 = True
self.jfs = True
self.reiserfs = True
self.arm_platform = None
self.gpt = False
self.multipath_friendly_names = True
# set to False to suppress the default LVM behavior of saving
# backup metadata in /etc/lvm/{archive,backup}
self.lvm_metadata_backup = True
# whether to include nodev filesystems in the devicetree (only
# meaningful when flags.installer_mode is False)
self.include_nodev = False
self.boot_cmdline = {}
self.update_from_boot_cmdline()
self.allow_degraded_mdraid = True
def get_boot_cmdline(self):
buf = open("/proc/cmdline").read().strip()
args = shlex.split(buf)
for arg in args:
(opt, _equals, val) = arg.partition("=")
if val:
self.boot_cmdline[opt] = val
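    # Editorial note: only "key=value" arguments are recorded above, so a bare
    # flag such as "nompath" only appears in boot_cmdline when it was passed
    # with a value (e.g. "nompath=1").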
def update_from_boot_cmdline(self):
self.get_boot_cmdline()
if "nompath" in self.boot_cmdline:
self.multipath = False
if "nodmraid" in self.boot_cmdline:
self.dmraid = False
if "noiswmd" in self.boot_cmdline:
self.noiswmd = True
def update_from_anaconda_flags(self, anaconda_flags):
self.installer_mode = True
self.testing = anaconda_flags.testing
self.automated_install = anaconda_flags.automatedInstall
self.live_install = anaconda_flags.livecdInstall
self.image_install = anaconda_flags.imageInstall
self.selinux = anaconda_flags.selinux
self.gfs2 = "gfs2" in self.boot_cmdline
self.jfs = "jfs" in self.boot_cmdline
self.reiserfs = "reiserfs" in self.boot_cmdline
self.arm_platform = anaconda_flags.armPlatform
self.gpt = anaconda_flags.gpt
self.multipath_friendly_names = anaconda_flags.mpathFriendlyNames
self.allow_degraded_mdraid = anaconda_flags.rescue_mode
self.ibft = anaconda_flags.ibft
self.dmraid = anaconda_flags.dmraid
# We don't want image installs writing backups of the *image* metadata
# into the *host's* /etc/lvm. This can get real messy on build systems.
if self.image_install:
self.lvm_metadata_backup = False
flags = Flags()
|
gpl-2.0
| -8,068,387,037,196,000,000
| 31.957627
| 79
| 0.646953
| false
| 3.846686
| false
| false
| false
|
beaker-project/beaker
|
Server/bkr/server/model/openstack.py
|
1
|
1285
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.types import Integer, Unicode
from sqlalchemy.orm import relationship
from bkr.server.model.base import DeclarativeMappedObject
from bkr.server.model.lab import LabController
from .types import UUID
# Currently Beaker does not understand OpenStack regions, so there should only
# be one row in this table, created by the administrator. In future this can be
# expanded to track multiple regions associated with different lab controllers.
class OpenStackRegion(DeclarativeMappedObject):
__tablename__ = 'openstack_region'
__table_args__ = {'mysql_engine': 'InnoDB'}
id = Column(Integer, autoincrement=True, nullable=False, primary_key=True)
lab_controller_id = Column(Integer, ForeignKey('lab_controller.id',
name='openstack_region_lab_controller_id_fk'), nullable=False)
lab_controller = relationship(LabController, back_populates='openstack_regions')
# NULL ipxe_image_id means not uploaded yet
ipxe_image_id = Column(UUID)
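    # Illustrative creation (editorial sketch; attribute names as defined above):
    #     OpenStackRegion(lab_controller=some_lab_controller, ipxe_image_id=None)
    # where ipxe_image_id stays NULL until the iPXE image has been uploaded.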
|
gpl-2.0
| 1,555,254,236,307,001,600
| 46.592593
| 84
| 0.768093
| false
| 4.158576
| false
| false
| false
|
talset/monitoring-plugins
|
disk/check_disk.py
|
1
|
4769
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Florian Lambert <flambert@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Requirements: python
#
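# Example invocation (editorial; Nagios-style exit codes 0/1/2 as defined below):
#   ./check_disk.py -b /host/ -w 80 -c 90 -e /var/lib /boot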
import sys
import argparse
import subprocess
VERSION = '1.1'
STATE_TEXT = ['Ok', 'Warning', 'Critical', 'Unknow']
PARSER = argparse.ArgumentParser(description='Recursive disk check')
PARSER.add_argument("-b", "--base",
type=str,
                    help='Base directory to monitor, e.g. /host/ to check only volumes mounted under /host/ (Default: /)',
default="/")
PARSER.add_argument("-e", "--excludes",
type=str, nargs='+',
                    help='List of mountpoints to exclude recursively, e.g. /var/lib will exclude /var/lib*',
default=[])
PARSER.add_argument("-w", "--warning",
type=int,
help='Warning value (Default: 85)',
default=85)
PARSER.add_argument("-c", "--critical",
type=int,
help='Critical value (Default: 95)',
default=95)
PARSER.add_argument("-v", "--version",
action='store_true',
help='Print script version')
ARGS = PARSER.parse_args()
def check_df(base,warning,critical,excludes):
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
STATE_UNKNOWN = 3
STATE = STATE_OK
df_cmd = ("df --exclude-type=tmpfs "
"--exclude-type=devtmpfs "
"--output=source,target,fstype,iused,itotal,ipcent,used,size,pcent "
"--block-size G")
stdout = subprocess.check_output(df_cmd, shell=True).strip().split("\n")
# remove the header output
del stdout[0]
_output_message = []
_disk_ok = []
for line in stdout:
# Exclude filter on target mount point
col = line.split()
# 0: source
# 1: target
# 2: fstype
# 3: iused
# 4: itotal
# 5: ipcent
# 6: used
# 7: size
# 8: pcent
if not is_based(base,col[1]) or is_excluded(excludes,col[1]):
continue
_disk_ok.append(col[1])
        # csize: percent usage
csize = int(col[8].rstrip('%'))
if csize >= int(critical): # CRITICAL
STATE = STATE_CRITICAL
_output_message.append("Disk Block %s %s Used" % (col[1], col[8]))
elif csize >= int(warning): # WARNING
# Update state warning only if the current is not critical
if STATE < STATE_CRITICAL:
STATE = STATE_WARNING
_output_message.append("Disk Block %s %s Used" % (col[1], col[8]))
        # cinode: percent inode usage
cinode = int(col[5].rstrip('%'))
if cinode >= int(critical): # CRITICAL
STATE = STATE_CRITICAL
_output_message.append("Disk Inode %s %s Used" % (col[1], col[5]))
elif cinode >= int(warning): # WARNING
# Update state warning only if the current is not critical
if STATE < STATE_CRITICAL:
STATE = STATE_WARNING
_output_message.append("Disk Inode %s %s Used" % (col[1], col[5]))
if STATE == STATE_OK:
output_message = "Disk %s" % (' || '.join(_disk_ok))
else:
output_message = ' || '.join(_output_message)
return output_message,STATE
def is_excluded(excludes,path):
#Check if the mount path is in the excludes
for ex in excludes:
if path.startswith(ex):
return True
return False
def is_based(base,path):
#Check if the mount path is in the base path
if path.startswith(base):
return True
return False
if __name__ == "__main__":
if ARGS.version:
print "version: %s" % (VERSION)
sys.exit(0)
(OUTPUT_MESSAGE,STATE) = check_df(base=ARGS.base,
warning=ARGS.warning,
critical=ARGS.critical,
excludes=ARGS.excludes)
try:
print "%s: %s" % (STATE_TEXT[STATE], OUTPUT_MESSAGE)
sys.exit(STATE)
except ValueError:
print "Oops! cant return STATE"
sys.exit(STATE_UNKNOWN)
|
apache-2.0
| -8,505,828,684,046,612,000
| 32.118056
| 135
| 0.563011
| false
| 3.915435
| false
| false
| false
|
feend78/evennia
|
evennia/typeclasses/migrations/0008_lock_and_perm_rename.py
|
2
|
1717
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-25 22:30
from __future__ import unicode_literals
import re
from django.db import migrations
def update_perms_and_locks(apps, schema_editor):
# update all permissions
Tag = apps.get_model('typeclasses', 'Tag')
perm_map = {"guests": "guest", "players": "player", "playerhelpers": "helper",
"builders": "builder", "wizards": "admin", "immortals": "developer"}
for perm in Tag.objects.filter(db_tagtype="permission"):
if perm.db_key in perm_map:
perm.db_key = perm_map[perm.db_key]
perm.save(update_fields=("db_key",))
# update all locks on all entities
apps_models = [("objects", "ObjectDB"), ("accounts", "AccountDB"), ("scripts", "ScriptDB"),
("comms", "ChannelDB")]
p_reg = re.compile(r"(?<=perm\()(\w+)(?=\))|(?<=perm_above\()(\w+)(?=\))",
re.IGNORECASE + re.UNICODE)
def _sub(match):
perm = match.group(1)
return perm_map[perm.lower()].capitalize() if (perm and perm.lower() in perm_map) else perm
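    # Editorial example: a lock string such as "edit:perm(wizards)" is rewritten
    # by the substitution below to "edit:perm(Admin)".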
for app_tuple in apps_models:
TClass = apps.get_model(*app_tuple)
for obj in TClass.objects.filter(db_lock_storage__icontains="perm"):
orig_lock = obj.db_lock_storage
repl_lock = p_reg.sub(_sub, orig_lock)
if repl_lock != orig_lock:
obj.db_lock_storage = repl_lock
obj.save(update_fields=('db_lock_storage',))
class Migration(migrations.Migration):
dependencies = [
('typeclasses', '0007_tag_migrations_may_be_slow'),
]
operations = [
migrations.RunPython(update_perms_and_locks)
]
|
bsd-3-clause
| -2,164,045,350,960,106,800
| 33.34
| 99
| 0.585323
| false
| 3.511247
| false
| false
| false
|
beeryardtech/scripts
|
python/dk_test/scenarios/scenario_common.py
|
2
|
4295
|
__author__ = "Travis Goldie"
__email__ = "test_automation@us.sios.com"
__date__ = "11/14/12"
__copyright__ = "(c) SIOS Technology Corp 2012"
#For unittesting
import os
import sys
from time import sleep
sys.path.insert(0, r"C:\Program Files\dk_test\libs")
from dkconfig import dkconfig
from exectests import checkAppGUICmd, runTests
from winfuncs import deletealljobs, deleteallmirrors
from dklog import markTestScenEvent
class scenario_common(object):
"""
Purpose:
The top level scenario class that will set the basic (or most common)
functionality for each scenario.
Child classes can override the class methods and class properties to
customize for each test scenario
Class Methods:
runTest
executeTest
setUp
tearDown
"""
@classmethod
def __init__(self, _config, _settings, _loadedTestCases, _scenarioName):
#Setup
self.config = _config
self.logger = self.config.logger
self.results = self.config.results
self.settings = _settings
self.repeatcount = int(self.settings["repeat"])
self.sleep = float(self.settings.get("sleep",
self.config.settings["defaultsleep"]))
self.testCases = _loadedTestCases
self.scenarioName = _scenarioName
@classmethod
@markTestScenEvent
def runTest(self):
"""
Purpose:
Use the generated test cases to execute test cases as defined by
            the parameter list. All these functions can be overridden in each
            scenario module that inherits from this file.
"""
numOfTestCases = len(self.testCases)
numTestsPassed = 0
runCounter = 0
successfulTestCount = 0
#Run setup and prepare environment for test cases
self.setUp()
        #Iterate over the possible test cases. A test case should end in a state
        #that will let the next test run.
for testName, cmdParams in self.testCases.items():
successfulTestCount = 0 #Used to check pass/fail of each scenario
self.logger.info("START {}".format(testName))
self.results("START {}".format(testName))
#TODO Need to do this differently...
if self.repeatcount == -1:
numberOfRuns = 0
try:
while True:
if self.executeTest(testName, cmdParams):
self.results("PASSED {}".format(testName))
successfulTestCount += 1
else:
self.results("FAILED {}".format(testName))
numberOfRuns += 1
sleep(1.0)
except KeyboardInterrupt as err:
                self.logger.info("Keyboard Interrupt received. Test ended.")
runCounter = numberOfRuns
elif self.repeatcount > 0:
#Repeat each test case based on the repeat count setting
for index in range(self.repeatcount):
successfulTestCount += self.executeTest(testName, cmdParams)
runCounter = self.repeatcount
#Did the test pass or fail?
if successfulTestCount == runCounter:
self.results("PASSED {}".format(testName))
numTestsPassed += 1
else:
self.results("FAILED {}".format(testName))
self.results("Scenario {}: PASSED {}, FAILED {}".
format(self.scenarioName,
numTestsPassed,
(numOfTestCases - numTestsPassed) ))
#After all the tests have run, teardown the environment and clean up.
self.tearDown()
@classmethod
def executeTest(self, testName, cmdParams):
"""
Purpose:
            Execute the test commands. This can be overridden in the test case.
"""
successfulTestFlag = True
#Run all the commands and count how many returned correctly
for cmd, params in cmdParams.items():
if not checkAppGUICmd(self.config, testName, cmd, params):
successfulTestFlag = False
sleep(self.sleep)
return successfulTestFlag
@classmethod
def setUp(self):
"""
Purpose:
            Prepares the environment for the test case. This can be overridden
in the test case.
"""
deleteallmirrors(self.config)
@classmethod
def tearDown(self):
"""
Purpose:
            Cleans up after the completion of a test case. This can be overridden
in the test case.
"""
deleteallmirrors(self.config)
if __name__ == '__main__':
config = dkconfig(r"C:\Program Files\dk_test\scenarios\cluster.ini")
config.runEnvSetup()
runTests(config, r"C:\Program Files\dk_test\testsuite\test_smoke.ini")
|
apache-2.0
| 98,617,112,899,321,780
| 26.071895
| 74
| 0.677299
| false
| 3.460919
| true
| false
| false
|
vergecurrency/VERGE
|
test/functional/wallet_txn_doublespend.py
|
1
|
5648
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there is a double-spend conflict."""
from decimal import Decimal
from test_framework.test_framework import VergeTestFramework
from test_framework.util import (
assert_equal,
connect_nodes,
disconnect_nodes,
find_output,
sync_blocks,
)
class TxnMallTest(VergeTestFramework):
def set_test_params(self):
self.num_nodes = 4
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super().setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
# All nodes should start with 1,250 XSH:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar addresses:
node0_address_foo = self.nodes[0].getnewaddress()
fund_foo_txid = self.nodes[0].sendtoaddress(node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress()
fund_bar_txid = self.nodes[0].sendtoaddress(node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(),
starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress()
# First: use raw transaction API to send 1240 XSH to node1_address,
# but don't broadcast:
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
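        # The two funding inputs total 1248 XSH (1219 + 29); 1240 XSH goes to
        # node1_address and the remainder, adjusted by the (negative)
        # doublespend fee, is returned to the change address.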
outputs = {}
outputs[node1_address] = 1240
outputs[change_address] = 1248 - 1240 + doublespend_fee
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransactionwithwallet(rawtx)
assert_equal(doublespend["complete"], True)
# Create two spends using 1 50 XSH coin each
txid1 = self.nodes[0].sendtoaddress(node1_address, 40)
txid2 = self.nodes[0].sendtoaddress(node1_address, 20)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50XSH for another
# matured block, minus 40, minus 20, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block:
expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance(), starting_balance - tx1["amount"] - tx2["amount"])
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
# Node0's total balance should be starting balance, plus 100XSH for
# two more matured blocks, minus 1240 for the double-spend, plus fees (which are
# negative):
expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
assert_equal(self.nodes[0].getbalance(), expected)
# Node1's balance should be its initial balance (1250 for 25 block rewards) plus the doublespend:
assert_equal(self.nodes[1].getbalance(), 1250 + 1240)
if __name__ == '__main__':
TxnMallTest().main()
|
mit
| -443,130,276,645,554,200
| 41.787879
| 111
| 0.633853
| false
| 3.677083
| true
| false
| false
|
NavarraBiomed/clips
|
clips_app/management/commands/test.py
|
1
|
12162
|
from django.core.management.base import BaseCommand, CommandError
from clips_app.models import Case
import django
import csv
def parse_num(value):
if value.isdigit():
return int(value)
else:
return None;
def parse_date(value):
pieces = value.split("/")
try:
return (pieces[2]+"-"+pieces[0]+"-"+pieces[1])
except IndexError:
return None;
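# Editorial examples: parse_num("42") -> 42, parse_num("n/a") -> None;
# parse_date("06/25/2015") -> "2015-06-25", and a malformed date returns None.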
class Command(BaseCommand):
help = 'Command test'
def add_arguments(self, parser):
parser.add_argument('file', nargs='+', type = str)
def handle(self, *args, **options):
input_file = options['file'][0]
print("Reading data from " + input_file)
model_to_row = {
'doctor' : 'Medicoresponsable',
'date' : 'Date',
'name' : 'Name',
'id_number' : 'IDnumber',
'age' : 'Age',
'age_interval' : 'Tramos_edad',
'sex' : 'Sex',
'asa' : 'ASA' ,
#'hypertension' : '',
'hb' : 'HB',
'platelets' : 'Platelets',
'inr' : 'INR',
'pt' : 'PT',
'aspirin' : 'Aspirin',
'anticoagulants' : 'Anticoagulants',
'antiplatelet_anticoagulant' : 'Antiplatelet_anticoagulant',
#'heparinbridgetherapy' : '',
#'nombre_p_activo_antiagreg_anticoag' : '',
#'day_of_reintroduction_antiagregant' : '',
'paris_calif' : 'ParisClasif',
'lst_yn' : 'LSTyn',
#'lst_morphology' : '',
'large_nodule_one_cm' : 'LargeNodule1cm',
'demacrated_depressed_area' : 'DemarcatedDepressedArea',
'sclerous_wall_change' : 'SclerousWallChange',
'fold_convergency' : 'FoldConvergency',
'chicken_skin_mucosa_around' : 'ChickenSkinMucosaAround',
'maximum_size_mm' : 'Size.mm', #?
'area_square_cm' : 'Areacm2',
'location' : 'Location',
'ileocecal_valve_involvement' : 'Ileocecalvalveinvolvement',
'high_definition' : 'HighDefinition',
#'histologyigh_definition' : '',
'endoscopemodel' : 'Endoscopemodel',
'nbi' : 'NBI',
'nbi_sano' : 'NBI.Sano',
'nbi_nice' : 'NBI.NICE',
'cromoendoscopy' : 'cromoendoscopy',
'kudo' : 'Kudo',
'prepathologic_endoscopic_diagnostic_a' : 'PrepathologicEndoscopicDiagnosticA',
'prepathologic_endoscopic_diagnostic_b' : 'PrepathologicEndoscopicDiagnosticB',
'correct_dx_adenoma_serrated': 'CorrectDxAdenomaSerrado',
'correct_dx_invasion' : 'CorrectDxInvasiónprofunda',
'histology' : 'Histology',
'histol_simplified' : 'Histol_simplified',
'time_of_procedure_in_mins' : 'Timeofprocedureinmins',
'difficulty_of_emr' : 'DifficultyofEMR',
'accesibility' : 'Accessibility',
'resection' : 'Resection',
'resection_yn' : 'ResectionYN',
'previous_biopsy' : 'Previous.biopsy',
'previous_attempt' : 'Previous.attempt',
'non_lifting_sign' : 'Nonliftingsign',
'technique' : 'Technique',
'technique_two' : 'Technique2',
'limit_marks' : 'LimitMarks',
'injection' : 'Injection',
'adrenaline' : 'Adrenaline',
'endocut' : 'Endocut',
'electrosurgical_generator_model' : 'Electrosurgicalgeneratormodel',
'polyp_retrieval' : 'PolypRetrieval',
'argon_PC' : 'ArgonPC',
'argon_coagulacion' : 'argón_coagulación',
'coagulation_forceps' : 'Coagulationforceps',
'clips' : 'Clipping', #?
#'clips_control_group' : '',
#'clips_tratment_group' : '',
#'not_tired_closure_by' : '',
#'closure_technique' : '',
'number_clips_needed' : 'ClipsNeeded',
'perforation' : 'Perforation',
'surgery_from_endoscopy' : 'Surgeryfromendoscopy',
'surgery_by_complication' : 'Surgerybycomplication',
'bleeding' : 'Bleeding',
#'immediate_bleeding' : '',
'delayed_bleeding' : 'Delayedbleeding',
'bleeding_treatment' : 'BleedingTreatment',
'transfusion' : 'Trasfusion',
'pps' : 'SPP', #?
#'fever' : '',
#'pain_requiring_medical_intervention' : '',
'hospital_stay_by_technique' : 'HospitalStayByTechniche',
'hospital_stay_by_complication' : 'HospitalStayByComplication',
'follow_up_months' : 'FollowUpInMonths',
'successful_treatment' : 'Successfultreatment',
'sedation' : 'Sedation',
'last_date_endoscopic_follow_up' : 'LastDateEndoscopicFollowUp',
'recurrence_three_six_months_control' : 'Recurrence3monthscontrol',
'recurrenec_one_year_control' : 'Recurrence1yearcontrol',
'global_recurrence' : 'Globalrecurrence',
'other_complications_comments' : 'OtherComplicationsComments',
'other_comments' : 'OtherComments'
}
with open(input_file, 'rt') as f:
reader = csv.DictReader(f)
#reader_list = list(reader)
#print(reader_list[0].keys())
for index, row in enumerate(reader):
#row = reader_list[0]
print("-------- Case #"+ str(index)+" ----------")
for field in Case._meta.get_fields():
if type(field) is django.db.models.fields.IntegerField:
try:
row[model_to_row[field.name]] = parse_num(row[model_to_row[field.name]])
except KeyError:
print("KeyError: "+field.name)
elif type(field) is django.db.models.fields.DateField:
try:
row[model_to_row[field.name]] = parse_date(row[model_to_row[field.name]])
except:
print("Date format error in :"+model_to_row[field.name]+ " -> "+row[model_to_row[field.name]])
Case.objects.create(
doctor = row['Medicoresponsable'],
date = row['Date'],
name = row['Name'],
id_number = row['IDnumber'],
age = row['Age'],
age_interval = row['Tramos_edad'],
sex = row['Sex'],
asa = row['ASA'] ,
#hypertension = row[],
hb = row['HB'],
platelets = row['Platelets'],
inr = row['INR'],
pt = row['PT'],
aspirin = row['Aspirin'],
anticoagulants = row['Anticoagulants'],
antiplatelet_anticoagulant = row['Antiplatelet_anticoagulant'],
#heparinbridgetherapy = row[''],
# nombre_p_activo_antiagreg_anticoag = row[''],
# day_of_reintroduction_antiagregant = row[''],
paris_calif = row['ParisClasif'],
lst_yn = row['LSTyn'],
#lst_morphology = row[''],
large_nodule_one_cm = row['LargeNodule1cm'],
demacrated_depressed_area = row['DemarcatedDepressedArea'],
sclerous_wall_change = row['SclerousWallChange'],
fold_convergency = row['FoldConvergency'],
chicken_skin_mucosa_around = row['ChickenSkinMucosaAround'],
maximum_size_mm = row['Size.mm'], #?
area_square_cm = row['Areacm2'],
location = row['Location'],
ileocecal_valve_involvement = row['Ileocecalvalveinvolvement'],
high_definition = row['HighDefinition'],
#histologyigh_definition = row[''],
endoscopemodel = row['Endoscopemodel'],
nbi = row['NBI'],
nbi_sano = row['NBI.Sano'],
nbi_nice = row['NBI.NICE'],
cromoendoscopy = row['cromoendoscopy'],
kudo = row['Kudo'],
prepathologic_endoscopic_diagnostic_a = row['PrepathologicEndoscopicDiagnosticA'],
prepathologic_endoscopic_diagnostic_b = row['PrepathologicEndoscopicDiagnosticB'],
correct_dx_adenoma_serrated= row['CorrectDxAdenomaSerrado'],
correct_dx_invasion = row['CorrectDxInvasiónprofunda'],
histology = row['Histology'],
histol_simplified = row['Histol_simplified'],
time_of_procedure_in_mins = row['Timeofprocedureinmins'],
difficulty_of_emr = row['DifficultyofEMR'],
accesibility = row['Accessibility'],
resection = row['Resection'],
resection_yn = row['ResectionYN'],
previous_biopsy = row['Previous.biopsy'],
previous_attempt = row['Previous.attempt'],
non_lifting_sign = row['Nonliftingsign'],
technique = row['Technique'],
technique_two = row['Technique2'],
limit_marks = row['LimitMarks'],
injection = row['Injection'],
adrenaline = row['Adrenaline'],
endocut = row['Endocut'],
electrosurgical_generator_model = row['Electrosurgicalgeneratormodel'],
polyp_retrieval = row['PolypRetrieval'],
argon_PC = row['ArgonPC'],
argon_coagulacion = row['argón_coagulación'],
coagulation_forceps = row['Coagulationforceps'],
clips = row['Clipping'], #?
#clips_control_group = row[''],
#clips_tratment_group = row[''],
#not_tired_closure_by = row[''],
#closure_technique = row[''],
number_clips_needed = row['ClipsNeeded'],
perforation = row['Perforation'],
surgery_from_endoscopy = row['Surgeryfromendoscopy'],
surgery_by_complication = row['Surgerybycomplication'],
bleeding = row['Bleeding'],
#immediate_bleeding = row[''],
delayed_bleeding = row['Delayedbleeding'],
bleeding_treatment = row['BleedingTreatment'],
transfusion = row['Trasfusion'],
pps = row['SPP'], #?
#fever = row[''],
#pain_requiring_medical_intervention = row[''],
hospital_stay_by_technique = row['HospitalStayByTechniche'],
hospital_stay_by_complication = row['HospitalStayByComplication'],
follow_up_months = row['FollowUpInMonths'],
successful_treatment = row['Successfultreatment'],
sedation = row['Sedation'],
last_date_endoscopic_follow_up = row['LastDateEndoscopicFollowUp'],
recurrence_three_six_months_control = row['Recurrence3monthscontrol'],
recurrenec_one_year_control = row['Recurrence1yearcontrol'],
global_recurrence = row['Globalrecurrence'],
other_complications_comments = row['OtherComplicationsComments'],
other_comments = row['OtherComments']
)
|
gpl-2.0
| 2,221,978,827,972,849,400
| 49.443983
| 122
| 0.492185
| false
| 3.850491
| false
| false
| false
|
yoseforb/lollypop
|
src/sync_mtp.py
|
1
|
12625
|
#!/usr/bin/python
# Copyright (c) 2014-2015 Cedric Bellegarde <cedric.bellegarde@adishatz.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import GLib, Gio
from time import sleep
from lollypop.define import Lp
from lollypop.utils import translate_artist_name, debug
class MtpSync:
"""
Init MTP synchronisation
"""
def __init__(self):
self._syncing = False
self._errors = False
self._total = 0 # Total files to sync
self._done = 0 # Handled files on sync
self._fraction = 0.0
self._copied_art_uris = []
############
# Private #
############
"""
Try to execute func 5 times
@param func as function
@param args as tuple
"""
def _retry(self, func, args, t=5):
if t == 0:
self._errors = True
return
try:
func(*args)
except Exception as e:
print("MtpSync::_retry(%s, %s): %s" % (func, args, e))
for a in args:
if isinstance(a, Gio.File):
print(a.get_uri())
sleep(5)
self._retry(func, args, t-1)
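    # --- Illustrative note (not part of the original class) ---
    # _retry() wraps every flaky MTP file operation used below, e.g.:
    #   self._retry(d.delete, (None,))
    #   self._retry(src_track.copy, (dst_track, Gio.FileCopyFlags.OVERWRITE, None, None))
    # Each call is attempted up to 5 times with a 5 second pause between tries
    # before self._errors is flagged and the sync continues.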
"""
Return children uris for uri
@param uri as str
@return [str]
"""
def _get_children_uris(self, uri):
children = []
dir_uris = [uri]
while dir_uris:
uri = dir_uris.pop(0)
d = Gio.File.new_for_uri(uri)
infos = d.enumerate_children(
'standard::name,standard::type',
Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS,
None)
for info in infos:
if info.get_file_type() == Gio.FileType.DIRECTORY:
dir_uris.append(uri+'/'+info.get_name())
else:
children.append(uri+'/'+info.get_name())
return children
"""
Sync playlists with device as this
@param playlists as [str]
"""
def _sync(self, playlists):
try:
self._in_thread = True
self._errors = False
self._copied_art_uris = []
sql = Lp.db.get_cursor()
# For progress bar
self._total = 1
self._done = 0
self._fraction = 0.0
# New tracks
for playlist in playlists:
self._fraction = self._done/self._total
self._total += len(Lp.playlists.get_tracks(playlist))
# Old tracks
try:
children = self._get_children_uris(self._uri+'/tracks')
self._total += len(children)
except:
pass
GLib.idle_add(self._update_progress)
# Copy new tracks to device
if self._syncing:
self._copy_to_device(playlists, sql)
# Remove old tracks from device
if self._syncing:
self._remove_from_device(playlists, sql)
# Delete old playlists
d = Gio.File.new_for_uri(self._uri)
infos = d.enumerate_children(
'standard::name',
Gio.FileQueryInfoFlags.NOFOLLOW_SYMLINKS,
None)
for info in infos:
f = info.get_name()
if f.endswith(".m3u") and f[:-4] not in playlists:
uri = self._uri+'/'+f
d = Gio.File.new_for_uri(uri)
self._retry(d.delete, (None,))
except Exception as e:
print("DeviceManagerWidget::_sync(): %s" % e)
self._fraction = 1.0
if self._syncing:
GLib.idle_add(self._view.set_sensitive, True)
GLib.idle_add(self.emit, 'sync-finished')
GLib.idle_add(self._progress.hide)
self._syncing = False
self._in_thread = False
if self._errors:
GLib.idle_add(self._on_errors)
GLib.idle_add(self._on_finished)
"""
Copy file from playlist to device
@param playlists as [str]
@param sql cursor
"""
def _copy_to_device(self, playlists, sql):
for playlist in playlists:
try:
# Create playlist
m3u = Gio.File.new_for_path(
"/tmp/lollypop_%s.m3u" % (playlist,))
self._retry(m3u.replace_contents, (b'#EXTM3U\n', None, False,
Gio.FileCreateFlags.REPLACE_DESTINATION,
None))
stream = m3u.open_readwrite(None)
except Exception as e:
print("DeviceWidget::_copy_to_device(): %s" % e)
m3u = None
stream = None
# Start copying
tracks_id = Lp.playlists.get_tracks_id(playlist, sql)
for track_id in tracks_id:
if not self._syncing:
self._fraction = 1.0
self._in_thread = False
return
album_id = Lp.tracks.get_album_id(track_id, sql)
album_name = Lp.albums.get_name(album_id, sql)
# Sanitize file names as some MTP devices do not like this
# Or this is a Gio/GObject Introspection bug
album_name = "".join([c for c in album_name if c.isalpha()\
or c.isdigit() or c==' ']).rstrip()
artist_name = translate_artist_name(
Lp.albums.get_artist_name(album_id, sql))
# Sanitize file names as some MTP devices do not like this
# Or this is a Gio/GObject Introspection bug
artist_name = "".join([c for c in artist_name if c.isalpha()\
or c.isdigit() or c==' ']).rstrip()
track_path = Lp.tracks.get_path(track_id, sql)
on_device_album_uri = "%s/tracks/%s_%s" %\
(self._uri,
artist_name.lower(),
album_name.lower())
d = Gio.File.new_for_uri(on_device_album_uri)
if not d.query_exists(None):
self._retry(d.make_directory_with_parents, (None,))
# Copy album art
art = Lp.art.get_album_art_path(album_id, sql)
if art:
src_art = Gio.File.new_for_path(art)
art_uri = "%s/cover.jpg" % on_device_album_uri
self._copied_art_uris.append(art_uri)
dst_art = Gio.File.new_for_uri(art_uri)
if not dst_art.query_exists(None):
self._retry(src_art.copy,
(dst_art, Gio.FileCopyFlags.OVERWRITE,
None, None))
track_name = GLib.basename(track_path)
# Sanitize file names as some MTP devices do not like this
# Or this is a Gio/GObject Introspection bug
track_name = "".join([c for c in track_name if c.isalpha()\
or c.isdigit() or c==' ' or c=='.']).rstrip()
src_track = Gio.File.new_for_path(track_path)
info = src_track.query_info('time::modified',
Gio.FileQueryInfoFlags.NONE,
None)
# Prefix track with mtime to make sure updating it later
mtime = info.get_attribute_as_string('time::modified')
dst_uri = "%s/%s_%s" % (on_device_album_uri,
mtime, track_name)
if stream is not None:
line = "tracks/%s_%s/%s_%s\n" %\
(artist_name.lower(),
album_name.lower(),
mtime,
track_name)
self._retry(stream.get_output_stream().write,
(line.encode(encoding='UTF-8'), None))
dst_track = Gio.File.new_for_uri(dst_uri)
if not dst_track.query_exists(None):
self._retry(src_track.copy,
(dst_track, Gio.FileCopyFlags.OVERWRITE,
None, None))
else:
self._done += 1
self._done += 1
self._fraction = self._done/self._total
GLib.idle_add(self._update_progress)
if stream is not None:
stream.close()
if m3u is not None:
dst = Gio.File.new_for_uri(self._uri+'/'+playlist+'.m3u')
self._retry(m3u.move,
(dst, Gio.FileCopyFlags.OVERWRITE, None, None))
"""
Delete files not available in playlist
if sql None, delete all files
@param playlists as [str]
@param sql cursor
"""
def _remove_from_device(self, playlists, sql):
track_uris = []
tracks_id = []
# Get tracks ids
for playlist in playlists:
tracks_id += Lp.playlists.get_tracks_id(playlist, sql)
# Get tracks uris
for track_id in tracks_id:
if not self._syncing:
self._fraction = 1.0
self._in_thread = False
return
album_id = Lp.tracks.get_album_id(track_id, sql)
album_name = Lp.albums.get_name(album_id, sql)
# Sanitize file names as some MTP devices do not like this
# Or this is a Gio/GObject Introspection bug
album_name = "".join([c for c in album_name if c.isalpha()\
or c.isdigit() or c==' ']).rstrip()
artist_name = translate_artist_name(
Lp.albums.get_artist_name(album_id, sql))
# Sanitize file names as some MTP devices do not like this
# Or this is a Gio/GObject Introspection bug
artist_name = "".join([c for c in artist_name if c.isalpha()\
or c.isdigit() or c==' ']).rstrip()
track_path = Lp.tracks.get_path(track_id, sql)
album_uri = "%s/tracks/%s_%s" % (self._uri,
artist_name.lower(),
album_name.lower())
track_name = GLib.basename(track_path)
# Sanitize file names as some MTP devices do not like this
# Or this is a Gio/GObject Introspection bug
track_name = "".join([c for c in track_name if c.isalpha()\
or c.isdigit() or c==' ' or c=='.']).rstrip()
on_disk = Gio.File.new_for_path(track_path)
info = on_disk.query_info('time::modified',
Gio.FileQueryInfoFlags.NONE,
None)
# Prefix track with mtime to make sure updating it later
mtime = info.get_attribute_as_string('time::modified')
dst_uri = "%s/%s_%s" % (album_uri, mtime, track_name)
track_uris.append(dst_uri)
on_mtp_files = self._get_children_uris(self._uri+'/tracks')
# Delete file on device and not in playlists
for uri in on_mtp_files:
if not self._syncing:
self._fraction = 1.0
self._in_thread = False
return
if uri not in track_uris and uri not in self._copied_art_uris:
to_delete = Gio.File.new_for_uri(uri)
self._retry(to_delete.delete, (None,))
self._done += 1
self._fraction = self._done/self._total
GLib.idle_add(self._update_progress)
"""
Clean on finished. Do nothing
"""
def _on_finished(self):
pass
"""
Show something to the user. Do nothing.
"""
def _on_errors(self):
pass
|
gpl-3.0
| 2,710,664,522,675,087,400
| 38.952532
| 77
| 0.493228
| false
| 4.099026
| false
| false
| false
|
NLeSC/pointcloud-benchmark
|
python/pointcloud/postgres/blocks/LoaderOrdered.py
|
1
|
2424
|
#!/usr/bin/env python
################################################################################
# Created by Oscar Martinez #
# o.rubi@esciencecenter.nl #
################################################################################
import os, logging
from pointcloud import pdalops, postgresops
from pointcloud.postgres.blocks.Loader import Loader
class LoaderOrdered(Loader):
def getFileBlockTable(self, index):
return self.blockTable + '_' + str(index)
def process(self):
logging.info('Starting ordered data loading with PDAL (parallel by python) from ' + self.inputFolder + ' to ' + self.dbName)
return self.processMulti(self.inputFiles, self.numProcessesLoad, self.loadFromFile, self.loadFromFileSequential, True)
def loadFromFile(self, index, fileAbsPath):
# Get connection
connection = self.getConnection()
cursor = connection.cursor()
#Create a temporal blocks table for the blocks of the current file
fileBlockTable = self.getFileBlockTable(index)
self.createBlocksTable(cursor, fileBlockTable, self.indexTableSpace) # We use the index table space for the temporal table
# Add point cloud format to poinctcloud_formats table
(columns, pcid, compression) = self.addPCFormat(cursor, self.schemaFile, fileAbsPath, self.srid)
connection.close()
pdalCols = []
        for c in columns:
pdalCols.append(self.DM_PDAL[c])
# Get PDAL config and run PDAL
xmlFile = os.path.basename(fileAbsPath) + '.xml'
pdalops.PostgreSQLWriter(xmlFile, fileAbsPath, self.getConnectionString(), pcid, pdalCols, fileBlockTable, self.srid, self.blockSize, compression)
pdalops.executePDAL(xmlFile)
def loadFromFileSequential(self, fileAbsPath, index, numFiles):
fileBlockTable = self.getFileBlockTable(index)
connection = self.getConnection()
cursor = connection.cursor()
# Insert the blocks on the global blocks table (with correct order)
query = "INSERT INTO " + self.blockTable + " (pa) SELECT pa FROM " + fileBlockTable + " ORDER BY id"
postgresops.mogrifyExecute(cursor, query)
# Drop the temporal table
postgresops.dropTable(cursor, fileBlockTable)
connection.close()
|
apache-2.0
| 3,891,968,732,836,190,000
| 49.5
| 154
| 0.616337
| false
| 4.351885
| false
| false
| false
|
rajalokan/glance
|
glance/db/sqlalchemy/alembic_migrations/versions/ocata_expand01_add_visibility.py
|
1
|
5764
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add visibility to images
Revision ID: ocata_expand01
Revises: mitaka02
Create Date: 2017-01-27 12:58:16.647499
"""
from alembic import op
from sqlalchemy import Column, Enum, MetaData, Table
from glance.db import migration
# revision identifiers, used by Alembic.
revision = 'ocata_expand01'
down_revision = 'mitaka02'
branch_labels = migration.EXPAND_BRANCH
depends_on = None
ERROR_MESSAGE = 'Invalid visibility value'
MYSQL_INSERT_TRIGGER = """
CREATE TRIGGER insert_visibility BEFORE INSERT ON images
FOR EACH ROW
BEGIN
-- NOTE(abashmak):
-- The following IF/ELSE block implements a priority decision tree.
-- Strict order MUST be followed to correctly cover all the edge cases.
-- Edge case: neither is_public nor visibility specified
-- (or both specified as NULL):
IF NEW.is_public <=> NULL AND NEW.visibility <=> NULL THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Edge case: both is_public and visibility specified:
ELSEIF NOT(NEW.is_public <=> NULL OR NEW.visibility <=> NULL) THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Inserting with is_public, set visibility accordingly:
ELSEIF NOT NEW.is_public <=> NULL THEN
IF NEW.is_public = 1 THEN
SET NEW.visibility = 'public';
ELSE
SET NEW.visibility = 'shared';
END IF;
-- Inserting with visibility, set is_public accordingly:
ELSEIF NOT NEW.visibility <=> NULL THEN
IF NEW.visibility = 'public' THEN
SET NEW.is_public = 1;
ELSE
SET NEW.is_public = 0;
END IF;
-- Edge case: either one of: is_public or visibility,
-- is explicitly set to NULL:
ELSE
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
END IF;
END;
"""
MYSQL_UPDATE_TRIGGER = """
CREATE TRIGGER update_visibility BEFORE UPDATE ON images
FOR EACH ROW
BEGIN
-- Case: new value specified for is_public:
IF NOT NEW.is_public <=> OLD.is_public THEN
-- Edge case: is_public explicitly set to NULL:
IF NEW.is_public <=> NULL THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Edge case: new value also specified for visibility
ELSEIF NOT NEW.visibility <=> OLD.visibility THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Case: visibility not specified or specified as OLD value:
-- NOTE(abashmak): There is no way to reliably determine which
-- of the above two cases occurred, but allowing to proceed with
-- the update in either case does not break the model for both
-- N and N-1 services.
ELSE
-- Set visibility according to the value of is_public:
IF NEW.is_public <=> 1 THEN
SET NEW.visibility = 'public';
ELSE
SET NEW.visibility = 'shared';
END IF;
END IF;
-- Case: new value specified for visibility:
ELSEIF NOT NEW.visibility <=> OLD.visibility THEN
-- Edge case: visibility explicitly set to NULL:
IF NEW.visibility <=> NULL THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Edge case: new value also specified for is_public
ELSEIF NOT NEW.is_public <=> OLD.is_public THEN
SIGNAL SQLSTATE '45000' SET MESSAGE_TEXT = '%s';
-- Case: is_public not specified or specified as OLD value:
-- NOTE(abashmak): There is no way to reliably determine which
-- of the above two cases occurred, but allowing to proceed with
-- the update in either case does not break the model for both
-- N and N-1 services.
ELSE
-- Set is_public according to the value of visibility:
IF NEW.visibility <=> 'public' THEN
SET NEW.is_public = 1;
ELSE
SET NEW.is_public = 0;
END IF;
END IF;
END IF;
END;
"""
def _add_visibility_column(meta):
enum = Enum('private', 'public', 'shared', 'community', metadata=meta,
name='image_visibility')
enum.create()
v_col = Column('visibility', enum, nullable=True, server_default=None)
op.add_column('images', v_col)
op.create_index('visibility_image_idx', 'images', ['visibility'])
def _add_triggers(engine):
if engine.engine.name == 'mysql':
op.execute(MYSQL_INSERT_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE,
ERROR_MESSAGE))
op.execute(MYSQL_UPDATE_TRIGGER % (ERROR_MESSAGE, ERROR_MESSAGE,
ERROR_MESSAGE, ERROR_MESSAGE))
def _change_nullability_and_default_on_is_public(meta):
# NOTE(hemanthm): we mark is_public as nullable so that when new versions
    # add data only to the visibility column, is_public can be null.
images = Table('images', meta, autoload=True)
images.c.is_public.alter(nullable=True, server_default=None)
def upgrade():
migrate_engine = op.get_bind()
meta = MetaData(bind=migrate_engine)
_add_visibility_column(meta)
_change_nullability_and_default_on_is_public(meta)
_add_triggers(migrate_engine)
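# --- Illustrative sketch (not part of the original migration) ---
# The triggers above keep the legacy boolean and the new enum consistent while
# N and N-1 services share the database. The mapping they implement is roughly
# the following (hypothetical helpers, shown only for clarity):
#
#   def visibility_from_is_public(is_public):
#       # mirrors the INSERT trigger branch on NEW.is_public
#       return 'public' if is_public else 'shared'
#
#   def is_public_from_visibility(visibility):
#       # mirrors the INSERT trigger branch on NEW.visibility
#       return 1 if visibility == 'public' else 0
#
# Setting both columns at once, or explicitly setting either one to NULL,
# raises the 'Invalid visibility value' error instead.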
|
apache-2.0
| 7,434,511,919,462,939,000
| 37.172185
| 78
| 0.637058
| false
| 3.897228
| false
| false
| false
|
ahirner/TabulaRazr-OS
|
data_query.py
|
1
|
6364
|
from datetime import date
import dateutil.parser as date_parser
from backend import config
from fuzzywuzzy import fuzz
from itertools import product
# Cascades:
# 1) case sensitive partial ratio on character level with penalty
# 2) case insensitive partial ratio on character level with penalty
# 3) token sorted case insensitive ratio with penalty
FUZZY_INV_CASCADES = 1.0 / 3.0
def fuzzy_str_match(query, string):
score = 1.0
inv_cascades = FUZZY_INV_CASCADES
min_fuzzy_ratio = config["min_fuzzy_ratio"]
query = query.encode('ascii', errors='ignore')
string = string.encode('ascii', errors='ignore')
#Penalize shorter target strings and early exit on null length strings
len_query = len(query)
len_string = len(string.strip())
if not len_string: return None
if not len_query: return score
penalty = min(len_string / float(len_query), 1.0)
fuzzy_partial = (fuzz.partial_ratio(query, string)/100.0) * penalty
#print ("fuzzy_partial of %s vs %s * penalty %.2f" % (query, string, penalty), fuzzy_partial)
if fuzzy_partial > min_fuzzy_ratio:
f_score = score - (1.0 - (fuzzy_partial - (1.0 - min_fuzzy_ratio)) / min_fuzzy_ratio) * inv_cascades
return f_score
score -= inv_cascades
q_l = query.lower()
s_l = string.lower()
fuzzy_partial = (fuzz.partial_ratio(q_l, s_l)/100.0) * penalty
#print ("fuzzy_partial lower_case of %s vs %s * penalty %.2f" % (query, string, penalty), fuzzy_partial)
if fuzzy_partial > min_fuzzy_ratio:
f_score = score - (1.0 - (fuzzy_partial - (1.0 - min_fuzzy_ratio)) / min_fuzzy_ratio) * inv_cascades
return f_score
score -= inv_cascades
fuzzy_partial = (fuzz.partial_token_sort_ratio(q_l, s_l)/100.0) * penalty
#print ("fuzzy_partial token_sort_lower_case of %s vs %s * penalty %.2f" % (query, string, penalty), fuzzy_partial)
if fuzzy_partial > min_fuzzy_ratio:
f_score = score - (1.0 - (fuzzy_partial - (1.0 - min_fuzzy_ratio)) / min_fuzzy_ratio) * inv_cascades
return f_score
return None
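# --- Illustrative behaviour sketch (not part of the original module) ---
# With a typical config["min_fuzzy_ratio"] around 0.75, the cascade above gives
# the highest scores to case-sensitive partial matches, lower scores to matches
# found only after lowercasing, lower still to matches found only after token
# sorting, and None when no cascade passes (values are indicative only):
#
#   fuzzy_str_match("Total Debt", "Total Debt Service Fund")   # close to 1.0
#   fuzzy_str_match("total debt", "TOTAL DEBT SERVICE FUND")   # mid-range score
#   fuzzy_str_match("debt total", "Total Debt Service Fund")   # lower score
#   fuzzy_str_match("zzzz",       "Total Debt Service Fund")   # None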
#Flatmap from tables to sequence of tuples (confidence, table, row or None, value or None)
def filter_tables(tables, filter_dict, treshold = 0.0, only_max = False):
row = None
value = None
for t in tables:
if 'headers' in filter_dict:
max_conf, index, best_term = None, None, None
terms = filter_dict['headers']['terms']
_threshold = max(treshold, filter_dict['headers']['threshold'])
for term in terms:
if t['headers']:
current_max_conf = (max_conf if only_max else _threshold) or _threshold
scores_indices = ((val, idx) for (idx, val) in enumerate(fuzzy_str_match(term, h) for h in t['headers'] ) )
conf, idx = max(scores_indices)
if conf > max_conf:
max_conf = conf
index = idx
best_term = term
best_header = ""
#Todo: other filter criteria like column names, rows etc. and combinatorial confidence score
if max_conf:
yield max_conf, t, row, value
def get_fuzzy_date(string):
today = date.today()
v_ascii = string.encode("ascii", errors="ignore")
try:
dt = date_parser.parse(v_ascii, fuzzy=True, default=today)
if dt != today:
return dt
except:
return None
def get_first_date(lines, query_string, threshold = 0.4):
for i, l in enumerate(lines):
if fuzzy_str_match(query_string, l) > threshold:
dt = get_fuzzy_date(l)
if dt:
return dt, i, l
def find_row(table, query_string, threshold = 0.4):
#Find first 'other' typed row
try:
index = table['types'].index('other')
except ValueError:
print "no column consisting of mainly string data found"
return None
strings = (s[index]['value'] for s in table['data'])
#query_string can either be a single one or an iterable
if isinstance(query_string, basestring):
query_string = [query_string]
scores_indices = ((val, idx) for (idx, val) in ( (s[0], fuzzy_str_match(qs, s[1])) \
for qs, s in product(query_string, enumerate(strings))) )
val, idx = max(scores_indices)
if val >= threshold:
return table['data'][idx]
else:
return None
def closest_row_numeric_value(table, query_string, threshold = 0.4, raw_cell = False):
row = find_row(table, query_string, threshold)
if row:
for c in row:
if 'type' in c:
                if c['type'] in ('integer',):
v = int(c['value'])
return (v, c) if raw_cell else v
elif c['type'] in ('large_num', 'small_float'):
v = float(c['value'].replace(",", ""))
return (v, c) if raw_cell else v
def get_key_values(table, key_queries, threshold = 0.4, raw_cell = False):
return { k : closest_row_numeric_value(table, kk, threshold, raw_cell) for k, kk in key_queries.iteritems() }
def find_column(table, query_string, types=None, subtypes=None, threshold = 0.4):
#Find first column with specific types
columns = []
for i, t in enumerate(zip(table['types'], table['subtypes'])):
t, st = t[0], t[1]
if t in (types or t) and st in (subtypes or st):
if fuzzy_str_match(query_string, table['captions'][i]) > threshold: return i
def filter_time_series(table, query_string, subtypes = ['dollar'], threshold = 0.4):
time_index = find_column(table, "", subtypes=['date', 'year'], threshold=threshold)
value_index = find_column(table, query_string, subtypes=subtypes, threshold=threshold)
for r in table['data']:
dt = get_fuzzy_date(r[time_index]['value'])
if dt:
c = r[value_index]
v = None
            if c['type'] in ('integer',):
v = int(c['value'])
elif c['type'] in ('large_num', 'small_float'):
v = float(c['value'].replace(",", ""))
if v: yield dt, v
|
agpl-3.0
| -6,371,302,341,923,677,000
| 38.042945
| 127
| 0.575896
| false
| 3.659574
| false
| false
| false
|
dan-cristian/haiot
|
presence/__init__.py
|
1
|
4256
|
from main.logger_helper import L
from pydispatch import dispatcher
from main import thread_pool, sqlitedb
if sqlitedb:
from storage.sqalc import models
from common import Constant
from presence import presence_bt
from presence import presence_wifi
from storage.model import m
__author__ = 'Dan Cristian<dan.cristian@gmail.com>'
initialised = False
def not_used_record_update(json=''):
# Log.logger.info('Got presence update')
if sqlitedb:
models.Presence().json_to_record_query(json_obj=json)
else:
# fixme
pass
def handle_event_presence_io(gpio_pin_code='', direction='', pin_value='', pin_connected=None):
try:
# Log.logger.info('Presence got event pin {} connected={}'.format(gpio_pin_code, pin_connected))
# skip too many updates, only capture when contact is not connected (for PIR sensors this is alarm)
#if not pin_connected:
if sqlitedb:
zonealarm = models.ZoneAlarm().query_filter_first(
models.ZoneAlarm.gpio_host_name.in_([Constant.HOST_NAME]),
models.ZoneAlarm.gpio_pin_code.in_([gpio_pin_code]))
else:
zonealarm = m.ZoneAlarm.find_one({m.ZoneAlarm.gpio_host_name: Constant.HOST_NAME,
m.ZoneAlarm.gpio_pin_code: gpio_pin_code})
# zone_id = None
# fixme: for now zonealarm holds gpio to zone mapping, should be made more generic
if zonealarm is not None:
zone_id = zonealarm.zone_id
if zone_id is not None:
zone = m.Zone.find_one({m.Zone.id: zone_id})
if zone is not None:
zone_name = zone.name
else:
L.l.warning("Zone not found for presence zoneid={}".format(zone_id))
zone_name = "zone_name not found"
record = m.Presence.find_one({m.Presence.zone_id: zone_id})
if record is None:
record = m.Presence()
record.event_type = zonealarm.sensor_type
record.zone_name = zone_name
# record.event_io_date = utils.get_base_location_now_date()
record.sensor_name = zonealarm.alarm_pin_name
record.is_connected = pin_connected
# Log.logger.info('Presence saving sensor {}'.format(record.sensor_name))
record.save_changed_fields(broadcast=True, persist=True)
else:
L.l.warning('Unable to find presence zone for pin {} in Alarm table'.format(gpio_pin_code))
except Exception as ex:
L.l.critical("Unable to save presence, er={}".format(ex), exc_info=True)
def handle_event_presence_cam(zone_name, cam_name, has_move):
L.l.debug("Got cam event zone {} cam {} move={}".format(zone_name, cam_name, has_move))
zone = m.Zone.find_one({m.Zone.name: zone_name})
if zone is not None:
record = m.Presence().find_one({m.Presence.zone_id: zone.id})
if record is None:
record = m.Presence()
record.event_type = Constant.PRESENCE_TYPE_CAM
record.zone_id = zone.id
record.zone_name = zone_name
# record.event_camera_date = utils.get_base_location_now_date()
record.sensor_name = cam_name
record.is_connected = bool(int(has_move))
L.l.debug("Saving cam event zone {} sensor {} is_conn={} record={}".format(
record.zone_name, record.sensor_name, record.is_connected, record))
record.save_changed_fields(broadcast=True, persist=True)
else:
L.l.warning('Unable to find presence zone for camera zone {}'.format(zone_name))
def unload():
L.l.info('Presence module unloading')
# ...
thread_pool.remove_callable(presence_bt.thread_run)
global initialised
initialised = False
def init():
L.l.debug('Presence module initialising')
thread_pool.add_interval_callable(presence_wifi.thread_run, run_interval_second=20)
dispatcher.connect(handle_event_presence_io, signal=Constant.SIGNAL_GPIO, sender=dispatcher.Any)
dispatcher.connect(handle_event_presence_cam, signal=Constant.SIGNAL_CAMERA, sender=dispatcher.Any)
global initialised
initialised = True
|
gpl-2.0
| -5,611,527,734,522,979,000
| 41.989899
| 107
| 0.626175
| false
| 3.710549
| false
| false
| false
|
pombredanne/quirinus
|
data/codecs/.base/rename.py
|
1
|
1084
|
import os
import re
import codecs
import struct
paths = [p for p in os.listdir(".") if not(p.endswith(".ct"))]
for src in os.listdir("."):
if (src.endswith(".py")):
continue
dest = src.replace(".cpp", ".ct")
with codecs.open(src, "rb") as stream:
data = stream.read()
pattern = re.compile(b"\\s*(0x[0-9A-Ha-h]+),\\s*(0x[0-9A-Ha-h]+),", re.A)
match = pattern.findall(data)
if (match):
name = src.replace(".cpp", "")
name = name.replace("_", "-")
if (name.startswith("MAC")):
name = name.lower()
elif (name.startswith("ATARIST")):
name = "AtariST"
elif (name.startswith("KPS9566")):
name = "KPS 9566"
elif (name.startswith("BIG5")):
name = "Big5"
name = name.encode("UTF-8")
print(name)
with codecs.open(dest, "wb") as stream:
stream.write(name)
stream.write(b'\0')
for pair in match:
byte = int(pair[0], 16)
code = int(pair[1], 16)
stream.write(struct.pack(">I", byte))
stream.write(struct.pack(">I", code))
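# --- Illustrative note (not part of the original script) ---
# Layout of the generated .ct files (all integers big-endian, per the
# struct.pack(">I", ...) calls above):
#   <codec name, UTF-8> 0x00 { <byte value, uint32> <code point, uint32> }*
# e.g. the pair (0x41, 0x0041) is written as 00 00 00 41 00 00 00 41.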
""_u8
""_u16
""_u32
""_f64
""_f80
""_QF
""_F
|
lgpl-3.0
| 3,352,182,879,483,439,000
| 24.209302
| 75
| 0.558118
| false
| 2.890667
| false
| false
| false
|
sephii/django
|
django/contrib/contenttypes/models.py
|
1
|
7903
|
from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.db.utils import OperationalError, ProgrammingError
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.utils.encoding import python_2_unicode_compatible
class ContentTypeManager(models.Manager):
use_in_migrations = True
# Cache to avoid re-looking up ContentType objects all over the place.
# This cache is shared by all the get_for_* methods.
_cache = {}
def get_by_natural_key(self, app_label, model):
try:
ct = self.__class__._cache[self.db][(app_label, model)]
except KeyError:
ct = self.get(app_label=app_label, model=model)
self._add_to_cache(self.db, ct)
return ct
def _get_opts(self, model, for_concrete_model):
if for_concrete_model:
model = model._meta.concrete_model
elif model._deferred:
model = model._meta.proxy_for_model
return model._meta
def _get_from_cache(self, opts):
key = (opts.app_label, opts.model_name)
return self.__class__._cache[self.db][key]
def get_for_model(self, model, for_concrete_model=True):
"""
Returns the ContentType object for a given model, creating the
ContentType if necessary. Lookups are cached so that subsequent lookups
for the same model don't hit the database.
"""
opts = self._get_opts(model, for_concrete_model)
try:
return self._get_from_cache(opts)
except KeyError:
pass
# The ContentType entry was not found in the cache, therefore we
# proceed to load or create it.
try:
# We start with get() and not get_or_create() in order to use
# the db_for_read (see #20401).
ct = self.get(app_label=opts.app_label, model=opts.model_name)
except (OperationalError, ProgrammingError):
# It's possible to migrate a single app before contenttypes,
# as it's not a required initial dependency (it's contrib!)
# Have a nice error for this.
raise RuntimeError(
"Error creating new content types. Please make sure contenttypes "
"is migrated before trying to migrate apps individually."
)
except self.model.DoesNotExist:
# Not found in the database; we proceed to create it. This time we
# use get_or_create to take care of any race conditions.
ct, created = self.get_or_create(
app_label=opts.app_label,
model=opts.model_name,
defaults={'name': opts.verbose_name_raw},
)
self._add_to_cache(self.db, ct)
return ct
def get_for_models(self, *models, **kwargs):
"""
Given *models, returns a dictionary mapping {model: content_type}.
"""
for_concrete_models = kwargs.pop('for_concrete_models', True)
# Final results
results = {}
# models that aren't already in the cache
needed_app_labels = set()
needed_models = set()
needed_opts = set()
for model in models:
opts = self._get_opts(model, for_concrete_models)
try:
ct = self._get_from_cache(opts)
except KeyError:
needed_app_labels.add(opts.app_label)
needed_models.add(opts.model_name)
needed_opts.add(opts)
else:
results[model] = ct
if needed_opts:
cts = self.filter(
app_label__in=needed_app_labels,
model__in=needed_models
)
for ct in cts:
model = ct.model_class()
if model._meta in needed_opts:
results[model] = ct
needed_opts.remove(model._meta)
self._add_to_cache(self.db, ct)
for opts in needed_opts:
# These weren't in the cache, or the DB, create them.
ct = self.create(
app_label=opts.app_label,
model=opts.model_name,
name=opts.verbose_name_raw,
)
self._add_to_cache(self.db, ct)
results[ct.model_class()] = ct
return results
def get_for_id(self, id):
"""
Lookup a ContentType by ID. Uses the same shared cache as get_for_model
(though ContentTypes are obviously not created on-the-fly by get_by_id).
"""
try:
ct = self.__class__._cache[self.db][id]
except KeyError:
# This could raise a DoesNotExist; that's correct behavior and will
# make sure that only correct ctypes get stored in the cache dict.
ct = self.get(pk=id)
self._add_to_cache(self.db, ct)
return ct
def clear_cache(self):
"""
Clear out the content-type cache. This needs to happen during database
flushes to prevent caching of "stale" content type IDs (see
django.contrib.contenttypes.management.update_contenttypes for where
this gets called).
"""
self.__class__._cache.clear()
def _add_to_cache(self, using, ct):
"""Insert a ContentType into the cache."""
# Note it's possible for ContentType objects to be stale; model_class() will return None.
# Hence, there is no reliance on model._meta.app_label here, just using the model fields instead.
key = (ct.app_label, ct.model)
self.__class__._cache.setdefault(using, {})[key] = ct
self.__class__._cache.setdefault(using, {})[ct.id] = ct
@python_2_unicode_compatible
class ContentType(models.Model):
name = models.CharField(max_length=100)
app_label = models.CharField(max_length=100)
model = models.CharField(_('python model class name'), max_length=100)
objects = ContentTypeManager()
class Meta:
verbose_name = _('content type')
verbose_name_plural = _('content types')
db_table = 'django_content_type'
ordering = ('name',)
unique_together = (('app_label', 'model'),)
def __str__(self):
# self.name is deprecated in favor of using model's verbose_name, which
# can be translated. Formal deprecation is delayed until we have DB
# migration to be able to remove the field from the database along with
# the attribute.
#
# We return self.name only when users have changed its value from the
# initial verbose_name_raw and might rely on it.
model = self.model_class()
if not model or self.name != model._meta.verbose_name_raw:
return self.name
else:
return force_text(model._meta.verbose_name)
def model_class(self):
"Returns the Python model class for this type of content."
try:
return apps.get_model(self.app_label, self.model)
except LookupError:
return None
def get_object_for_this_type(self, **kwargs):
"""
Returns an object of this type for the keyword arguments given.
Basically, this is a proxy around this object_type's get_object() model
method. The ObjectNotExist exception, if thrown, will not be caught,
so code that calls this method should catch it.
"""
return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
def get_all_objects_for_this_type(self, **kwargs):
"""
Returns all objects of this type for the keyword arguments given.
"""
return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
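# --- Illustrative usage sketch (not part of Django's source) ---
# The shared cache in ContentTypeManager means repeated lookups for the same
# model are cheap; SomeModel below is a placeholder for any installed model:
#
#   from django.contrib.contenttypes.models import ContentType
#   ct1 = ContentType.objects.get_for_model(SomeModel)   # may hit the database
#   ct2 = ContentType.objects.get_for_model(SomeModel)   # served from _cache
#   assert ct1 == ct2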
|
bsd-3-clause
| -7,295,056,630,378,533,000
| 38.515
| 105
| 0.594205
| false
| 4.157286
| false
| false
| false
|
OpenCV-Python-Tutorials/Filter
|
filter.py
|
1
|
1678
|
import cv2
img_name = raw_input("Enter the image filename:")
img = cv2.imread(img_name,0)
def menu():
print "Select filter type:"
print "Press '1' for Low Pass filter."
print "Press '2' for High Pass filter."
print "Press '3' for Band Pass filter."
print "Press '4' for Notch filter."
print "Press 'q' to quit the program."
menu()
minTh=100
maxTh=200
def lpf(minTh):
l = img.shape[0]
w = img.shape[1]
for x in range(l):
for y in range(w):
if img[x,y]>minTh:
img[x,y]=0
cv2.imshow('Output',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def hpf(maxTh):
l = img.shape[0]
w = img.shape[1]
for x in range(l):
for y in range(w):
if img[x,y]<maxTh:
img[x,y]=0
cv2.imshow('Output',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def bpf():
    # band pass filter: left unimplemented in the original script;
    # only the image dimensions are read
    l = img.shape[0]
    w = img.shape[1]
def brf():
    # band reject (notch) filter: left unimplemented in the original script;
    # only the image dimensions are read
    l = img.shape[0]
    w = img.shape[1]
while(True):
    key = raw_input("Enter your choice:")
if key=='1':
cv2.namedWindow('Output',cv2.WINDOW_NORMAL)
cv2.createTrackbar('minTh:','Output',minTh,255,lpf)
print "You selected Low Pass filter"
lpf(minTh)
elif key=='2':
cv2.namedWindow('Output',cv2.WINDOW_NORMAL)
cv2.createTrackbar('maxTh:','Output',maxTh,255,hpf)
print "You selected High Pass filter"
hpf(maxTh)
elif key=='3':
print "You selected Band Pass filter"
bpf()
elif key=='4':
print "You selected Notch filter"
brf()
elif key == 'q':
print "Exit"
break
else:
print "Invalid option"
|
mit
| 1,386,468,332,461,299,000
| 24.815385
| 59
| 0.558999
| false
| 3.107407
| false
| false
| false
|
mldbai/mldb
|
container_files/init/mldb_logger.py
|
1
|
2869
|
#!/usr/bin/env python
# Copyright mldb.ai inc 2016
# Author: Jean Raby <jean@mldb.ai>
# TODO:
# - configure logging so that access/error logs go somewhere else than stderr
import fcntl
import functools
import grp
import jinja2
import os
import pwd
import pytz
import sys
import time
import tornado.web
from tornado.ioloop import IOLoop
from datetime import datetime
from collections import namedtuple, deque
try:
from mldb_logger_utils import RUNAS, HTTP_LISTEN_PORT
except NameError:
# provide defaults if templating didn't run
RUNAS = "nobody"
HTTP_LISTEN_PORT = 1234
LOGBUFSIZE = 8192
LogLine = namedtuple('LogLine', ['dt', 'data', ])
LOGS_MLDB_TEMPLATE = \
"""
<html><body>
<pre>
{%- for l in logbuf -%}
{{l.dt.strftime(timeformat)}} {{l.data}}
{%- endfor %}
</pre>
<a name="end"></a>
</body></html>
"""
def droppriv():
if os.getuid() != 0:
return # not root?
new_uid = pwd.getpwnam(RUNAS).pw_uid
new_gid = grp.getgrnam(RUNAS).gr_gid
os.setgroups([])
os.setgid(new_gid)
os.setuid(new_uid)
old_umask = os.umask(077)
def stdin_ready(f, logbuf, fd, events):
if events & IOLoop.READ:
try:
for line in f:
logline = LogLine(dt=datetime.now(pytz.utc), data=line.decode('utf8', 'replace'))
logbuf.append(logline)
sys.stdout.write(line)
                # line buffering is needed to make sure messages are emitted in realtime
# simulate that by flushing every line...
sys.stdout.flush()
except IOError:
pass # If we get a EWOULDBLOCK, continue. EOF handled below
if events & IOLoop.ERROR:
exit(0)
class LogsMldbHandler(tornado.web.RequestHandler):
def get(self):
""" Sends the last n lines from logbuf, or all of it if n is not set """
n = self.get_argument("n", default=None)
try:
timeformat = "%FT%T.%f%z"
if logbuf[0].dt.tzname() == "UTC":
timeformat = "%FT%T.%fZ"
except IndexError:
pass # don't care, logbuf is probably empty
env = { "timeformat": timeformat,
"logbuf": list(logbuf)[-int(n):] if n else logbuf
}
out = jinja2.Environment().from_string(LOGS_MLDB_TEMPLATE).render(**env)
self.set_header('Content-Type', 'text/html')
self.write(out)
if __name__ == "__main__":
droppriv() # Early on, we don't need privileges for anything.
logbuf = deque(maxlen=LOGBUFSIZE)
io_loop = IOLoop.current()
# set stdin to non blocking mode for use with tornado
fl = fcntl.fcntl(sys.stdin.fileno(), fcntl.F_GETFL)
fcntl.fcntl(sys.stdin.fileno(), fcntl.F_SETFL, fl | os.O_NONBLOCK)
callback = functools.partial(stdin_ready, sys.stdin, logbuf)
io_loop.add_handler(sys.stdin.fileno(), callback,
io_loop.READ | io_loop.ERROR)
app = tornado.web.Application([ ("/logs/mldb", LogsMldbHandler) ])
app.listen(HTTP_LISTEN_PORT)
io_loop.start()
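    # --- Illustrative usage (assumed defaults, not part of the original) ---
    # With the fallback HTTP_LISTEN_PORT of 1234, the last 100 buffered log
    # lines could be fetched with, for example:
    #   curl "http://localhost:1234/logs/mldb?n=100"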
|
apache-2.0
| 2,752,010,964,470,450,000
| 24.389381
| 89
| 0.654932
| false
| 3.149286
| false
| false
| false
|
davibe/pygobject
|
tests/test_repository.py
|
1
|
17918
|
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2013 Simon Feltman <sfeltman@gnome.org>
#
# test_repository.py: Test for the GIRepository module
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
import unittest
import collections
import gi._gi as GIRepository
from gi.module import repository as repo
from gi.repository import GObject
from gi.repository import GLib
from gi.repository import GIMarshallingTests
from gi.repository import GIRepository as IntrospectedRepository
try:
import cairo
cairo
has_cairo = True
except ImportError:
has_cairo = False
def find_child_info(info, getter_name, name):
getter = getattr(info, getter_name)
for child in getter():
if child.get_name() == name:
return child
else:
raise ValueError('child info %s not found' % name)
class Test(unittest.TestCase):
def setUp(self):
repo.require('GObject')
repo.require('GIMarshallingTests')
def test_arg_info(self):
func_info = repo.find_by_name('GIMarshallingTests', 'array_fixed_out_struct')
args = func_info.get_arguments()
self.assertTrue(len(args), 1)
arg = args[0]
self.assertEqual(arg.get_container(), func_info)
self.assertEqual(arg.get_direction(), GIRepository.Direction.OUT)
self.assertEqual(arg.get_name(), 'structs')
self.assertEqual(arg.get_namespace(), 'GIMarshallingTests')
self.assertFalse(arg.is_caller_allocates())
self.assertFalse(arg.is_optional())
self.assertFalse(arg.is_return_value())
self.assertFalse(arg.may_be_null())
self.assertEqual(arg.get_destroy(), -1)
self.assertEqual(arg.get_ownership_transfer(), GIRepository.Transfer.NOTHING)
self.assertEqual(arg.get_scope(), GIRepository.ScopeType.INVALID)
self.assertEqual(arg.get_type().get_tag(), GIRepository.TypeTag.ARRAY)
def test_base_info(self):
info = repo.find_by_name('GIMarshallingTests', 'Object')
self.assertEqual(info.__name__, 'Object')
self.assertEqual(info.get_name(), 'Object')
self.assertEqual(info.__module__, 'gi.repository.GIMarshallingTests')
self.assertEqual(info.get_name_unescaped(), 'Object')
self.assertEqual(info.get_namespace(), 'GIMarshallingTests')
self.assertEqual(info.get_container(), None)
info2 = repo.find_by_name('GIMarshallingTests', 'Object')
self.assertFalse(info is info2)
self.assertEqual(info, info2)
self.assertTrue(info.equal(info2))
def test_object_info(self):
info = repo.find_by_name('GIMarshallingTests', 'Object')
self.assertEqual(info.get_parent(), repo.find_by_name('GObject', 'Object'))
self.assertTrue(isinstance(info.get_methods(), collections.Iterable))
self.assertTrue(isinstance(info.get_fields(), collections.Iterable))
self.assertTrue(isinstance(info.get_interfaces(), collections.Iterable))
self.assertTrue(isinstance(info.get_constants(), collections.Iterable))
self.assertTrue(isinstance(info.get_vfuncs(), collections.Iterable))
self.assertTrue(isinstance(info.get_properties(), collections.Iterable))
self.assertFalse(info.get_abstract())
self.assertEqual(info.get_class_struct(), repo.find_by_name('GIMarshallingTests', 'ObjectClass'))
self.assertEqual(info.get_type_name(), 'GIMarshallingTestsObject')
self.assertEqual(info.get_type_init(), 'gi_marshalling_tests_object_get_type')
self.assertFalse(info.get_fundamental())
self.assertEqual(info.get_parent(), repo.find_by_name('GObject', 'Object'))
def test_registered_type_info(self):
info = repo.find_by_name('GIMarshallingTests', 'Object')
# Call these from the class because GIObjectInfo overrides them
self.assertEqual(GIRepository.RegisteredTypeInfo.get_g_type(info),
GObject.type_from_name('GIMarshallingTestsObject'))
self.assertEqual(GIRepository.RegisteredTypeInfo.get_type_name(info),
'GIMarshallingTestsObject')
self.assertEqual(GIRepository.RegisteredTypeInfo.get_type_init(info),
'gi_marshalling_tests_object_get_type')
@unittest.skipUnless(has_cairo, 'Regress needs cairo')
def test_fundamental_object_info(self):
repo.require('Regress')
info = repo.find_by_name('Regress', 'TestFundamentalObject')
self.assertTrue(info.get_abstract())
self.assertTrue(info.get_fundamental())
self.assertEqual(info.get_ref_function(), 'regress_test_fundamental_object_ref')
self.assertEqual(info.get_unref_function(), 'regress_test_fundamental_object_unref')
self.assertEqual(info.get_get_value_function(), 'regress_test_value_get_fundamental_object')
self.assertEqual(info.get_set_value_function(), 'regress_test_value_set_fundamental_object')
def test_interface_info(self):
info = repo.find_by_name('GIMarshallingTests', 'Interface')
self.assertTrue(isinstance(info.get_methods(), collections.Iterable))
self.assertTrue(isinstance(info.get_vfuncs(), collections.Iterable))
self.assertTrue(isinstance(info.get_constants(), collections.Iterable))
self.assertTrue(isinstance(info.get_prerequisites(), collections.Iterable))
self.assertTrue(isinstance(info.get_properties(), collections.Iterable))
self.assertTrue(isinstance(info.get_signals(), collections.Iterable))
method = info.find_method('test_int8_in')
vfunc = info.find_vfunc('test_int8_in')
self.assertEqual(method.get_name(), 'test_int8_in')
self.assertEqual(vfunc.get_invoker(), method)
self.assertEqual(method.get_vfunc(), vfunc)
iface = info.get_iface_struct()
self.assertEqual(iface, repo.find_by_name('GIMarshallingTests', 'InterfaceIface'))
def test_struct_info(self):
info = repo.find_by_name('GIMarshallingTests', 'InterfaceIface')
self.assertTrue(isinstance(info, GIRepository.StructInfo))
self.assertTrue(isinstance(info.get_fields(), collections.Iterable))
self.assertTrue(isinstance(info.get_methods(), collections.Iterable))
self.assertTrue(isinstance(info.get_size(), int))
self.assertTrue(isinstance(info.get_alignment(), int))
self.assertTrue(info.is_gtype_struct())
self.assertFalse(info.is_foreign())
def test_enum_info(self):
info = repo.find_by_name('GIMarshallingTests', 'Enum')
self.assertTrue(isinstance(info, GIRepository.EnumInfo))
self.assertTrue(isinstance(info.get_values(), collections.Iterable))
self.assertTrue(isinstance(info.get_methods(), collections.Iterable))
self.assertFalse(info.is_flags())
self.assertTrue(info.get_storage_type() > 0) # might be platform dependent
def test_union_info(self):
info = repo.find_by_name('GIMarshallingTests', 'Union')
self.assertTrue(isinstance(info, GIRepository.UnionInfo))
self.assertTrue(isinstance(info.get_fields(), collections.Iterable))
self.assertTrue(isinstance(info.get_methods(), collections.Iterable))
def test_type_info(self):
func_info = repo.find_by_name('GIMarshallingTests', 'array_fixed_out_struct')
arg_info, = func_info.get_arguments()
type_info = arg_info.get_type()
self.assertTrue(type_info.is_pointer())
self.assertEqual(type_info.get_tag(), GIRepository.TypeTag.ARRAY)
self.assertEqual(type_info.get_tag_as_string(), 'array')
self.assertEqual(type_info.get_param_type(0).get_tag(),
GIRepository.TypeTag.INTERFACE)
self.assertEqual(type_info.get_param_type(0).get_interface(),
repo.find_by_name('GIMarshallingTests', 'SimpleStruct'))
self.assertEqual(type_info.get_interface(), None)
self.assertEqual(type_info.get_array_length(), -1)
self.assertEqual(type_info.get_array_fixed_size(), 2)
self.assertFalse(type_info.is_zero_terminated())
self.assertEqual(type_info.get_array_type(), GIRepository.ArrayType.C)
def test_field_info(self):
info = repo.find_by_name('GIMarshallingTests', 'InterfaceIface')
field = find_child_info(info, 'get_fields', 'test_int8_in')
self.assertEqual(field.get_name(), 'test_int8_in')
self.assertTrue(field.get_flags() & GIRepository.FieldInfoFlags.IS_READABLE)
self.assertFalse(field.get_flags() & GIRepository.FieldInfoFlags.IS_WRITABLE)
self.assertEqual(field.get_type().get_tag(), GIRepository.TypeTag.INTERFACE)
# don't test actual values because that might fail with architecture differences
self.assertTrue(isinstance(field.get_size(), int))
self.assertTrue(isinstance(field.get_offset(), int))
def test_property_info(self):
info = repo.find_by_name('GIMarshallingTests', 'PropertiesObject')
prop = find_child_info(info, 'get_properties', 'some-object')
flags = GObject.ParamFlags.READABLE | GObject.ParamFlags.WRITABLE | GObject.ParamFlags.CONSTRUCT
self.assertEqual(prop.get_flags(), flags)
self.assertEqual(prop.get_type().get_tag(), GIRepository.TypeTag.INTERFACE)
self.assertEqual(prop.get_type().get_interface(),
repo.find_by_name('GObject', 'Object'))
self.assertEqual(prop.get_ownership_transfer(), GIRepository.Transfer.NOTHING)
def test_callable_info(self):
func_info = repo.find_by_name('GIMarshallingTests', 'array_fixed_out_struct')
self.assertTrue(hasattr(func_info, 'invoke'))
self.assertTrue(isinstance(func_info.get_arguments(), collections.Iterable))
self.assertEqual(func_info.get_caller_owns(), GIRepository.Transfer.NOTHING)
self.assertFalse(func_info.may_return_null())
self.assertEqual(func_info.get_return_type().get_tag(), GIRepository.TypeTag.VOID)
self.assertRaises(AttributeError, func_info.get_return_attribute, '_not_an_attr')
@unittest.expectedFailure # https://bugzilla.gnome.org/show_bug.cgi?id=709462
@unittest.skipUnless(has_cairo, 'Regress needs cairo')
def test_signal_info(self):
repo.require('Regress')
info = repo.find_by_name('Regress', 'TestObj')
sig_info = find_child_info(info, 'get_signals', 'test')
sig_flags = GObject.SignalFlags.RUN_LAST | \
GObject.SignalFlags.NO_RECURSE | GObject.SignalFlags.NO_HOOKS
self.assertTrue(sig_info is not None)
self.assertTrue(isinstance(sig_info, GIRepository.CallableInfo))
self.assertTrue(isinstance(sig_info, GIRepository.SignalInfo))
self.assertEqual(sig_info.get_name(), 'test')
self.assertEqual(sig_info.get_class_closure(), None)
self.assertFalse(sig_info.true_stops_emit())
self.assertEqual(sig_info.get_flags(), sig_flags)
@unittest.expectedFailure # https://bugzilla.gnome.org/show_bug.cgi?id=709462
@unittest.skipUnless(has_cairo, 'Regress needs cairo')
def test_notify_signal_info_with_obj(self):
repo.require('Regress')
info = repo.find_by_name('Regress', 'TestObj')
sig_info = find_child_info(info, 'get_signals', 'sig-with-array-prop')
sig_flags = GObject.SignalFlags.RUN_LAST
self.assertTrue(sig_info is not None)
self.assertTrue(isinstance(sig_info, GIRepository.CallableInfo))
self.assertTrue(isinstance(sig_info, GIRepository.SignalInfo))
self.assertEqual(sig_info.get_name(), 'sig-with-array-prop')
self.assertEqual(sig_info.get_class_closure(), None)
self.assertFalse(sig_info.true_stops_emit())
self.assertEqual(sig_info.get_flags(), sig_flags)
def test_object_constructor(self):
info = repo.find_by_name('GIMarshallingTests', 'Object')
method = find_child_info(info, 'get_methods', 'new')
self.assertTrue(isinstance(method, GIRepository.CallableInfo))
self.assertTrue(isinstance(method, GIRepository.FunctionInfo))
self.assertTrue(method in info.get_methods())
self.assertEqual(method.get_name(), 'new')
self.assertFalse(method.is_method())
self.assertTrue(method.is_constructor())
self.assertEqual(method.get_symbol(), 'gi_marshalling_tests_object_new')
flags = method.get_flags()
self.assertFalse(flags & GIRepository.FunctionInfoFlags.IS_METHOD)
self.assertTrue(flags & GIRepository.FunctionInfoFlags.IS_CONSTRUCTOR)
self.assertFalse(flags & GIRepository.FunctionInfoFlags.IS_GETTER)
self.assertFalse(flags & GIRepository.FunctionInfoFlags.IS_SETTER)
self.assertFalse(flags & GIRepository.FunctionInfoFlags.WRAPS_VFUNC)
self.assertFalse(flags & GIRepository.FunctionInfoFlags.THROWS)
def test_method_info(self):
info = repo.find_by_name('GIMarshallingTests', 'Object')
method = find_child_info(info, 'get_methods', 'vfunc_return_value_only')
self.assertTrue(isinstance(method, GIRepository.CallableInfo))
self.assertTrue(isinstance(method, GIRepository.FunctionInfo))
self.assertTrue(method in info.get_methods())
self.assertEqual(method.get_name(), 'vfunc_return_value_only')
self.assertFalse(method.is_constructor())
self.assertEqual(method.get_symbol(), 'gi_marshalling_tests_object_vfunc_return_value_only')
self.assertTrue(method.is_method())
flags = method.get_flags()
self.assertTrue(flags & GIRepository.FunctionInfoFlags.IS_METHOD)
self.assertFalse(flags & GIRepository.FunctionInfoFlags.IS_CONSTRUCTOR)
self.assertFalse(flags & GIRepository.FunctionInfoFlags.IS_GETTER)
self.assertFalse(flags & GIRepository.FunctionInfoFlags.IS_SETTER)
self.assertFalse(flags & GIRepository.FunctionInfoFlags.WRAPS_VFUNC)
self.assertFalse(flags & GIRepository.FunctionInfoFlags.THROWS)
def test_vfunc_info(self):
info = repo.find_by_name('GIMarshallingTests', 'Object')
invoker = find_child_info(info, 'get_methods', 'vfunc_return_value_only')
vfunc = find_child_info(info, 'get_vfuncs', 'vfunc_return_value_only')
self.assertTrue(isinstance(vfunc, GIRepository.CallableInfo))
self.assertTrue(isinstance(vfunc, GIRepository.VFuncInfo))
self.assertEqual(vfunc.get_name(), 'vfunc_return_value_only')
self.assertEqual(vfunc.get_invoker(), invoker)
self.assertEqual(invoker, info.find_method('vfunc_return_value_only'))
self.assertEqual(vfunc.get_flags(), 0)
self.assertEqual(vfunc.get_offset(), 0xFFFF) # unknown offset
self.assertEqual(vfunc.get_signal(), None)
def test_flags_double_registration_error(self):
# a warning is printed for double registration and pygobject will
# also raise a RuntimeError.
GIMarshallingTests.NoTypeFlags # cause flags registration
info = repo.find_by_name('GIMarshallingTests', 'NoTypeFlags')
old_mask = GLib.log_set_always_fatal(GLib.LogLevelFlags.LEVEL_ERROR)
try:
self.assertRaises(RuntimeError,
GIRepository.flags_register_new_gtype_and_add,
info)
finally:
GLib.log_set_always_fatal(old_mask)
def test_enum_double_registration_error(self):
# a warning is printed for double registration and pygobject will
# also raise a RuntimeError.
GIMarshallingTests.Enum # cause enum registration
info = repo.find_by_name('GIMarshallingTests', 'Enum')
old_mask = GLib.log_set_always_fatal(GLib.LogLevelFlags.LEVEL_ERROR)
try:
self.assertRaises(RuntimeError,
GIRepository.enum_register_new_gtype_and_add,
info)
finally:
GLib.log_set_always_fatal(old_mask)
def test_enums(self):
self.assertTrue(hasattr(GIRepository, 'Direction'))
self.assertTrue(hasattr(GIRepository, 'Transfer'))
self.assertTrue(hasattr(GIRepository, 'ArrayType'))
self.assertTrue(hasattr(GIRepository, 'ScopeType'))
self.assertTrue(hasattr(GIRepository, 'VFuncInfoFlags'))
self.assertTrue(hasattr(GIRepository, 'FieldInfoFlags'))
self.assertTrue(hasattr(GIRepository, 'FunctionInfoFlags'))
self.assertTrue(hasattr(GIRepository, 'TypeTag'))
self.assertTrue(hasattr(GIRepository, 'InfoType'))
def test_introspected_argument_info(self):
self.assertTrue(isinstance(IntrospectedRepository.Argument.__info__,
GIRepository.UnionInfo))
arg = IntrospectedRepository.Argument()
self.assertTrue(isinstance(arg.__info__, GIRepository.UnionInfo))
old_info = IntrospectedRepository.Argument.__info__
IntrospectedRepository.Argument.__info__ = 'not an info'
self.assertRaises(TypeError, IntrospectedRepository.Argument)
IntrospectedRepository.Argument.__info__ = old_info
if __name__ == '__main__':
unittest.main()
|
lgpl-2.1
| -2,148,817,928,267,952,600
| 49.331461
| 105
| 0.683726
| false
| 3.723608
| true
| false
| false
|
Dolyphin/FSI_1D
|
initialize_data_structure.py
|
1
|
1765
|
# % ---------------------------------
# % filename: initialize_data_structure.py
# %
# % we set the physical data of the structure.
# %============================================================
# % physical data of the structure
# %===========================================================
import numpy as np
from initialize_data_fluid import *
vprel=np.zeros((2,1))
vprel[0] = 1e7 #% spring rigidity
vprel[1]= 100 #% mass of the piston
Lsp0 = 1.2 #% length of the spring (unstretched)
Lspe = Lsp0-(pres_init0-p_ext)*A/vprel[0] #% length at equilibrium
if(Lspe<=0):
    print('Length of the spring at equilibrium Lspe= %g meters !' % float(Lspe))
# % ----------------------------------
# % We compute the natural period of the (mass+spring) system
# %
omega0 = np.sqrt(vprel[0]/vprel[1]) #% natural pulsation
freq0 = omega0/(2*np.pi) #% natural frequency
T0 = 1/freq0 #% natural period
#%
print('Piston mass= %g kg' % float(vprel[1]))
print('Spring rigidity= %g N/m' % float(vprel[0]))
print('Natural frequency of the mass-spring system= %g Hz\n' % float(freq0))
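# % ------------------------------------------------------------
# % worked example (added for clarity, with the values set above):
# %   vprel[0] = 1e7 N/m, vprel[1] = 100 kg
# %   omega0 = sqrt(1e7/100) ~ 316.2 rad/s
# %   freq0  = omega0/(2*pi) ~ 50.3  Hz
# %   T0     = 1/freq0       ~ 0.020 s
# % ------------------------------------------------------------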
# % ============================================================
# % Data initialization for the structure
# % ===========================================================
# % beware of the sign of U0
# % u_t is the current displacement of the piston set to the initial displacement
# % u_dot_t is the current velocity of the piston
# % u_double_dot_t is the current acceleration of the piston
# %
u_t = U0
u_dot_t = 0
vsols0 = u_t
# %
# % ------------------------
# % initialization of the acceleration
vfg0 = (vpres[nnt-1]-0*pres_init)*A
u_double_dot_t = (vfg0+vprel[0]*(Lspe - u_t - Lsp0))/vprel[1]
|
gpl-3.0
| 3,326,060,273,394,111,000
| 35.553191
| 81
| 0.504816
| false
| 3.006814
| false
| false
| false
|
dhellmann/athensdocket
|
docket/server/browse.py
|
1
|
2993
|
import calendar
import datetime
from .app import app, mongo
from .filters import date
from .nav import set_navbar_active
from flask import render_template, g
from flask.ext.pymongo import ASCENDING
@app.route('/browse')
@set_navbar_active
def browse():
locations = sorted(mongo.db.cases.distinct('location'))
return render_template('browse.html',
locations=locations,
)
@app.route('/browse/date')
@app.route('/browse/date/<int:year>')
@app.route('/browse/date/<int:year>/<int:month>')
@app.route('/browse/date/<int:year>/<int:month>/<int:day>')
def browse_date(year=None, month=None, day=None):
g.navbar_active = 'browse'
if month and day:
first_day = datetime.datetime(year, month, day, 0, 0, 0)
last_day = first_day + datetime.timedelta(days=1)
date_range = date(first_day)
elif month:
first_day = datetime.datetime(year, month, 1, 0, 0, 0)
weekday, num_days = calendar.monthrange(year, month)
last_day = first_day + datetime.timedelta(days=num_days)
date_range = '%s-%02d' % (year, month)
elif year:
first_day = datetime.datetime(year, 1, 1, 0, 0, 0)
last_day = datetime.datetime(year + 1, 1, 1, 0, 0, 0)
date_range = unicode(year)
else:
# Show the list of years and months
books = mongo.db.books.find()
years = sorted(set(b['year'] for b in books))
return render_template('browse_date.html',
years=years,
)
app.logger.debug('first_day=%s, last_day=%s', first_day, last_day)
cases = mongo.db.cases.find({'date': {'$gte': first_day,
'$lt': last_day,
},
},
sort=[('date', ASCENDING)],
)
return render_template('browse_date_cases.html',
date_range=date_range,
cases=cases,
year=year,
month=month,
day=day,
)
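# Illustrative URL ranges handled above (dates are made up): /browse/date/1921 covers the
# whole year, /browse/date/1921/5 covers 1921-05-01 up to (but not including) 1921-06-01,
# and /browse/date/1921/5/3 covers that single day (the query below uses $gte/$lt).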
@app.route('/browse/location')
@app.route('/browse/location/<location>')
def browse_location(location=None):
if location:
cases = mongo.db.cases.find({'location': location,
},
sort=[('date', ASCENDING)],
)
return render_template('browse_location_cases.html',
location=location,
cases=cases,
)
else:
# Show the list of locations
locations = sorted(mongo.db.cases.distinct('location'))
return render_template('browse_location.html',
locations=locations,
)
|
apache-2.0
| 2,354,671,943,219,793,000
| 36.4125
| 70
| 0.495155
| false
| 4.31268
| false
| false
| false
|
sprymix/importkit
|
importkit/utils/adapter.py
|
1
|
4422
|
##
# Copyright (c) 2008-2013 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
class AdapterError(Exception):
pass
class Adapter(type):
adapters = {}
instance_adapters = {}
def __new__(mcls, name, bases, clsdict, *, adapts=None,
adapts_instances_of=None, pure=False,
adapterargs=None, **kwargs):
if adapts is not None and adapts_instances_of is not None:
msg = 'adapter class: adapts and adapts_instances_of args are ' + \
'mutually exclusive'
raise AdapterError(msg)
collection = None
if adapts is not None and not pure:
bases = bases + (adapts,)
if adapts_instances_of is not None:
pure = True
adapts = adapts_instances_of
collection = Adapter.instance_adapters
else:
collection = Adapter.adapters
result = super().__new__(mcls, name, bases, clsdict, **kwargs)
if adapts is not None:
assert issubclass(mcls, Adapter) and mcls is not Adapter
registry_key = mcls.get_registry_key(adapterargs)
try:
adapters = collection[registry_key]
except KeyError:
adapters = collection[registry_key] = {}
mcls.register_adapter(adapters, adapts, result)
result.__sx_adaptee__ = adapts
return result
def __init__(cls, name, bases, clsdict, *, adapts=None,
adapts_instances_of=None, pure=False,
adapterargs=None, **kwargs):
super().__init__(name, bases, clsdict, **kwargs)
@classmethod
def register_adapter(mcls, registry, adaptee, adapter):
assert adaptee not in registry
registry[adaptee] = adapter
@classmethod
def match_adapter(mcls, obj, adaptee, adapter):
if issubclass(obj, adapter) and obj is not adapter:
return obj
elif issubclass(obj, adaptee):
return adapter
@classmethod
def _get_adapter(mcls, obj, reversed_mro, collection, kwargs):
registry_key = mcls.get_registry_key(kwargs)
adapters = collection.get(registry_key)
if adapters is None:
return
result = None
seen = set()
for base in reversed_mro:
for adaptee, adapter in adapters.items():
found = mcls.match_adapter(base, adaptee, adapter)
if found and found not in seen:
result = found
seen.add(found)
if result is not None:
return result
@classmethod
def get_adapter(mcls, obj, **kwargs):
if isinstance(obj, type):
collection = Adapter.adapters
mro = obj.__mro__
else:
collection = Adapter.instance_adapters
mro = type(obj).__mro__
reversed_mro = tuple(reversed(mro))
result = mcls._get_adapter(obj, reversed_mro, collection, kwargs)
if result is not None:
return result
for mc in mcls.__subclasses__(mcls):
result = mc._get_adapter(obj, reversed_mro, collection, kwargs)
if result is not None:
return result
@classmethod
def adapt(mcls, obj):
adapter = mcls.get_adapter(obj.__class__)
if adapter is None:
raise AdapterError('could not find {}.{} adapter for {}'.format(
mcls.__module__, mcls.__name__,
obj.__class__.__name__))
elif adapter is not obj.__class__:
return adapter.adapt(obj)
else:
return obj
@classmethod
def get_registry_key(mcls, adapterargs):
if adapterargs:
return (mcls, frozenset(adapterargs.items()))
else:
return mcls
def get_adaptee(cls):
return cls.__sx_adaptee__
class MultiAdapter(Adapter):
@classmethod
def register_adapter(mcls, registry, adaptee, adapter):
try:
registry[adaptee] += (adapter,)
except KeyError:
registry[adaptee] = (adapter,)
@classmethod
def match_adapter(mcls, obj, adaptee, adapter):
if issubclass(obj, adapter) and obj not in adapter:
return (obj,)
elif issubclass(obj, adaptee):
return adapter
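# Usage sketch (illustrative only; the class names below are invented, not part of this module):
#
# class Renderers(Adapter): # a project-specific metaclass derived from Adapter
# pass
#
# class NodeRenderer(metaclass=Renderers, adapts=Node):
# ...
#
# Because pure defaults to False, Node is appended to NodeRenderer's bases, and
# Renderers.get_adapter(Node) afterwards resolves to NodeRenderer.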
|
mit
| -4,098,590,764,919,314,400
| 28.878378
| 79
| 0.558345
| false
| 4.211429
| false
| false
| false
|
elki-project/elki
|
addons/joglvis/src-manual/build.py
|
1
|
1307
|
#!/usr/bin/python
from lxml import etree
import gzip, re, copy, tempfile, subprocess, os
SVG_NAMESPACE="http://www.w3.org/2000/svg"
INKSCAPE_NAMESPACE="http://www.inkscape.org/namespaces/inkscape"
_safe = re.compile("^[A-Za-z]+$")
sizes=[64,32,16,8,4]
tree = etree.parse(gzip.open("Markers.svgz"))
labels = etree.ETXPath("//{%s}g/@{%s}label" % (SVG_NAMESPACE, INKSCAPE_NAMESPACE))(tree)
for l in labels:
if not _safe.match(l): raise Exception("Label not safe: "+l)
ctree = copy.deepcopy(tree)
layers = etree.ETXPath("//{%s}g[./@{%s}label]" % (SVG_NAMESPACE, INKSCAPE_NAMESPACE))(ctree)
for layer in layers:
l2 = layer.get("{%s}label" % INKSCAPE_NAMESPACE)
if l2 == l:
layer.attrib["style"]=""
else:
layer.attrib["style"]="display:none"
f = tempfile.NamedTemporaryFile(delete=False)
f.write(etree.tostring(ctree))
f.close()
cmd=["rsvg-convert",
"-w", "62", "-h", "62",
"-o", "/tmp/%s.png" % l,
f.name]
print "Running", " ".join(cmd)
subprocess.call(cmd)
os.unlink(f.name)
for size in sizes:
cmd = ["montage"]
for l in labels: cmd.append("/tmp/%s.png" % l)
cmd.extend(["-geometry", "%sx%s+1+1" % (size-2, size-2), "-background", "none", "PNG32:markers-%s.png" % size ])
print "Running", " ".join(cmd)
subprocess.call(cmd)
for l in labels: os.unlink("/tmp/%s.png" % l)
|
agpl-3.0
| -8,183,996,135,032,793,000
| 29.395349
| 113
| 0.644989
| false
| 2.552734
| false
| false
| false
|
bonus85/tilt-editor
|
tilt_hack.py
|
1
|
13660
|
#!/usr/bin/env python
"""
Analyze and edit .sketch files (internal in .tilt)
Also supports generating .sketch files from json
@author: Sindre Tosse
"""
import struct
import json
import pdb
import numpy as np
try:
from stl import mesh
STL_SUPPORTED = True
STL_COLOR = (1., 0., 0., 1.)
STL_BRUSH_SIZE = 0.08
except ImportError:
print 'stl files not supported (run "pip install numpy-stl" to enable)'
STL_SUPPORTED = False
END = '' # Struct format
ORDERS_OF_TWO = [2**i for i in range(32)]
MAX_BYTE_VALUE = ORDERS_OF_TWO[-1] - 1
def bits(byte, max_order=32):
assert byte <= MAX_BYTE_VALUE
return [min(byte&oot, 1) for oot in ORDERS_OF_TWO[:max_order]]
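# e.g. bits(0b0101, 4) == [1, 0, 1, 0] -- least-significant bit first, one entry per order of two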
MAX_DV = 0.5 # Max length (x,y,z) between two points (from_json)
class SketchEditor:
STROKE_EXTENSION_ENCODING = {
0: 'I', # uint32 flags
}
POINT_EXTENSION_ENCODING = {
0: 'f', # float stroke pressure
1: 'I', # uint32 timestamp (ms)
}
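# How these masks are used by the parser below: each set bit in the low 16 bits of
# stroke_extension selects one fixed 4-byte field (decoded with the format above, or
# 'cccc' when unknown); each set bit in the high 16 bits selects a variable-length blob
# prefixed by a uint32 size; point_extension works like the low half, once per point.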
def __init__(self, sentinel=-982080051, version=5, expected_brush_strokes=None):
self.sentinel = sentinel
self.version = version
self.expected_brush_strokes = expected_brush_strokes
self.strokes = []
@classmethod
def from_sketch_file(cls, file_name):
with open(file_name, 'rb') as f:
header_bytes = f.read(16)
sentinel, version, reserved, extra_size = \
struct.unpack(END+'iiii', header_bytes)
assert reserved == 0, \
'.sketch header reserved bytes are not zero: %d' %reserved
if extra_size > 0:
additional_header_data = f.read(extra_size)
print 'Warning: Additional header data present (skipping):'
print ' %r' %additional_header_data
num_strokes_bytes = f.read(4)
num_strokes = struct.unpack(END+'i', num_strokes_bytes)[0]
instance = SketchEditor(sentinel, version , num_strokes)
for i in range(num_strokes):
stroke_header = f.read(32)
#print repr(stroke_header), len(stroke_header)
idx, r, g, b, a, brush_size, stroke_extension, point_extension = \
struct.unpack(END+'ifffffII', stroke_header)
# int32/float32 for each set bit in stroke_extension & ffff
stroke_extension_mask = bits(stroke_extension & 0xffff, 16)
stroke_extension_data = {}
for i, bit in enumerate(stroke_extension_mask):
if bit:
fmt = SketchEditor.STROKE_EXTENSION_ENCODING.get(i, 'cccc')
stroke_extension_data[i] = struct.unpack(END+fmt, f.read(4))[0]
# uint32 size + <size> for each set bit in stroke_extension & ~ffff
stroke_extension_mask_extra = bits(stroke_extension & ~0xffff, 16)
stroke_extension_data_extra = {}
for i, bit in enumerate(stroke_extension_mask_extra):
if bit:
size = struct.unpack(END+'I', f.read(4))[0]
stroke_extension_data_extra[i] = f.read(size)
num_points = struct.unpack(END+'i', f.read(4))[0]
point_extension_mask = bits(point_extension & 0xffff)
stroke = Stroke(
(r, g, b, a),
brush_size,
brush_index=idx,
stroke_extension_mask=stroke_extension_mask,
stroke_extension_data=stroke_extension_data,
stroke_extension_mask_extra=stroke_extension_mask_extra,
stroke_extension_data_extra=stroke_extension_data_extra,
point_extension_mask=point_extension_mask,
expected_points=num_points
)
for j in range(num_points):
point_data = f.read(28)
x, y, z, or1, or2, or3, or4 = \
struct.unpack(END+'fffffff', point_data) # position and orientation
# int32/float32 for each set bit in point_extension
point_extension_data = {}
for i, bit in enumerate(point_extension_mask):
if bit:
fmt = SketchEditor.POINT_EXTENSION_ENCODING.get(i, 'cccc')
point_extension_data[i] = struct.unpack(END+fmt, f.read(4))[0]
point = StrokePoint(
stroke,
(x, y, z),
(or1, or2, or3, or4),
point_extension_data
)
stroke.add(point)
instance.add_stroke(stroke)
assert f.read() == '',\
'Error: file did not match format specification (incorrect length)'
return instance
@classmethod
def from_json(cls, file_name):
with open(file_name) as f:
json_sketch = json.load(f)
instance = SketchEditor()
for stroke_spec in json_sketch['strokes']:
stroke = Stroke(tuple(stroke_spec['color']), stroke_spec['brush_size'])
positions = np.array(stroke_spec['points'], dtype=float)
prev_pos = np.roll(positions, 1, 0)
prev_pos[0][0] = np.nan
for prev, position in zip(prev_pos, positions):
if not np.isnan(prev[0]):
dv = MAX_DV * (position-prev) / np.linalg.norm(position-prev)
print prev, position, dv
while np.linalg.norm(position-prev) > MAX_DV:
prev += dv
#print prev
stroke.add(StrokePoint(stroke, tuple(prev)))
#print position
stroke.add(StrokePoint(stroke, tuple(position)))
instance.add_stroke(stroke)
return instance
@classmethod
def from_stl(cls, file_name):
assert STL_SUPPORTED
stl_mesh = mesh.Mesh.from_file(file_name)
instance = SketchEditor()
for p0, p1, p2 in zip(stl_mesh.v0, stl_mesh.v1, stl_mesh.v2):
stroke = Stroke(STL_COLOR, STL_BRUSH_SIZE)
positions = np.array([p0, p1, p2, p0], dtype=float)
prev_pos = np.roll(positions, 1, 0)
prev_pos[0][0] = np.nan
for prev, position in zip(prev_pos, positions):
if not np.isnan(prev[0]):
dv = MAX_DV * (position-prev) / np.linalg.norm(position-prev)
print prev, position, dv
while np.linalg.norm(position-prev) > MAX_DV:
prev += dv
#print prev
stroke.add(StrokePoint(stroke, tuple(prev)))
#print position
stroke.add(StrokePoint(stroke, tuple(position)))
instance.add_stroke(stroke)
return instance
def add_stroke(self, stroke):
self.strokes.append(stroke)
def write(self, file_name):
with open(file_name, 'wb') as f:
f.write(struct.pack(END+'iiiii',
self.sentinel, self.version, 0, 0, len(self.strokes)))
for stroke in self.strokes:
f.write(stroke.pack())
def write_points(self, file_name):
npoints = sum(len(s.points) for s in self.strokes)
with open(file_name, 'w') as f:
f.write(str(npoints)+'\n')
for stroke in self.strokes:
for point in stroke.points:
f.write('{p.x} {p.y} {p.z}\n'.format(p=point))
def info(self):
print 'Sentinel: %d' %self.sentinel
print 'Version: %d' %self.version
print 'Brush strokes: %s expected, %d actual' %(
self.expected_brush_strokes, len(self.strokes))
Z16 = [0 for i in range(16)]
Z32 = [0 for i in range(32)]
class Stroke:
def __init__(
self,
(r, g, b, a),
brush_size,
brush_index=0,
stroke_extension_mask=Z16,
stroke_extension_data=None,
stroke_extension_mask_extra=Z16,
stroke_extension_data_extra=None,
point_extension_mask=Z32,
expected_points=None
):
self.r = r
self.g = g
self.b = b
self.a = a
self.brush_size = brush_size
self.brush_index = brush_index
self.stroke_extension_mask = stroke_extension_mask
self.stroke_extension_mask_extra = stroke_extension_mask_extra
self.point_extension_mask = point_extension_mask
self.stroke_extension_data = stroke_extension_data
self.stroke_extension_data_extra = stroke_extension_data_extra
self.expected_stroke_points = expected_points
self.points = []
def pack(self):
stroke_extension = sum(b * oot for b, oot in
zip(self.stroke_extension_mask, ORDERS_OF_TWO[:16]))
stroke_extension += sum(b * oot for b, oot in
zip(self.stroke_extension_mask_extra, ORDERS_OF_TWO[16:]))
point_extension = sum(b * oot for b, oot in
zip(self.point_extension_mask, ORDERS_OF_TWO))
s = struct.pack(END+'ifffffII',
self.brush_index, self.r, self.g, self.b, self.a,
self.brush_size, stroke_extension, point_extension)
for i, bit in enumerate(self.stroke_extension_mask):
if bit:
fmt = SketchEditor.STROKE_EXTENSION_ENCODING.get(i, 'cccc')
s += struct.pack(END+fmt, self.stroke_extension_data[i])
for i, bit in enumerate(self.stroke_extension_mask_extra):
if bit:
s += struct.pack(END+'I', len(self.stroke_extension_data_extra[i]))
s += self.stroke_extension_data_extra[i]
s += struct.pack(END+'i', len(self.points))
for point in self.points:
s += point.pack()
return s
def add(self, point):
self.points.append(point)
def info(self):
print 'Stroke color: (%f, %f, %f, %f)' %(self.r, self.g, self.b, self.a)
print 'Brush size: %f' %self.brush_size
print 'Stroke extension:'
for i, bit in enumerate(self.stroke_extension_mask):
if bit:
print ' %d: %r' %(i, self.stroke_extension_data[i])
print 'Stroke extension (extra):'
for i, bit in enumerate(self.stroke_extension_mask_extra):
if bit:
print ' %d: %r' %(i, self.stroke_extension_data_extra[i])
print 'Number of stroke points: %s expected, %d actual' %(
self.expected_stroke_points, len(self.points))
print 'First point:'
self.points[0].info()
print 'Last point:'
self.points[-1].info()
class StrokePoint:
def __init__(
self,
parent_stroke,
(x, y, z),
(or1, or2, or3, or4)=(0.,0.,0.,0.),
point_extension_data=None
):
self.parent_stroke = parent_stroke
self.x = x
self.y = y
self.z = z
self.or1 = or1
self.or2 = or2
self.or3 = or3
self.or4 = or4
self.point_extension_data = point_extension_data
def pack(self):
s = struct.pack(END+'fffffff',
self.x, self.y, self.z, self.or1, self.or2, self.or3, self.or4)
for i, bit in enumerate(self.parent_stroke.point_extension_mask):
if bit:
fmt = SketchEditor.POINT_EXTENSION_ENCODING.get(i, 'cccc')
s += struct.pack(END+fmt, self.point_extension_data[i])
return s
def info(self):
print 'Position: (%f, %f, %f)' %(self.x, self.y, self.z)
print 'Orientation: (%f, %f, %f, %f)' %(self.or1, self.or2, self.or3, self.or4)
print 'Point extension:'
for i, bit in enumerate(self.parent_stroke.point_extension_mask):
if bit:
print ' %d: %r' %(i, self.point_extension_data[i])
if __name__ == '__main__':
import argparse
import os
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("file_name", type=str, help="Name of file to open")
opts = parser.parse_args() # Parses sys.argv by default
name, ext = os.path.splitext(opts.file_name)
if ext == '.sketch':
t = SketchEditor.from_sketch_file(opts.file_name)
t.info()
for stroke in t.strokes:
stroke.info()
print 'Removing stroke extension'
t.strokes[0].stroke_extension_mask = [0 for i in range(16)]
t.strokes[0].stroke_extension_mask_extra = [0 for i in range(16)]
print 'Removing point extension'
t.strokes[0].point_extension_mask = [0 for i in range(32)]
print "Saving"
t.write('data.sketch')
elif ext == '.json':
t = SketchEditor.from_json(opts.file_name)
t.info()
for stroke in t.strokes:
stroke.info()
t.write('data.sketch')
elif ext == '.stl':
t = SketchEditor.from_stl(opts.file_name)
t.info()
for stroke in t.strokes:
stroke.info()
t.write('data.sketch')
else:
print 'Unknown file type: %s' %ext
|
gpl-3.0
| 1,189,793,421,834,732,800
| 38.252874
| 91
| 0.527965
| false
| 3.774523
| false
| false
| false
|
Micronaet/micronaet-order
|
auto_order_nomail_check/auto.py
|
1
|
3871
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import logging
import openerp
import xlsxwriter
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrder(orm.Model):
""" Model name: SaleOrder
"""
_inherit = 'sale.order'
def send_sale_order_email_check(self, cr, uid, context=None):
''' Send a summary email listing partners with confirmed orders but no email or invoice email address
'''
query = '''
SELECT name
FROM res_partner
WHERE
email_invoice_id is null and
email is null and
id IN (
SELECT distinct partner_id
FROM sale_order
WHERE
state not in ('cancel', 'draft', 'sent'));
'''
cr.execute(query)
partner_name = [item[0] for item in cr.fetchall()]
if not partner_name:
_logger.info('No email missed in partner with order found!')
return True
body = '<table>'
for name in partner_name:
body += '''<tr><td>%s</td></tr>''' % name
body += '</table>'
# ---------------------------------------------------------------------
# Send report:
# ---------------------------------------------------------------------
# Send mail with attachment:
group_pool = self.pool.get('res.groups')
model_pool = self.pool.get('ir.model.data')
thread_pool = self.pool.get('mail.thread')
group_id = model_pool.get_object_reference(
cr, uid,
'auto_order_nomail_check',
'group_order_email_report_admin')[1]
partner_ids = []
for user in group_pool.browse(
cr, uid, group_id, context=context).users:
partner_ids.append(user.partner_id.id)
thread_pool = self.pool.get('mail.thread')
thread_pool.message_post(cr, uid, False,
type='email',
body=body,
subject='%s: Partner senza mail per invio fattura: %s' % (
cr.dbname,
datetime.now().strftime(DEFAULT_SERVER_DATE_FORMAT),
),
partner_ids=[(6, 0, partner_ids)],
context=context,
)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
| 5,362,938,094,823,373,000
| 35.518868
| 87
| 0.54172
| false
| 4.511655
| false
| false
| false
|
cgchemlab/chemlab
|
tools/convert_gromacs2espp.py
|
1
|
4036
|
#!/usr/bin/env python
# Copyright (C) 2012,2013,2015(H),2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import math
import re
def convertTable(gro_in_file, esp_out_file, sigma=1.0, epsilon=1.0, c6=1.0, c12=1.0):
"""Convert GROMACS tabulated file into ESPResSo++ tabulated file (new file
is created). First column of input file can be either distance or angle.
For non-bonded files, c6 and c12 can be provided. Default value for sigma, epsilon,
c6 and c12 is 1.0. Electrostatics are not taken into account (f and fd columns).
Keyword arguments:
gro_in_file -- the GROMACS tabulated file name (bonded, nonbonded, angle
or dihedral).
esp_out_file -- filename of the ESPResSo++ tabulated file to be written.
sigma -- optional, depending on whether you want to convert units or not.
epsilon -- optional, depending on whether you want to convert units or not.
c6 -- optional
c12 -- optional
"""
# determine file type
bonded, angle, dihedral = False, False, False
re_bond = re.compile('.*_b[0-9]+.*')
re_angle = re.compile('.*_a[0-9]+.*')
re_dihedral = re.compile('.*_d[0-9]+.*')
if re.match(re_bond, gro_in_file):
bonded = True
elif re.match(re_angle, gro_in_file):
angle = True
bonded = True
elif re.match(re_dihedral, gro_in_file):
dihedral = True
bonded = True
fin = open(gro_in_file, 'r')
fout = open(esp_out_file, 'w')
if bonded: # bonded has 3 columns
for line in fin:
if line[0] == "#": # skip comment lines
continue
columns = line.split()
r = float(columns[0])
f = float(columns[1]) # energy
fd= float(columns[2]) # force
# convert units
if angle or dihedral: # degrees to radians
r = math.radians(r)
fd=fd*180/math.pi
else:
r = r / sigma
e = f / epsilon
f = fd*sigma / epsilon
if (not angle and not dihedral and r != 0) or \
(angle and r <= math.pi and r > 0) or \
(dihedral and r >= -math.pi and r <= math.pi):
fout.write("%15.8g %15.8g %15.8g\n" % (r, e, f))
else: # non-bonded has 7 columns
for line in fin:
if line.startswith('#'): # skip comment lines
continue
columns = line.split()
r = float(columns[0])
g = float(columns[3]) # dispersion
gd= float(columns[4])
h = float(columns[5]) # repulsion
hd= float(columns[6])
e = c6*g + c12*h
f = c6*gd+ c12*hd
# convert units
r = r / sigma
e = e / epsilon
f = f*sigma / epsilon
if r != 0: # skip 0
fout.write("%15.8g %15.8g %15.8g\n" % (r, e, f))
fin.close()
fout.close()
def _args():
parser = argparse.ArgumentParser()
parser.add_argument('in_file')
parser.add_argument('out_file')
return parser
def main():
args = _args().parse_args()
convertTable(args.in_file, args.out_file)
if __name__ == '__main__':
main()
|
gpl-3.0
| 4,729,603,492,576,089,000
| 31.031746
| 87
| 0.582755
| false
| 3.500434
| false
| false
| false
|
FAForever/api
|
api/leaderboards.py
|
1
|
8221
|
from faf.api import LeaderboardSchema
from faf.api import LeaderboardStatsSchema
from flask import request
from pymysql.cursors import DictCursor
from api import app
from api.error import ApiException, ErrorCode
from api.error import Error
from api.query_commons import fetch_data
from faf import db
MAX_PAGE_SIZE = 5000
SELECT_EXPRESSIONS = {
'id': 'r.id',
'login': 'l.login',
'mean': 'r.mean',
'deviation': 'r.deviation',
'num_games': 'r.numGames',
'is_active': 'r.is_active',
'rating': 'ROUND(r.mean - 3 * r.deviation)',
'ranking': '@rownum:=@rownum+1'
}
TABLE1V1 = 'ladder1v1_rating r JOIN login l on r.id = l.id, (SELECT @rownum:=%(row_num)s) n'
TABLEGLOBAL = 'global_rating r JOIN login l on r.id = l.id, (SELECT @rownum:=%(row_num)s) n'
@app.route('/leaderboards/<string:leaderboard_type>')
def leaderboards_type(leaderboard_type):
"""
Lists all ranked 1v1 or global players.
**Example Request**:
**Default Values**:
page[number]=1
page[size]=5000
.. sourcecode:: http
GET /leaderboards/1v1 /leaderboards/global
Accept: application/vnd.api+json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"data": [
{
"attributes": {
"deviation": 48.4808,
"id": "781",
"login": "Zock",
"mean": 2475.69,
"num_games": 1285,
"ranking": 1,
"rating": 2330,
"won_games": 946
},
"id": "781",
"type": "ranked1v1"
},
...
]
}
:param page[number]: The page number being requested (EX.: /leaderboards/1v1?page[number]=2)
:type page[number]: int
:param page[size]: The total amount of players to grab by default (EX.: /leaderboards/1v1?page[size]=10)
:type page[size]: int
:param leaderboard_type: Finds players in the 1v1 or global rating
:type leaderboard_type: 1v1 OR global
:status 200: No error
"""
sort_field = request.values.get('sort')
if sort_field:
raise ApiException([Error(ErrorCode.QUERY_INVALID_SORT_FIELD, sort_field)])
page = int(request.values.get('page[number]', 1))
page_size = int(request.values.get('page[size]', MAX_PAGE_SIZE))
row_num = (page - 1) * page_size
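# e.g. (illustrative) page[number]=3 with page[size]=100 gives row_num=200, so the
# @rownum counter in the SELECT starts this page's rankings at 201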
select = SELECT_EXPRESSIONS
args = {'row_num': row_num}
rating = find_leaderboard_type(leaderboard_type, select)
return fetch_data(LeaderboardSchema(), rating['table'], rating['select'], MAX_PAGE_SIZE, request, sort='-rating',
args=args, where='is_active = 1 AND r.numGames > 0')
@app.route('/leaderboards/<string:leaderboard_type>/<int:player_id>')
def leaderboards_type_get_player(leaderboard_type, player_id):
"""
Gets a global or 1v1 player. The player must be active, have played at least one ranked game, and have statistics
associated with them.
**Example Request**:
.. sourcecode:: http
GET /leaderboards/1v1/781 /leaderboards/global/781
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"data": {
"attributes": {
"deviation": 48.4808,
"id": "781",
"login": "Zock",
"mean": 2475.69,
"num_games": 1285,
"ranking": 1,
"rating": 2330,
"won_games": 946
},
"id": "781",
"type": "leaderboard"
}
}
:param leaderboard_type: Finds players in the 1v1 or global rating
:type leaderboard_type: 1v1 OR global
:param player_id: Player ID
:type player_id: int
:status 200: No error
:status 404: No entry with this id was found
"""
select_expressions = SELECT_EXPRESSIONS.copy()
select = select_expressions
rating = find_leaderboard_type(leaderboard_type, select)
select_expressions['ranking'] = """(SELECT count(*) FROM """ + rating['tableName'] + """
WHERE ROUND(mean - 3 * deviation) >= ROUND(r.mean - 3 * r.deviation)
AND is_active = 1
AND numGames > 0)
"""
result = fetch_data(LeaderboardSchema(), rating['table'], rating['select'], MAX_PAGE_SIZE, request,
many=False, where='r.id=%(id)s', args=dict(id=player_id, row_num=0))
if 'id' not in result['data']:
return {'errors': [{'title': 'No entry with this id was found'}]}, 404
return result
@app.route("/leaderboards/<string:rating_type>/stats")
def rating_stats(rating_type):
"""
Gets aggregate leaderboard statistics: the number of active players in each 100-point rating bucket.
**Example Request**:
.. sourcecode:: http
GET /leaderboards/1v1/stats /leaderboards/global/stats
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"data": {
"attributes": {
"rating_distribution": {
"-100": 206,
"-200": 192,
"-300": 137,
"-400": 77,
"-500": 15,
"-600": 10,
"-700": 1,
"0": 268,
"100": 282,
"1000": 122,
"1100": 86,
"1200": 72,
"1300": 55,
"1400": 42,
"1500": 35,
"1600": 25,
"1700": 15,
"1800": 14,
"1900": 7,
"200": 284,
"2000": 5,
"2100": 2,
"2200": 1,
"2300": 2,
"300": 316,
"400": 296,
"500": 239,
"600": 238,
"700": 208,
"800": 177,
"900": 140
}
},
"id": "/leaderboards/1v1/stats",
"type": "leaderboard_stats"
}
}
:status 200: No error
"""
rating = find_leaderboard_type(rating_type)
with db.connection:
cursor = db.connection.cursor(DictCursor)
cursor.execute("""
SELECT
FLOOR((mean - 3 * deviation)/100) * 100 AS `rating`,
count(*) as count
FROM """ + rating['tableName'] + """
WHERE `is_active` = 1
AND mean BETWEEN 0 AND 3000
AND deviation <= 240
AND numGames > 0
GROUP BY `rating`
ORDER BY CAST(`rating` as SIGNED) ASC;
""")
result = cursor.fetchall()
data = dict(id='/leaderboards/' + rating_type + '/stats', rating_distribution={})
for item in result:
data['rating_distribution'][str(int(item['rating']))] = item['count']
return LeaderboardStatsSchema().dump(data, many=False).data
def find_leaderboard_type(rating_type, select=None):
rating = {}
if rating_type == '1v1':
rating['table'] = TABLE1V1
rating['select'] = append_select_expression()
rating['tableName'] = 'ladder1v1_rating'
elif rating_type == 'global':
rating['table'] = TABLEGLOBAL
rating['select'] = select
rating['tableName'] = 'global_rating'
else:
raise ApiException([Error(ErrorCode.QUERY_INVALID_RATING_TYPE, rating_type)])
return rating
def append_select_expression():
select = SELECT_EXPRESSIONS.copy()
select['won_games'] = 'r.winGames'
select['lost_games'] = 'r.numGames - r.winGames'
select['winning_percentage'] = 'ROUND((r.winGames/r.numGames) * 100)'
return select
|
gpl-3.0
| -7,423,851,992,924,885,000
| 28.256228
| 129
| 0.505291
| false
| 3.933493
| false
| false
| false
|
johncadigan/CategoryGenerator
|
db_hyponym_trees.py
|
1
|
2540
|
import os
CURRENT_DIR = os.path.dirname(__file__)
###Default Settings
DATA_DIR = 'data'
COUNTS_FILE = 'word-totals.txt'
WHITE_LIST = 'whitelist.csv'
DEFAULT_LIMIT = 50000
DEFAULT_DEPTH = 5
DEFAULT_SYNSETS = 3
##### DB Dependent variables
MYSQL_URL = 'mysql://user:password@host/database?charset=utf8'
from sqlalchemy import *
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, aliased
from tree import Tree
from hyponym_generator import HyponymGenerator
from model import *
class DBTree(Tree):
def write(self, child, parent): #Overriden function for db version
Session = sessionmaker(bind=create_engine(MYSQL_URL))
DBSession = Session()
parent_category = DBSession.query(Category).filter(Category.name==parent).first()
child_category = Category(name=child)
child_category.parent = parent_category
DBSession.add(child_category)
DBSession.commit()
queue = self[child].fpointer
if self[child].expanded:
for element in queue:
self.write(element, child_category.name) # recursive call
class DBGenerator(HyponymGenerator):
def __init__(self):
self.frequency_limit = DEFAULT_LIMIT
self.depth_limit = DEFAULT_DEPTH
self.synset_limit = DEFAULT_SYNSETS
#Add only relevant word frequencies
data_dir = os.path.join(CURRENT_DIR, DATA_DIR)
unigram_file = os.path.join(data_dir, COUNTS_FILE)
with open(unigram_file, "r") as unigrams:
unigrams = unigrams.readlines()
for unigram in unigrams:
word, frequency = unigram.split('\t')
frequency = int(frequency)
if frequency >= self.frequency_limit:
self.unigram_frequencies[word] = frequency
del unigrams
def set_tree(self): #Overriden function for db version
self.tree = DBTree()
def final_menu(self, word): #Overriden function for db version
Session = sessionmaker(bind=create_engine(MYSQL_URL))
DBSession = Session()
ppinput = "1"
while ppinput == "1":
pinput = raw_input("Please input the potential name of a grandparent in db to find parents\n")
parent = DBSession.query(Category).filter(Category.name == pinput).first()
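# Nested-set lookup (assumes Category maintains MPTT-style left/right columns):
# every node whose (left, right) interval lies strictly inside the parent's
# interval is a descendant, so one query returns the whole subtree.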
descendants = DBSession.query(Category.name).filter(Category.left > parent.left).filter(Category.right < parent.right).all()
print "{0} \n \t {1}".format(parent.name, str(descendants))
ppinput = raw_input("Please input the name for tree's parent. Input 1 to look at other parts of database tree\n")
if ppinput != "1":
self.tree.write(child=word, parent=ppinput)
if __name__ == '__main__':
hg = DBGenerator()
hg.run_menus()
|
mit
| -2,648,622,299,959,671,000
| 30.75
| 127
| 0.720079
| false
| 3.211125
| false
| false
| false
|
CTR-BFX/CambridgeHackathon
|
rRNA_MT_count.py
|
1
|
4956
|
#!/usr/bin/python
# Malwina Prater, mn367@cam.ac.uk, 2017, Copyright
# Centre for Trophoblast Research, University of Cambridge
#
# Script version: v01.
#
# Script to calculate the percent of transcripts mapping to rRNA
#
# INPUTS :
# 1. HTseq_counts file
# 2. Original reference transcriptome aligned to
#
# USAGE :
# For producing table(s) with rRNA and MT counts for each sample use commands like that:
#
# ./rRNA_MT_count.py --gtf /Users/malwina/Documents/CTR-Data/genomes/Mus_musculus/mm10/Mus_musculus.GRCm38.84.gtf --htseq C17_3_S20_Aligned.out.srt.bam_htseq_combined_counts.txt
#
# import modules:
import os,sys
from optparse import OptionParser
import re
# parse in the user options:
parser = OptionParser(usage="%prog --gtf <annotation.gtf> --htseq <HTseq_counts.txt>",
version="%prog 0.1")
parser.add_option("--htseq", dest="FileName", type="string", action="store")
parser.add_option("--gtf", dest="GTF", type="string", action="store")
(options, args) = parser.parse_args()
#files = sys.argv[]
HTSEQ_COUNTS = options.FileName
GTF = options.GTF
# check if files supplied exist:
try:
handle = open(GTF, "rU")
handle.close()
except:
print "\nError->\tGTF File: %s does not exist\n" % GTF
sys.exit()
try:
handle = open(HTSEQ_COUNTS, "rU")
handle.close()
except:
print "\nError->\tFile: %s does not exist\n" % HTSEQ_COUNTS
sys.exit()
#
# First job is to extract all the identifiers of genes/transcripts mapping to the rRNA and MT genes and store in 2 arrays
#
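# An Ensembl-style GTF attribute field looks roughly like this (illustrative line, made-up ID):
# ... gene_id "ENSMUSG00000064337"; gene_version "1"; ... gene_biotype "rRNA"; ...
# The two re.sub calls below strip everything around the quoted gene_id, leaving e.g. ENSMUSG00000064337.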
rRNA_identifiers = {}
MT_identifiers = {}
with open(GTF, "rU") as handle:
#line = handle.readline()
for line in handle:
line.rstrip('\n')
if 'gene_biotype "rRNA"' in line:
identifier = line
identifier = re.sub('.*gene_id "', '', identifier)
identifier = re.sub('"; gene_version.*\n', '', identifier)
rRNA_identifiers[identifier] = 1
if 'MT' in line:
identifier = line
identifier = re.sub('.*gene_id "', '', identifier)
identifier = re.sub('"; gene_version.*\n', '', identifier)
MT_identifiers[identifier] = 1
handle.close()
#print("rRNA:")
#print(rRNA_identifiers.keys())
#print("MT:")
#print(MT_identifiers.keys())
#
# Second job is to go through the HTSEQ-couts and count reads matching the rRNA identifiers
#
Cummulative_rRNA_Count = 0
rRNA_genes = 0
ReadCount = 0
line_number = 0
MT_genes = 0;
Cummulative_MT_Count = 0;
with open(HTSEQ_COUNTS, "rU") as handle:
for line in handle:
line.rstrip('\n')
split_line = line.split("\t")
if line_number > 0:
if split_line[0] in rRNA_identifiers.keys(): # if rRNA_identifiers[gene_id]
rRNA_genes += 1
Cummulative_rRNA_Count += int(split_line[1])
if split_line[0] in MT_identifiers.keys():
MT_genes += 1
Cummulative_MT_Count += int(split_line[1])
ReadCount += int(split_line[1])
line_number += 1
handle.close()
#print(Cummulative_MT_Count)
#print(Cummulative_rRNA_Count)
#
# writing the output files:
#
out = HTSEQ_COUNTS + '_rRNAmtRNACounts.txt';
out = re.sub('.txt_', '_', out)
print "Summary output file: ", out, "\n"
OUT = open(out, "w")
OUT.write('HT-SEQ file name: \t' + HTSEQ_COUNTS + '\n\n')
OUT.write('GTF file name: \t\t' + GTF + '\n\n\n')
OUT.write('---------------------------------------------------------------------------------' + '\n')
OUT.write(' rRNA and MT identifiers\n')
OUT.write('---------------------------------------------------------------------------------' + '\n')
OUT.write('No. of rRNA identifiers: ' + str(len(rRNA_identifiers.keys())) + '\n') # PRINT size of this hash
OUT.write('No. of MT identifiers: ' + str(len(MT_identifiers.keys())) + '\n') # PRINT size of this hash
OUT.write('\n\n')
OUT.write('---------------------------------------------------------------------------------' + '\n')
OUT.write(' HTSEQ mapping summary\n')
OUT.write('---------------------------------------------------------------------------------' + '\n')
OUT.write('ReadCount: ' + str(ReadCount) + '\n\n')
#OUT.write(' Number of rRNA genes: ' + str(rRNA_genes) + '\n')
OUT.write('Total no. of rRNA transcripts: ' + str(Cummulative_rRNA_Count) + '\n')
perc_rRNA = 100*float(Cummulative_rRNA_Count)/float(ReadCount)
perc_rRNA = str(round(perc_rRNA, 3))
OUT.write('Percent rRNA mapped reads: ' + str(Cummulative_rRNA_Count) + ' / ' + str(ReadCount) + ' * 100 = ' + perc_rRNA + '%\n\n')
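# worked example with made-up numbers: 25000 rRNA reads out of 1000000 counted reads
# gives 100*25000/1000000 = 2.5 (reported as 2.5%)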
#OUT.write('\n Number of MT genes: ' + str(MT_genes) + '\n')
OUT.write('Total no. of MT transcripts: ' + str(Cummulative_MT_Count) + '\n')
perc_MT = 100*float(Cummulative_MT_Count)/float(ReadCount)
perc_MT = str(round(perc_MT, 3))
OUT.write('Percent MT mapped reads: ' + str(Cummulative_MT_Count) + ' / ' + str(ReadCount) + ' * 100 = ' + perc_MT + '%\n\n')
OUT.close()
|
gpl-3.0
| -3,703,518,415,631,307,300
| 32.04
| 180
| 0.583535
| false
| 3.068731
| false
| false
| false
|
codeofdusk/ProjectMagenta
|
src/update/update.py
|
1
|
5042
|
from logging import getLogger
logger = getLogger('update')
import contextlib
import io
import os
import platform
import requests
import tempfile
from wxUI import commonMessageDialogs
import widgetUtils
import webbrowser
try:
import czipfile as zipfile
except ImportError:
import zipfile
from platform_utils import paths
def perform_update(endpoint, current_version, app_name='', password=None, update_available_callback=None, progress_callback=None, update_complete_callback=None):
requests_session = create_requests_session(app_name=app_name, version=current_version)
available_update = find_update(endpoint, requests_session=requests_session)
if not available_update:
logger.debug("No update available")
return False
available_version = float(available_update['current_version'])
if not float(available_version) > float(current_version) or platform.system()+platform.architecture()[0][:2] not in available_update['downloads']:
logger.debug("No update for this architecture")
return False
available_description = available_update.get('description', None)
update_url = available_update ['downloads'][platform.system()+platform.architecture()[0][:2]]
logger.info("A new update is available. Version %s" % available_version)
donation()
if callable(update_available_callback) and not update_available_callback(version=available_version, description=available_description): #update_available_callback should return a falsy value to stop the process
logger.info("User canceled update.")
return
base_path = tempfile.mkdtemp()
download_path = os.path.join(base_path, 'update.zip')
update_path = os.path.join(base_path, 'update')
downloaded = download_update(update_url, download_path, requests_session=requests_session, progress_callback=progress_callback)
extracted = extract_update(downloaded, update_path, password=password)
bootstrap_path = move_bootstrap(extracted)
execute_bootstrap(bootstrap_path, extracted)
logger.info("Update prepared for installation.")
if callable(update_complete_callback):
update_complete_callback()
def create_requests_session(app_name=None, version=None):
user_agent = ''
session = requests.session()
if app_name:
user_agent = ' %s/%r' % (app_name, version)
session.headers['User-Agent'] = session.headers['User-Agent'] + user_agent
return session
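# Illustrative result (names are made up): create_requests_session('MyApp', 0.85) appends
# ' MyApp/0.85' to requests' default User-Agent so the update server can identify the client.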
def find_update(endpoint, requests_session):
response = requests_session.get(endpoint)
response.raise_for_status()
content = response.json()
return content
def download_update(update_url, update_destination, requests_session, progress_callback=None, chunk_size=io.DEFAULT_BUFFER_SIZE):
total_downloaded = total_size = 0
with io.open(update_destination, 'w+b') as outfile:
download = requests_session.get(update_url, stream=True)
total_size = int(download.headers.get('content-length', 0))
logger.debug("Total update size: %d" % total_size)
download.raise_for_status()
for chunk in download.iter_content(chunk_size):
outfile.write(chunk)
total_downloaded += len(chunk)
if callable(progress_callback):
call_callback(progress_callback, total_downloaded, total_size)
logger.debug("Update downloaded")
return update_destination
def extract_update(update_archive, destination, password=None):
"""Given an update archive, extracts it. Returns the directory to which it has been extracted"""
with contextlib.closing(zipfile.ZipFile(update_archive)) as archive:
if password:
archive.setpassword(password)
archive.extractall(path=destination)
logger.debug("Update extracted")
return destination
def move_bootstrap(extracted_path):
working_path = os.path.abspath(os.path.join(extracted_path, '..'))
if platform.system() == 'Darwin':
extracted_path = os.path.join(extracted_path, 'Contents', 'Resources')
downloaded_bootstrap = os.path.join(extracted_path, bootstrap_name())
new_bootstrap_path = os.path.join(working_path, bootstrap_name())
os.rename(downloaded_bootstrap, new_bootstrap_path)
return new_bootstrap_path
def execute_bootstrap(bootstrap_path, source_path):
arguments = r'"%s" "%s" "%s" "%s"' % (os.getpid(), source_path, paths.app_path(), paths.get_executable())
if platform.system() == 'Windows':
import win32api
win32api.ShellExecute(0, 'open', bootstrap_path, arguments, '', 5)
else:
import subprocess
make_executable(bootstrap_path)
subprocess.Popen(['%s %s' % (bootstrap_path, arguments)], shell=True)
logger.info("Bootstrap executed")
def bootstrap_name():
if platform.system() == 'Windows': return 'bootstrap.exe'
if platform.system() == 'Darwin': return 'bootstrap-mac.sh'
return 'bootstrap-lin.sh'
def make_executable(path):
import stat
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
def call_callback(callback, *args, **kwargs):
# try:
callback(*args, **kwargs)
# except:
# logger.exception("Failed calling callback %r with args %r and kwargs %r" % (callback, args, kwargs))
def donation():
dlg = commonMessageDialogs.donation()
if dlg == widgetUtils.YES:
webbrowser.open_new_tab("http://twblue.es/?q=donate")
|
gpl-2.0
| 3,068,892,833,140,432,000
| 39.344
| 211
| 0.754859
| false
| 3.656273
| false
| false
| false
|
txomon/SpockBot
|
spock/plugins/helpers/physics.py
|
1
|
6091
|
"""
PhysicsPlugin is planned to provide vectors and tracking necessary to implement
SMP-compliant client-side physics for entities. Primarily this will be used to
keep the client position updated for gravity/knockback/water-flow etc. But it should
also eventually provide functions to track other entities affected by SMP
physics
Minecraft client/player physics is unfortunately very poorly documented.
Most of
these values are based on experimental results and the contributions of a
handful of people (Thank you 0pteron!) to the Minecraft wiki talk page on
Entities and Transportation. Ideally someone will decompile the client with MCP
and document the totally correct values and behaviors.
"""
# Gravitational constants defined in blocks/(client tick)^2
PLAYER_ENTITY_GAV = 0.08
THROWN_ENTITY_GAV = 0.03
RIDING_ENTITY_GAV = 0.04
BLOCK_ENTITY_GAV = 0.04
ARROW_ENTITY_GAV = 0.05
# Air drag constants defined in 1/tick
PLAYER_ENTITY_DRG = 0.02
THROWN_ENTITY_DRG = 0.01
RIDING_ENTITY_DRG = 0.05
BLOCK_ENTITY_DRG = 0.02
ARROW_ENTITY_DRG = 0.01
# Player ground acceleration isn't actually linear, but we're going to pretend
# that it is. Max ground velocity for a walking client is 0.215blocks/tick, it
# takes a dozen or so ticks to get close to max velocity. Sprint is 0.28, just
# apply more acceleration to reach a higher max ground velocity
PLAYER_WLK_ACC = 0.15
PLAYER_SPR_ACC = 0.20
PLAYER_GND_DRG = 0.41
# Seems about right, not based on anything
PLAYER_JMP_ACC = 0.45
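# A consequence of the two player constants above and the drag rule in
# apply_vertical_drag(): the fall speed converges to the fixed point of
# v = (v - PLAYER_ENTITY_GAV) * (1 - PLAYER_ENTITY_DRG), i.e. roughly
# 0.0784 / 0.02 = 3.92 blocks/tick of terminal velocity.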
import logging
import math
from spock.mcmap import mapdata
from spock.plugins.base import PluginBase
from spock.utils import BoundingBox, Position, pl_announce
from spock.vector import Vector3
logger = logging.getLogger('spock')
class PhysicsCore(object):
def __init__(self, vec, pos):
self.vec = vec
self.pos = pos
def jump(self):
if self.pos.on_ground:
self.pos.on_ground = False
self.vec += Vector3(0, PLAYER_JMP_ACC, 0)
def walk(self, angle, radians=False):
if not radians:
angle = math.radians(angle)
z = math.cos(angle) * PLAYER_WLK_ACC
x = math.sin(angle) * PLAYER_WLK_ACC
self.vec += Vector3(x, 0, z)
def sprint(self, angle, radians=False):
if not radians:
angle = math.radians(angle)
z = math.cos(angle) * PLAYER_SPR_ACC
x = math.sin(angle) * PLAYER_SPR_ACC
self.vec += Vector3(x, 0, z)
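# Orientation note for walk()/sprint() above: with this convention an angle of 0
# accelerates along +z (cos term) and 90 degrees along +x (sin term); sprint() is
# identical except for the larger PLAYER_SPR_ACC constant.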
@pl_announce('Physics')
class PhysicsPlugin(PluginBase):
requires = ('Event', 'ClientInfo', 'World')
events = {
'physics_tick': 'tick',
}
def __init__(self, ploader, settings):
super(PhysicsPlugin, self).__init__(ploader, settings)
self.vec = Vector3(0.0, 0.0, 0.0)
# wiki says 0.6 but I made it 0.8 to give a little wiggle room
self.playerbb = BoundingBox(0.8, 1.8)
self.pos = self.clientinfo.position
ploader.provides('Physics', PhysicsCore(self.vec, self.pos))
def tick(self, _, __):
self.check_collision()
self.apply_horizontal_drag()
self.apply_vector()
def check_collision(self):
cb = Position(math.floor(self.pos.x), math.floor(self.pos.y),
math.floor(self.pos.z))
if self.block_collision(cb, y=2): # we check +2 because above my head
self.vec.y = 0
if self.block_collision(cb, y=-1): # we check below feet
self.pos.on_ground = True
self.vec.y = 0
self.pos.y = cb.y
else:
self.pos.on_ground = False
self.vec -= Vector3(0, PLAYER_ENTITY_GAV, 0)
self.apply_vertical_drag()
# feet or head collide with x
if self.block_collision(cb, x=1) or \
self.block_collision(cb, x=-1) or \
self.block_collision(cb, y=1, x=1) or \
self.block_collision(cb, y=1, x=-1):
self.vec.x = 0
# replace with real info in event
self.event.emit("phy_collision", "x")
# feet or head collide with z
if self.block_collision(cb, z=1) or \
self.block_collision(cb, z=-1) or \
self.block_collision(cb, y=1, z=1) or \
self.block_collision(cb, y=1, z=-1):
self.vec.z = 0
# replace with real info in event
self.event.emit("phy_collision", "z")
def block_collision(self, cb, x=0, y=0, z=0):
block_id, meta = self.world.get_block(cb.x + x, cb.y + y, cb.z + z)
block = mapdata.get_block(block_id, meta)
if block is None:
return False
# Possibly we want to use the centers of blocks as the starting points for
# bounding boxes instead of (0, 0, 0); that might make things easier once we
# get to more complex shapes that sit in the center of a block (e.g. fences),
# but it is more complicated for the player. Below we un-center the player
# position and bump it down a little to prevent colliding with the floor.
pos1 = Position(self.pos.x - self.playerbb.w / 2, self.pos.y - 0.2,
self.pos.z - self.playerbb.d / 2)
bb1 = self.playerbb
bb2 = block.bounding_box
if bb2 is not None:
pos2 = Position(cb.x + x + bb2.x, cb.y + y + bb2.y,
cb.z + z + bb2.z)
if ((pos1.x + bb1.w) >= (pos2.x) and (pos1.x) <= (
pos2.x + bb2.w)) and (
(pos1.y + bb1.h) >= (pos2.y) and (pos1.y) <= (
pos2.y + bb2.h)) and (
(pos1.z + bb1.d) >= (pos2.z) and (pos1.z) <= (
pos2.z + bb2.d)):
return True
return False
def apply_vertical_drag(self):
self.vec.y -= self.vec.y * PLAYER_ENTITY_DRG
def apply_horizontal_drag(self):
self.vec.x -= self.vec.x * PLAYER_GND_DRG
self.vec.z -= self.vec.z * PLAYER_GND_DRG
def apply_vector(self):
p = self.pos
p.x = p.x + self.vec.x
p.y = p.y + self.vec.y
p.z = p.z + self.vec.z
|
mit
| 4,147,684,726,026,790,000
| 35.915152
| 79
| 0.602036
| false
| 3.212553
| false
| false
| false
|
jhpyle/docassemble
|
docassemble_base/docassemble/base/mako/doc/build/conf.py
|
1
|
9486
|
# -*- coding: utf-8 -*-
#
# Mako documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('.'))
import mako
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
# 'sphinx.ext.doctest', 'builder.builders']
extensions = ['sphinx.ext.autodoc','sphinx.ext.intersphinx',
'changelog', 'sphinx_paramlinks',
'builder.builders']
changelog_render_ticket = "https://bitbucket.org/zzzeek/mako/issue/%s/"
changelog_render_pullreq = {
"bitbucket": "https://bitbucket.org/zzzeek/mako/pull-request/%s",
"default": "https://bitbucket.org/zzzeek/mako/pull-request/%s",
"github": "https://github.com/zzzeek/mako/pull/%s",
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
nitpicky = True
site_base = "http://www.makotemplates.org"
# The suffix of source filenames.
source_suffix = '.rst'
template_bridge = "builder.builders.MakoBridge"
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mako'
copyright = u'the Mako authors and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mako.__version__
# The full version, including alpha/beta/rc tags.
release = mako.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s %s Documentation" % (project, release)
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%m/%d/%Y %H:%M:%S'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Makodoc'
#autoclass_content = 'both'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'mako_%s.tex' % release.replace('.', '_'), r'Mako Documentation',
r'Mike Bayer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# sets TOC depth to 2.
latex_preamble = '\setcounter{tocdepth}{3}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
#latex_elements = {
# 'papersize': 'letterpaper',
# 'pointsize': '10pt',
#}
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'mako', u'Mako Documentation',
[u'Mako authors'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Mako'
epub_author = u'Mako authors'
epub_publisher = u'Mako authors'
epub_copyright = u'Mako authors'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
intersphinx_mapping = {
'dogpilecache':('http://dogpilecache.readthedocs.org/en/latest', None),
'beaker':('http://beaker.readthedocs.org/en/latest',None),
}
|
mit
| -6,735,364,583,275,619,000
| 31.047297
| 80
| 0.700822
| false
| 3.645657
| true
| false
| false
|
Skeletrox/usb-backend-pinut
|
file_upload/fileupload/USBFinder.py
|
1
|
5689
|
import os, inspect, json, re #needed for os files
from django.conf import settings
from glob import glob #Needed for directories
import subprocess #Running lsusb
import getpass #used for getuser()
import time #temp fix; used to sleep
from stat import * #imports stats like ST_SIZE
import threading #Multithreading
from shutil import copy2 #Copies files
process = None
staticFileLocRoot = settings.CONTENT_ROOT
data_folder = settings.USB_DIR
extns = settings.ACCEPTED_EXTNS
def get_usb_name():
lsblk_out = subprocess.check_output("lsblk", shell=True)
lsblk_list = lsblk_out.split('\n')
media_dir = None
for line in lsblk_list:
if '/media/' in line:
media_loc = line.index('/media/')
media_dir = line[media_loc:].strip()
return media_dir
def verify(device_mnt):
blkid_out = subprocess.check_output("blkid", shell=True)
blkid_list = blkid_out.split('\n')
for line in blkid_list:
if ("/dev/" + device_mnt) in line:
return check_if_line_usb(line)
def check_if_line_usb(line):
UUID_beg = line.index('UUID') + 5
UUID_end = line.find('\"', UUID_beg+1)
print str(UUID_end - UUID_beg)
if UUID_end - UUID_beg == 10:
return True
return False
def transfer_file(file):
print "file " + file + "staticFileLocRoot " + staticFileLocRoot
index=file.rfind('/')
file_name=file[index+1:]
print "file_name " + file_name + "staticFileLocRoot " + staticFileLocRoot
sendString = "cp " + file + " " + staticFileLocRoot + file_name
proc = subprocess.Popen (sendString, shell=True)
proc.communicate()[0]
return proc.returncode
def attemptMount():
lsblk_out = subprocess.check_output("lsblk", shell=True)
lsblk_list = lsblk_out.split('\n')
media_dir = None
devmnt_regex = r'([s][d][a-zA-Z][0-9]+)'
for line in lsblk_list:
if '/media/' in line:
media_loc = line.index('/media/')
media_dir = line[media_loc:].strip()
try:
media_mntpnt = re.findall(devmnt_regex, line)[0]
except:
return None
is_needed = verify(media_mntpnt)
if is_needed:
break
if media_dir is None:
return None
try:
os.chdir(media_dir + '/' + data_folder)
except:
return None
temps = [name for name in os.listdir(".")]
print 'Temporary files are ' + str(temps)
files = []
for root, subfolders, usb_files in os.walk("."):
for name in usb_files:
if (not os.path.isdir(name)):
if(name.endswith(tuple(extns))):
#if (not os.path.isdir(name)) and (name[-5:] == '.data' or name == 'content.json'):
files.append(os.path.join(root, name))
return files
def main():
#enableAutoMount()
df = subprocess.check_output("lsusb", stderr=subprocess.STDOUT) #subprocess prints to stderr for some reason, making it think stdout is stderr
oldDeviceList = df.split("\n") #gets list of previously connected usb devices
while True:
df = subprocess.check_output("lsusb", stderr=subprocess.STDOUT) #do it again
newDeviceList = df.split('\n') #store in a NEW list
if len(newDeviceList) > len(oldDeviceList): #new usb device inserted!
for line in newDeviceList:
if line not in oldDeviceList: #this points to the newer device we have attached
IDAnchor = line.index("ID")
line = line[IDAnchor:] #slice off unwanted line info [such as bus information]
print ("You have attached " + line) #debug purposes
time.sleep(3) #prevents python from attempting to access the files before the OS itself, might need to be increased
attemptMount() #attempt mounting the device
if len(newDeviceList) < len(oldDeviceList): #some USB device has been removed!
for line in oldDeviceList:
if line not in newDeviceList:
IDAnchor = line.index("ID")
line = line[IDAnchor:]
print ("You have removed " + line)
attemptRemoval()
oldDeviceList = list(newDeviceList) #allows for the loop to function properly
if __name__ == '__main__':
main()
|
apache-2.0
| -8,428,731,860,846,621,000
| 49.794643
| 198
| 0.465108
| false
| 4.780672
| false
| false
| false
|
pyfa-org/eos
|
eos/eve_obj/buff_template.py
|
1
|
1741
|
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos.util.repr import make_repr_str
class WarfareBuffTemplate:
def __init__(
self,
buff_id=None,
affectee_filter=None,
affectee_filter_extra_arg=None,
affectee_attr_id=None,
operator=None,
aggregate_mode=None
):
self.buff_id = buff_id
self.affectee_filter = affectee_filter
self.affectee_filter_extra_arg = affectee_filter_extra_arg
self.affectee_attr_id = affectee_attr_id
self.operator = operator
self.aggregate_mode = aggregate_mode
# Auxiliary methods
def __repr__(self):
spec = [
'buff_id',
'affectee_filter',
'affectee_filter_extra_arg',
'affectee_attr_id',
'operator',
'aggregate_mode']
return make_repr_str(self, spec)
|
lgpl-3.0
| -2,537,342,386,661,229,600
| 32.480769
| 80
| 0.593912
| false
| 3.930023
| false
| false
| false
|
fracpete/wekamooc
|
moredataminingwithweka/class-2.1.py
|
1
|
2401
|
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# More Data Mining with Weka - Class 2.1
# Copyright (C) 2014 Fracpete (fracpete at gmail dot com)
# Use the WEKAMOOC_DATA environment variable to set the location
# for the datasets
import os
data_dir = os.environ.get("WEKAMOOC_DATA")
if data_dir is None:
data_dir = "." + os.sep + "data"
import os
import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.core.classes import Random
from weka.filters import Filter
from weka.classifiers import Classifier, Evaluation
jvm.start()
# load ionosphere
fname = data_dir + os.sep + "ionosphere.arff"
print("\nLoading dataset: " + fname + "\n")
loader = Loader(classname="weka.core.converters.ArffLoader")
data = loader.load_file(fname)
data.class_is_last()
for equal in ["", "-F"]:
print("\nEqual frequency binning? " + str(equal == "-F") + "\n")
for bins in [0, 40, 10, 5, 2]:
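        # bins == 0 keeps the raw data; otherwise apply Weka's unsupervised Discretize
        # filter with that many bins ("-F" switches it to equal-frequency binning)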
if bins > 0:
fltr = Filter(classname="weka.filters.unsupervised.attribute.Discretize", options=["-B", str(bins), equal])
fltr.inputformat(data)
filtered = fltr.filter(data)
else:
filtered = data
cls = Classifier(classname="weka.classifiers.trees.J48")
# cross-validate
evl = Evaluation(filtered)
evl.crossvalidate_model(cls, filtered, 10, Random(1))
# build classifier on full dataset
cls.build_classifier(filtered)
# get size of tree from model strings
lines = str(cls).split("\n")
nodes = "N/A"
for line in lines:
if line.find("Size of the tree :") > -1:
nodes = line.replace("Size of the tree :", "").strip()
# output stats
print("bins=%i accuracy=%0.1f nodes=%s" % (bins, evl.percent_correct, nodes))
jvm.stop()
|
gpl-3.0
| 7,711,363,147,533,410,000
| 36.515625
| 119
| 0.669304
| false
| 3.551775
| false
| false
| false
|
ntduong/ML
|
DecisionTree/treepredict.py
|
1
|
6510
|
'''
Created on Feb 21, 2013
@author: Administrator
'''
from collections import defaultdict
from math import log
def readDataFromFile(filename='decision_tree_example.txt'):
with open(filename, 'rt') as f:
data = []
for line in f:
data.append(line.strip().split('\t'))
return data
def uniquecounts(rows):
results = defaultdict(int)
for row in rows:
r = row[len(row)-1]
results[r] += 1
return results
def gini_impurity(rows):
total = len(rows)
counts = uniquecounts(rows)
imp = 0
for k1 in counts:
p1 = float(counts[k1])/total
for k2 in counts:
if k1 == k2: continue
p2 = float(counts[k2])/total
imp += p1*p2
return imp
def entropy(rows):
log2 = lambda x: log(x)/log(2)
results = uniquecounts(rows)
ent = 0.0
total = len(rows)
for r in results:
p = float(results[r])/total
ent -= p*log2(p)
return ent
def divide_set(rows, col, value):
split_function = None
if isinstance(value, int) or isinstance(value, float):
split_function = lambda row: row[col] >= value
else:
split_function = lambda row: row[col] == value
set1 = [row for row in rows if split_function(row)]
set2 = [row for row in rows if not split_function(row)]
return (set1, set2)
class treenode(object):
def __init__(self, col=-1, value=None, results=None, tb=None, fb=None):
self.col = col
self.value = value
self.results = results
self.fb = fb
self.tb = tb
def buildtree(rows, score_function=entropy):
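    # greedy, CART-style construction: try every (column, value) split, keep the one
    # with the highest gain under score_function, and recurse until no split helps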
if len(rows) == 0: return treenode()
current_score = score_function(rows)
best_gain = 0.0
best_criteria = None
best_sets = None
column_cnt = len(rows[0])-1 # excluding the last column
for col in range(0, column_cnt):
col_values = {}
for row in rows:
col_values[row[col]] = 1
for value in col_values:
(set1, set2) = divide_set(rows, col, value)
len1 = len(set1)
total = len(rows)
p = float(len1)/total
gain = current_score - p*score_function(set1) - (1-p)*score_function(set2)
if gain > best_gain and len(set1) > 0 and len(set2) > 0:
best_gain = gain
best_criteria = (col, value)
best_sets = (set1, set2)
if best_gain > 0:
trueBranch = buildtree(best_sets[0], score_function)
falseBranch = buildtree(best_sets[1], score_function)
return treenode(col=best_criteria[0], value=best_criteria[1], tb=trueBranch, fb=falseBranch)
else:
return treenode(results=uniquecounts(rows))
def print_tree(node, indent=''):
if node.results != None:
print str(node.results)
else:
print str(node.col) + ':' + str(node.value) + '?'
print indent + 'T->',
print_tree(node.tb, indent+' ')
print indent + 'F->',
print_tree(node.fb, indent+' ')
def getWidth(node):
if node.tb == None and node.fb == None:
return 1
return getWidth(node.fb) + getWidth(node.tb)
def getHeight(node):
if node.tb == None and node.fb == None:
return 1
return getHeight(node.tb) + getHeight(node.fb) + 1
from PIL import Image, ImageDraw
def drawNode(draw, node, x, y):
if node.results == None:
w1 = getWidth(node.fb)*100
w2 = getWidth(node.tb)*100
left = x-(w1+w2)/2
right = x+(w1+w2)/2
draw.text((x-20,y-10),str(node.col)+':'+str(node.value),(0,0,0))
draw.line((x, y, left+w1/2, y+100), fill=(255,0,0))
draw.line((x, y, right-w2/2, y+100), fill=(255,0,0))
drawNode(draw, node.fb, left+w1/2, y+100)
drawNode(draw, node.tb, right-w2/2, y+100)
else:
txt = ' \n'.join(['%s:%d' %v for v in node.results.items()])
draw.text((x-20,y), txt, (0,0,0))
def drawTree(node, jpeg='tree.jpg'):
w = getWidth(node)*100
h = getHeight(node)*100+120
img = Image.new('RGB', (w,h), (255,255,255))
draw = ImageDraw.Draw(img)
drawNode(draw, node, w/2, 20)
img.save(jpeg, 'JPEG')
def classify(observation, node):
if node.results != None:
return node.results
else:
v = observation[node.col]
branch = None
if isinstance(v,int) or isinstance(v,float):
if v >= node.value: branch = node.tb
else: branch = node.fb
else:
if v == node.value: branch = node.tb
else: branch = node.fb
return classify(observation, branch)
def prune(node, mingain):
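    # bottom-up pruning: when both children are leaves and splitting them saves less
    # than mingain in entropy, collapse them back into this node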
if node.tb.results == None:
prune(node.tb, mingain)
if node.fb.results == None:
prune(node.fb, mingain)
if node.tb.results != None and node.fb.results != None:
tb, fb = [], []
for v, c in node.tb.results.items():
tb.extend([[v]]*c)
for v, c in node.fb.results.items():
fb.extend([[v]]*c)
delta = entropy(tb+fb) - (entropy(tb) + entropy(fb))/2
if delta < mingain:
node.tb, node.fb = None, None
node.results = uniquecounts(tb+fb)
def missing_value_classify(observation, node):
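    # like classify(), but when the tested attribute is missing both branches are
    # followed and their results are weighted by the relative size of each branch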
if node.results != None:
return node.results
else:
v = observation[node.col]
if v == None:
tr, fr = missing_value_classify(observation, node.tb), missing_value_classify(observation, node.fb)
tcount = sum(tr.values())
fcount = sum(fr.values())
tw = float(tcount)/(tcount+fcount)
fw = 1-tw
result = {}
for k,v in tr.items():
result[k] = v*tw
for k,v in fr.items():
if k not in result: result[k] = 0
result[k] += v*fw
return result
else:
if isinstance(v, int) or isinstance(v, float):
if v >= node.value: branch = node.tb
else: branch = node.fb
else:
if v == node.value: branch = node.tb
else: branch = node.fb
return missing_value_classify(observation, branch)
if __name__ == '__main__':
data = readDataFromFile()
root = buildtree(data)
print missing_value_classify(['google',None,'yes',None], root)
|
mit
| 9,111,104,916,089,302,000
| 29.85782
| 111
| 0.541782
| false
| 3.43717
| false
| false
| false
|
eknowles/CV
|
app.py
|
1
|
2096
|
import os
from flask import Flask, render_template, send_from_directory
from calendar_parser import CalendarParser
# initialization
app = Flask(__name__)
app.config.update(
DEBUG=True,
)
events = {}
# settings
ics_url = "https://www.google.com/calendar/ical/88kil28s7t686h1p5aoem6ui24%40group.calendar.google.com/public/basic.ics"
class Event(object):
name = ''
location = ''
start_time = None
end_time = None
description = ''
clean_dates = ''
def tidytime(start, end):
output = ''
if start.day + 1 == end.day:
sameday = True
else:
sameday = False
if start.month == end.month:
samemonth = True
else:
samemonth = False
if start.year == end.year:
sameyear = True
else:
sameyear = False
if sameyear and samemonth and sameday:
output = start.strftime("%A, %d %B %Y")
elif sameyear and samemonth and not sameday:
output = start.strftime("%A, %d-") + end.strftime("%d %B %Y")
    elif sameyear and not samemonth:
        output = start.strftime("%d %B - ") + end.strftime("%d %B %Y")
    else:
        output = start.strftime("%d %B %Y - ") + end.strftime("%d %B %Y")
return output
# controllers
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'), 'ico/favicon.ico')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.route("/")
def index():
return render_template('index.html')
@app.route("/update")
def reset_events():
global events
event_list = []
cal = CalendarParser(ics_url=ics_url)
for event in cal.parse_calendar():
NewEvent = Event()
NewEvent.name = event["name"]
NewEvent.location = event["location"]
NewEvent.start_time = event["start_time"]
NewEvent.end_time = event["end_time"]
NewEvent.description = event["description"]
NewEvent.clean_dates = tidytime(event["start_time"], event["end_time"])
event_list.append(NewEvent)
event_list.sort(key=lambda r: r.start_time)
k = 0
for event in event_list:
events[k] = event
k += 1
# print events
return render_template('reset.html', events=events)
# launch
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='127.0.0.1', port=port)
|
gpl-2.0
| -1,447,502,892,584,952,000
| 22.043956
| 120
| 0.68416
| false
| 2.855586
| false
| false
| false
|
hzlf/openbroadcast
|
website/djangorestframework/tests/views.py
|
1
|
1564
|
from django.conf.urls.defaults import patterns, url
from django.test import TestCase
from django.test import Client
urlpatterns = patterns('djangorestframework.utils.staticviews',
url(r'^robots.txt$', 'deny_robots'),
url(r'^favicon.ico$', 'favicon'),
url(r'^accounts/login$', 'api_login'),
url(r'^accounts/logout$', 'api_logout'),
)
class ViewTests(TestCase):
"""Test the extra views djangorestframework provides"""
urls = 'djangorestframework.tests.views'
def test_robots_view(self):
"""Ensure the robots view exists"""
response = self.client.get('/robots.txt')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'text/plain')
def test_favicon_view(self):
"""Ensure the favicon view exists"""
response = self.client.get('/favicon.ico')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'image/vnd.microsoft.icon')
def test_login_view(self):
"""Ensure the login view exists"""
response = self.client.get('/accounts/login')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'].split(';')[0], 'text/html')
def test_logout_view(self):
"""Ensure the logout view exists"""
response = self.client.get('/accounts/logout')
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'].split(';')[0], 'text/html')
# TODO: Add login/logout behaviour tests
|
gpl-3.0
| 8,763,692,571,406,726,000
| 35.372093
| 78
| 0.658568
| false
| 3.919799
| true
| false
| false
|
iLoop2/ResInsight
|
ThirdParty/Ert/devel/python/python/ert_gui/viewer/slice_viewer.py
|
1
|
8469
|
import os
from OpenGL.GL import *
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QApplication, QMainWindow, QDockWidget
import sys
import traceback
from ert.ecl import EclTypeEnum, EclKW, EclGrid
from ert.ecl.faults import FaultCollection
from ert.geo.xyz_io import XYZIo
from ert_gui.viewer import Texture3D, Bounds, SliceViewer, SliceSettingsWidget, Texture1D
def loadGrid(path, load_actnum=True):
""" @rtype: EclGrid """
with open(path, "r") as f:
specgrid = EclKW.read_grdecl(f, "SPECGRID", ecl_type=EclTypeEnum.ECL_INT_TYPE, strict=False)
zcorn = EclKW.read_grdecl(f, "ZCORN")
coord = EclKW.read_grdecl(f, "COORD")
actnum = None
if load_actnum:
actnum = EclKW.read_grdecl(f, "ACTNUM", ecl_type=EclTypeEnum.ECL_INT_TYPE)
mapaxes = EclKW.read_grdecl(f, "MAPAXES")
grid = EclGrid.create(specgrid, zcorn, coord, actnum, mapaxes=mapaxes)
return grid
def loadKW(keyword, ecl_type, path):
""" @rtype: EclKW """
with open(path, "r") as f:
kw_data = EclKW.read_grdecl(f, keyword, ecl_type=ecl_type)
return kw_data
def loadGridData(path):
grid = loadGrid(path)
nx, ny, nz, nactive = grid.dims
print(nx, ny, nz)
bounds = Bounds()
grid_data = []
index = 0
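    # pack each cell's centre coordinates plus an active flag into a flat list;
    # it is later uploaded as one RGBA texel per cell (Texture3D with GL_RGBA32F)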
for z in range(nz):
for y in range(ny):
for x in range(nx):
# x, y, z = grid.get_corner_xyz(0, global_index=index)
x, y, z = grid.get_xyz(global_index=index)
active = grid.active(global_index=index)
if active:
active = 1.0
else:
active = 0.0
bounds.addPoint(x, y, z)
grid_data.append(x)
grid_data.append(y)
grid_data.append(z)
grid_data.append(active)
index += 1
print(bounds)
return nx, ny, nz, grid_data, bounds,grid
def loadKWData(path, keyword, ecl_type=EclTypeEnum.ECL_FLOAT_TYPE):
kw_data = loadKW(keyword, ecl_type, path)
print(kw_data.min, kw_data.max)
min_value = kw_data.min
data_range = kw_data.max - kw_data.min
result = []
for value in kw_data:
value = float(value - min_value) / data_range
result.append(value)
return result, data_range
def rgb(r, g, b):
return [r / 255.0, g / 255.0, b / 255.0, 1.0]
def createColorBrewerScale():
color_list = [rgb(141,211,199),
rgb(255,255,179),
rgb(190,186,218),
rgb(251,128,114),
rgb(128,177,211),
rgb(253,180,98),
rgb(179,222,105),
rgb(252,205,229),
rgb(217,217,217),
rgb(188,128,189),
rgb(204,235,197),
rgb(255,237,111)]
colors = [component for color in color_list for component in color]
return Texture1D(len(colors) / 4, colors)
def createSeismicScale():
color_list = [rgb(0, 0, 255), rgb(255, 255, 255), rgb(255, 0, 0)]
colors = [component for color in color_list for component in color]
return Texture1D(len(colors) / 4, colors, wrap=GL_CLAMP_TO_EDGE)
def createLinearGreyScale():
color_list = [rgb(128, 128, 128), rgb(255, 255, 255)]
colors = [component for color in color_list for component in color]
return Texture1D(len(colors) / 4, colors, wrap=GL_CLAMP_TO_EDGE)
def createRainbowScale():
color_list = [rgb(200, 0, 255), rgb(0, 0, 255), rgb(0, 255, 0), rgb(255, 255, 0), rgb(255, 127, 0), rgb(255, 0, 0)]
colors = [component for color in color_list for component in color]
return Texture1D(len(colors) / 4, colors, wrap=GL_CLAMP_TO_EDGE, internal_format=GL_RGBA8)
def createColorScales():
return {
"region_colors": createColorBrewerScale(),
"seismic": createSeismicScale(),
"linear_grey": createLinearGreyScale(),
"rainbow": createRainbowScale()
}
def loadFaults(grid , fault_file):
faults = FaultCollection( grid )
    try:
        faults.load( fault_file )
    except Exception:
        traceback.print_exc()
print("Loading from fault file:%s failed" % fault_file)
return faults
def createDataStructures(grid_path=None, grid_data_path=None , polyline_root_path = None):
if grid_path is not None:
nx, ny, nz, grid_data, bounds, grid = loadGridData(grid_path)
data, data_range = loadKWData(grid_data_path, "FLTBLCK", ecl_type=EclTypeEnum.ECL_INT_TYPE)
faults = loadFaults( grid , os.path.join(polyline_root_path , "faults.grdecl"))
else:
# nx, ny, nz, grid_data, bounds = loadGridData("/Volumes/Statoil/data/faultregion/grid.grdecl")
# data, data_range = loadKWData("/Volumes/Statoil/data/faultregion/fltblck.grdecl", "FLTBLCK", ecl_type=EclTypeEnum.ECL_INT_TYPE)
nx, ny, nz, grid_data, bounds, grid = loadGridData("/Volumes/Statoil/data/TestCase/eclipse/include/example_grid_sim.GRDECL")
data, data_range = loadKWData("/Volumes/Statoil/data/TestCase/eclipse/include/example_permx.GRDECL", "PERMX", ecl_type=EclTypeEnum.ECL_FLOAT_TYPE)
faults = loadFaults( grid , os.path.join("/Volumes/Statoil/data/TestCase/eclipse/include" , "example_faults_sim.GRDECL"))
grid_texture = Texture3D(nx, ny, nz, grid_data, GL_RGBA32F, GL_RGBA)
attribute_texture = Texture3D(nx, ny, nz, data)
textures = {"grid": grid_texture,
"grid_data": attribute_texture}
return textures, bounds, nx, ny, nz, data_range , faults
def readPolylines(root_path):
polyline_files = ["pol1.xyz",
"pol2.xyz",
"pol3.xyz",
"pol4.xyz",
"pol5.xyz",
"pol6.xyz",
"pol7.xyz",
"pol8.xyz",
"pol9.xyz",
"pol10.xyz",
"pol11.xyz"]
polylines = []
if root_path is not None and os.path.exists(root_path):
for polyline_file in polyline_files:
path = os.path.join(root_path, polyline_file)
polyline = XYZIo.readXYZFile(path)
polylines.append(polyline)
return polylines
if __name__ == '__main__':
grid_path = None
grid_data_path = None
polyline_root_path = None
if len(sys.argv) == 4:
grid_path = sys.argv[1]
grid_data_path = sys.argv[2]
polyline_root_path = sys.argv[3]
app = QApplication(["Slice Viewer"])
window = QMainWindow()
window.resize(1024, 768)
textures, bounds, nx, ny, nz, data_range , faults = createDataStructures(grid_path, grid_data_path , polyline_root_path)
polylines = readPolylines(root_path=polyline_root_path)
color_scales = createColorScales()
textures["color_scale"] = color_scales[color_scales.keys()[0]]
viewer = SliceViewer(textures=textures, volume_bounds=bounds, color_scales=color_scales, data_range=data_range, polylines=polylines , faults = faults)
viewer.setSliceSize(width=nx, height=ny)
slice_settings = SliceSettingsWidget(max_slice_count=nz, color_scales=color_scales.keys())
slice_settings.inactiveCellsHidden.connect(viewer.hideInactiveCells)
slice_settings.currentSliceChanged.connect(viewer.setCurrentSlice)
slice_settings.toggleOrthographicProjection.connect(viewer.useOrthographicProjection)
slice_settings.toggleLighting.connect(viewer.useLighting)
slice_settings.colorScalesChanged.connect(viewer.changeColorScale)
slice_settings.regionToggling.connect(viewer.useRegionScaling)
slice_settings.toggleInterpolation.connect(viewer.useInterpolationOnData)
slice_settings.mirrorX.connect(viewer.mirrorX)
slice_settings.mirrorY.connect(viewer.mirrorY)
slice_settings.mirrorZ.connect(viewer.mirrorZ)
slice_settings.toggleFlatPolylines.connect(viewer.toggleFlatPolylines)
dock_widget = QDockWidget("Settings")
dock_widget.setObjectName("SliceSettingsDock")
dock_widget.setWidget(slice_settings)
dock_widget.setAllowedAreas(Qt.AllDockWidgetAreas)
dock_widget.setFeatures(QDockWidget.NoDockWidgetFeatures)
window.addDockWidget(Qt.LeftDockWidgetArea, dock_widget)
window.setCentralWidget(viewer)
window.show()
window.activateWindow()
window.raise_()
app.exec_()
|
gpl-3.0
| 4,388,381,655,475,250,700
| 33.149194
| 154
| 0.627465
| false
| 3.26107
| false
| false
| false
|
blade2005/dosage
|
scripts/mklanguages.py
|
1
|
1450
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
'''update languages.py from pycountry'''
from __future__ import absolute_import, division, print_function
import os
import sys
import codecs
basepath = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, basepath)
from dosagelib.scraper import get_scrapers # noqa
def main():
"""Update language information in dosagelib/languages.py."""
fn = os.path.join(basepath, 'dosagelib', 'languages.py')
encoding = 'utf-8'
with codecs.open(fn, 'w', encoding) as f:
f.write('# -*- coding: %s -*-%s' % (encoding, os.linesep))
f.write('# ISO 693-1 language codes from pycountry%s' % os.linesep)
f.write('# This file is automatically generated, DO NOT EDIT!%s' % os.linesep)
lang = get_used_languages()
write_languages(f, lang)
def get_used_languages():
lang = {}
for scraperobj in get_scrapers():
l = scraperobj.lang
if l not in lang:
lang[l] = scraperobj.language()
return lang
def write_languages(f, l):
"""Write language information."""
f.write("Languages = {%s" % os.linesep)
for lang in sorted(l):
f.write(" %r: %r,%s" % (lang, l[lang], os.linesep))
f.write("}%s" % os.linesep)
if __name__ == '__main__':
main()
|
mit
| -1,067,473,444,864,903,400
| 28.591837
| 86
| 0.631034
| false
| 3.215078
| false
| false
| false
|
imeteora/cocos2d-x-3.x-Qt
|
tools/jenkins-scripts/watchdog.py
|
1
|
1716
|
import jenkinsapi
from jenkinsapi.jenkins import Jenkins
import sys
import time
import os
import traceback #used by the exception handler in __main__
#check & kill dead build
def build_time(_job,_threshold):
#get jenkins-job-watchdog-threshold
#Get last build running
build = _job.get_last_build()
running = build.is_running()
print 'build_job:',_job,'running:',running
if not running:
return False
#Get numerical ID of the last build.
buildnu = _job.get_last_buildnumber()
print "buildnumber:#",buildnu
#get nowtime
nowtime = time.strftime('%M',time.localtime(time.time()))
#print 'nowtime:',nowtime
#get build start time
timeb = build.get_timestamp()
#print 'buildtime:',str(timeb)[14:16]
buildtime = int(str(timeb)[14:16])
subtime = 0
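    #only the minute fields are compared; the else branch handles builds that
    #started in the previous hour (wrap-around past minute 59)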
if int(nowtime) >= buildtime:
subtime = int(nowtime)-buildtime
else:
subtime = 60-buildtime+int(nowtime)
if subtime > _threshold:
#print 'subtime',subtime
        #kill dead build
build.stop()
def main():
username = os.environ['JENKINS_ADMIN']
password = os.environ['JENKINS_ADMIN_PW']
J = Jenkins('http://115.28.134.83:8000',username,password)
#get all jenkins jobs
for key,job in J.iteritems():
threshold = 0
if(os.environ.has_key(key+'-threshold')):
threshold = int(os.environ[key+'-threshold'])
else:
threshold = int(os.environ['jenkins-job-watchdog-threshold'])
build_time(job,threshold)
return(0)
# -------------- main --------------
if __name__ == '__main__':
sys_ret = 0
try:
sys_ret = main()
except:
traceback.print_exc()
sys_ret = 1
finally:
sys.exit(sys_ret)
|
gpl-2.0
| 4,595,123,461,525,334,500
| 26.677419
| 73
| 0.598485
| false
| 3.538144
| false
| false
| false
|
gviejo/ThalamusPhysio
|
python/main_pop_corr_nucleus.py
|
1
|
7266
|
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
import _pickle as cPickle
import time
import os, sys
import ipyparallel
import neuroseries as nts
import scipy.stats
from pylab import *
from multiprocessing import Pool
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
nucleus = np.unique(mappings['nucleus'])
sessions = np.unique([n.split("_")[0] for n in mappings.index])
# determining number of neurons per nucleus et per sessions
count = pd.DataFrame(index=sessions, columns = nucleus,data=0)
for s in count.index:
for n in nucleus:
count.loc[s,n] = (mappings[mappings.index.str.contains(s)]['nucleus'] == n).sum()
nucleus_session = {n:count.index.values[count[n]>5] for n in nucleus}
# sys.exit()
# make directory for each nucleus
for n in nucleus:
try:
os.mkdir("/mnt/DataGuillaume/corr_pop_nucleus/"+n)
except:
pass
def compute_population_correlation(nuc, session):
start_time = time.clock()
print(session)
store = pd.HDFStore("/mnt/DataGuillaume/population_activity/"+session+".h5")
rip_pop = store['rip']
rem_pop = store['rem']
wak_pop = store['wake']
store.close()
# WHICH columns to keep
mappings = pd.read_hdf("/mnt/DataGuillaume/MergedData/MAPPING_NUCLEUS.h5")
tmp = mappings[mappings.index.str.contains(session)]['nucleus'] == nuc
neurons = tmp.index.values[np.where(tmp)[0]]
idx = np.array([int(n.split("_")[1]) for n in neurons])
rip_pop = rip_pop[idx]
rem_pop = rem_pop[idx]
wak_pop = wak_pop[idx]
###############################################################################################################
# POPULATION CORRELATION FOR EACH RIPPLES
###############################################################################################################
#matrix of distance between ripples in second
interval_mat = np.vstack(nts.TsdFrame(rip_pop).as_units('s').index.values) - nts.TsdFrame(rip_pop).as_units('s').index.values
rip_corr = np.ones(interval_mat.shape)*np.nan
# doing the upper part of the diagonal
# rip_corr = np.eye(interval_mat.shape[0])
# bad
tmp = np.zeros_like(rip_corr)
tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
index = np.where(tmp == 2)
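	# only pairs (i, j) with i < j <= i + 300 are flagged twice above, so the loop
	# below correlates each ripple with at most the 300 ripples that follow it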
for i, j in zip(index[0], index[1]):
rip_corr[i,j] = scipy.stats.pearsonr(rip_pop.iloc[i].values, rip_pop.iloc[j].values)[0]
rip_corr[j,i] = rip_corr[i,j]
# print(rip_corr[i,j])
allrip_corr = pd.DataFrame(index = interval_mat[index], data = rip_corr[index])
rip_corr = pd.DataFrame(index = rip_pop.index.values, data = rip_corr, columns = rip_pop.index.values)
np.fill_diagonal(rip_corr.values, 1.0)
rip_corr = rip_corr.fillna(0)
###############################################################################################################
# POPULATION CORRELATION FOR EACH THETA CYCLE OF REM
###############################################################################################################
# compute all time interval for each ep of theta
interval_mat = np.vstack(nts.TsdFrame(rem_pop).as_units('s').index.values) - nts.TsdFrame(rem_pop).as_units('s').index.values
rem_corr = np.ones(interval_mat.shape)*np.nan
# index = np.where(np.logical_and(interval_mat < 3.0, interval_mat >= 0.0))
# rem_corr = np.eye(interval_mat.shape[0])
# bad
tmp = np.zeros_like(rem_corr)
tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
index = np.where(tmp == 2)
for i, j in zip(index[0], index[1]):
rem_corr[i,j] = scipy.stats.pearsonr(rem_pop.iloc[i].values, rem_pop.iloc[j].values)[0]
rem_corr[j,i] = rem_corr[i,j]
allrem_corr = pd.DataFrame(index = interval_mat[index], data = rem_corr[index])
rem_corr = pd.DataFrame(index = rem_pop.index.values, data = rem_corr, columns = rem_pop.index.values)
np.fill_diagonal(rem_corr.values, 1.0)
rem_corr = rem_corr.fillna(0)
###############################################################################################################
# POPULATION CORRELATION FOR EACH THETA CYCLE OF WAKE
###############################################################################################################
# compute all time interval for each ep of theta
interval_mat = np.vstack(nts.TsdFrame(wak_pop).as_units('s').index.values) - nts.TsdFrame(wak_pop).as_units('s').index.values
wak_corr = np.ones(interval_mat.shape)*np.nan
# index = np.where(np.logical_and(interval_mat < 3.0, interval_mat >= 0.0))
# wak_corr = np.eye(interval_mat.shape[0])
# bad
tmp = np.zeros_like(wak_corr)
tmp[np.triu_indices(interval_mat.shape[0], 1)] += 1
tmp[np.tril_indices(interval_mat.shape[0], 300)] += 1
index = np.where(tmp == 2)
for i, j in zip(index[0], index[1]):
wak_corr[i,j] = scipy.stats.pearsonr(wak_pop.iloc[i].values, wak_pop.iloc[j].values)[0]
wak_corr[j,i] = wak_corr[i,j]
allwak_corr = pd.DataFrame(index = interval_mat[index], data = wak_corr[index])
wak_corr = pd.DataFrame(index = wak_pop.index.values, data = wak_corr, columns = wak_pop.index.values)
np.fill_diagonal(wak_corr.values, 1.0)
wak_corr = wak_corr.fillna(0)
###############################################################################################################
# STORING
###############################################################################################################
store = pd.HDFStore("/mnt/DataGuillaume/corr_pop_nucleus/"+nuc+"/"+session+".h5")
store.put('rip_corr', rip_corr)
store.put('allrip_corr', allrip_corr)
store.put('wak_corr', wak_corr)
store.put('allwak_corr', allwak_corr)
store.put('rem_corr', rem_corr)
store.put('allrem_corr', allrem_corr)
store.close()
print(time.clock() - start_time, "seconds")
return time.clock() - start_time
dview = Pool(8)
for n in nucleus:
print(n)
a = dview.starmap_async(compute_population_correlation, zip([n]*len(nucleus_session[n]),nucleus_session[n])).get()
# a = compute_population_correlation('AD', nucleus_session['AD'][0])
# ###############################################################################################################
# # PLOT
# ###############################################################################################################
# last = np.max([np.max(allrip_corr[:,0]),np.max(alltheta_corr[:,0])])
# bins = np.arange(0.0, last, 0.2)
# # average rip corr
# index_rip = np.digitize(allrip_corr[:,0], bins)
# mean_ripcorr = np.array([np.mean(allrip_corr[index_rip == i,1]) for i in np.unique(index_rip)[0:30]])
# # average theta corr
# index_theta = np.digitize(alltheta_corr[:,0], bins)
# mean_thetacorr = np.array([np.mean(alltheta_corr[index_theta == i,1]) for i in np.unique(index_theta)[0:30]])
# xt = list(bins[0:30][::-1]*-1.0)+list(bins[0:30])
# ytheta = list(mean_thetacorr[0:30][::-1])+list(mean_thetacorr[0:30])
# yrip = list(mean_ripcorr[0:30][::-1])+list(mean_ripcorr[0:30])
# plot(xt, ytheta, 'o-', label = 'theta')
# plot(xt, yrip, 'o-', label = 'ripple')
# legend()
# xlabel('s')
# ylabel('r')
# show()
|
gpl-3.0
| 3,322,972,831,613,027,300
| 39.366667
| 126
| 0.579136
| false
| 2.895974
| false
| false
| false
|
timlau/FedoraReview
|
src/FedoraReview/helpers_mixin.py
|
1
|
5574
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# (C) 2011 - Tim Lauridsen <timlau@fedoraproject.org>
'''
Tools for helping Fedora package reviewers
'''
import logging
import os.path
import re
import urllib
from subprocess import Popen, PIPE
import hashlib
from settings import Settings
from review_error import ReviewError
class DownloadError(ReviewError):
''' Error in urlretrieve(). '''
def __init__(self, code, url):
ReviewError.__init__(
self, "Error %s downloading %s" % (code, url))
class HelpersMixin(object):
''' Miscellaneous library support mixin class. '''
def __init__(self):
try:
self.log = Settings.get_logger()
except AttributeError:
pass
def _run_cmd(self, cmd, header='Run command'):
''' Run a command using using subprocess, return output. '''
self.log.debug(header + ': ' + cmd)
cmd = cmd.split(' ')
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
output, error = '', 'undefined'
try:
output, error = proc.communicate()
except OSError, e:
self.log.debug("OS error, stderr: " + error, exc_info=True)
self.log.error("OS error running " + ' '.join(cmd), str(e))
return output
@staticmethod
def _checksum(path):
''' get the checksum for a path using algorithm set by configuration
(default: md5)
        :arg path: the path to get the checksum for
:return: checksum
'''
ck = hashlib.new(Settings.checksum)
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(8192), ''):
ck.update(chunk)
return ck.hexdigest()
@staticmethod
def urlretrieve(url, path):
''' Similar to urllib.urlretrieve, raises DownloadError. '''
try:
# we need to timeout eventually if there are problems
import socket
socket.setdefaulttimeout(30)
istream = urllib.FancyURLopener().open(url)
if istream.getcode() and istream.getcode() != 200:
raise DownloadError(istream.getcode(), url)
with open(path, 'w') as ostream:
octets = istream.read(32767)
while octets != '':
ostream.write(octets)
octets = istream.read(32767)
except IOError as err:
raise DownloadError(str(err), url)
def _get_file(self, link, directory, logger=None):
''' Download a file in link to directory. '''
fname = link.rsplit('/', 1)[1]
path = os.path.join(directory, fname)
if os.path.exists(path) and Settings.cache:
if logger:
logger(True)
logging.debug('Using cached source: ' + fname)
return path
self.log.debug(" --> %s : %s" % (directory, link))
if logger:
logger(False)
self.urlretrieve(link, path)
return path
@staticmethod
def rpmdev_extract(archive, extract_dir):
"""
Unpack archive in extract_dir. Returns true if
from subprocess.call() returns 0
"""
cmd = 'rpmdev-extract -qC ' + extract_dir + ' ' + archive
cmd += ' &>/dev/null'
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
stdout, stderr = p.communicate()
if p.returncode != 0:
log = Settings.get_logger()
log.debug("Cannot unpack " + archive)
log.debug("Status: %d, stdout: %s, stderr: %s.",
p.returncode, str(stdout), str(stderr))
return p.returncode == 0
@staticmethod
def check_rpmlint_errors(out, log):
""" Check the rpmlint output, return(ok, errmsg)
        If ok, output is OK and there are 0 warnings/errors.
        If not ok and errmsg != None, there are system errors,
        reflected in errmsg. If not ok and errmsg == None, parsing
is ok but there are warnings/errors"""
problems = re.compile(r'(\d+)\serrors\,\s(\d+)\swarnings')
lines = out.split('\n')[:-1]
err_lines = filter(lambda l: l.lower().find('error') != -1,
lines)
if len(err_lines) == 0:
Settings.get_logger().debug('Cannot parse rpmlint output: '
+ out)
return False, 'Cannot parse rpmlint output:'
res = problems.search(err_lines[-1])
if res and len(res.groups()) == 2:
errors, warnings = res.groups()
if errors == '0' and warnings == '0':
return True, None
else:
return False, None
else:
log.debug('Cannot parse rpmlint output: ' + out)
return False, 'Cannot parse rpmlint output:'
# vim: set expandtab ts=4 sw=4:
|
gpl-2.0
| -8,841,794,272,914,089,000
| 34.503185
| 76
| 0.577861
| false
| 4.101545
| false
| false
| false
|
cuauv/software
|
visualizer/configure.py
|
1
|
1691
|
#!/usr/bin/env python3
from build import ninja_common
build = ninja_common.Build("visualizer")
# Only build if all dependencies are present.
# TODO Create a better means of dependency checking.
import os
sources = ['gl_utils.cpp',
'graphics_engine.cpp',
'material.cpp',
'obj_builder.cpp',
'render_buffer.cpp',
'renderable.cpp',
'renderer.cpp',
'scene_object.cpp',
'shadow_map.cpp',
'skybox.cpp',
'stl_builder.cpp',
'stl_read.cpp']
build.build_shared(
'vis', [os.path.join('graphics_engine', source) for source in sources],
pkg_confs=['gl'], cflags=['-DGL_GLEXT_PROTOTYPES', '-Wno-misleading-indentation']
)
# We compile this separately and link at RUNTIME to avoid
# requiring OpenCV and Eigen for visualizer use.
build.build_shared('vision_link', ['vision_link.cpp'],
auv_deps=['auv-camera-message-framework', 'conf'], pkg_confs=['opencv4', 'eigen3'],
cflags=[]
)
# TODO we should not be compiling units like below.
build.build_shared('fishbowl_comm', ['fishbowl_comm.cpp', '../fishbowl/bits.cpp'],
auv_deps=['utils'])
build.build_shared('aslam_comm', ['aslam_comm.cpp'], auv_deps=['utils'])
build.build_cmd('auv-visualizer-nodisplay',
['visualizer.cpp', 'keyboard.cpp', 'point_manager.cpp',
'fishbowl_manager.cpp', 'async_manager.cpp'],
auv_deps=['shm', 'utils', 'vis',
'fishbowl_comm', 'math', 'quat', 'aslam_comm'],
pkg_confs=['gl', 'libconfig++', 'glfw3'], lflags=['-ldl'])
build.install('auv-visualizer', f='visualizer/visualizer.sh')
|
bsd-3-clause
| -7,491,380,834,800,671,000
| 33.510204
| 83
| 0.603193
| false
| 3.315686
| false
| false
| false
|
Clarity-89/clarityv2
|
src/clarityv2/portfolio/migrations/0001_initial.py
|
1
|
1263
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-02-27 19:20
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='name')),
('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True, verbose_name='slug')),
('image', models.ImageField(blank=True, upload_to='portfolio', verbose_name='image')),
('description', models.TextField(blank=True, verbose_name='description')),
('order', models.PositiveIntegerField(default=0)),
('published', models.BooleanField(default=True, verbose_name='published')),
],
options={
'ordering': ['order'],
'verbose_name_plural': 'portfolio entries',
'verbose_name': 'portfolio entry',
},
),
]
|
mit
| -5,629,408,718,103,012,000
| 36.147059
| 128
| 0.578781
| false
| 4.416084
| false
| false
| false
|
reedessick/pedagogy
|
coherentLikelihood/coherentLikelihood.py
|
1
|
23070
|
#!/usr/bin/python
usage = "coherentLikelihood.py [--options]"
description = "builds figures to demonstrate a heuristic burst search"
author = "Reed Essick (reed.essick@ligo.org)"
#-------------------------------------------------
import waveforms
import numpy as np
import subprocess as sp
import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot as plt
from optparse import OptionParser
#-------------------------------------------------
parser = OptionParser(usage=usage, description=description)
parser.add_option('-v', '--verbose', default=False, action='store_true')
parser.add_option('-T', '--duration', default=10.0, type='float', help='duration of the experiment')
parser.add_option('-s', '--sampling-rate', default=1024, type='int', help='sampling rate of the experiment, should be a power of 2')
parser.add_option('-S', '--SNR', default=15.0, type='float', help='requested SNR for the injection')
parser.add_option('', '--theta', default=45, type='float', help='the polar angle for triangulation. WARNING: the plot shows "theta" but that is measured from the zenith!')
parser.add_option('', '--D-over-c', default=3, type='float', help='the triangulation baseline')
parser.add_option('-f', '--freq', default=10.0, type='float', help='central frequency of the chirpedSineGaussian')
parser.add_option('-F', '--freqDot', default=20, type='float', help='frequency derivative of the chirpedSineGaussian')
parser.add_option('-t', '--tau', default=0.25, type='float', help='time constnat of the chirpedSineGaussian')
parser.add_option('', '--frames-per-sec', default=30, type='int', help='the number of frames per second of the movie')
parser.add_option('', '--num-frames', default=200, type='int', help='the total number of frames in the movie')
parser.add_option('', '--hide-signal', default=False, action='store_true', help='do not show signal in fame*png figures')
parser.add_option('', '--hide-noisy-reconstruction', default=False, action='store_true', help='do not show the reconstructed signal which contains noise')
parser.add_option('', '--hide-noiseless-reconstruction', default=False, action='store_true', help='do not show the reconstructed signal which contains only injections')
parser.add_option('', '--tag', default='', type='string' )
parser.add_option('', '--dpi', default=200, type='int' )
parser.add_option('', '--movie-type', default=[], action='append', type='string')
parser.add_option('', '--sanity-check', default=False, action='store_true', help='stop after making sanity check plots')
opts, args = parser.parse_args()
if opts.tag:
opts.tag = "_%s"%opts.tag
N = opts.duration*opts.sampling_rate
if N%2:
raise ValueError("must have an even number of sample points! %.3f*%.3f=%.3f"%(opts.duration, opts.sampling_rate, N))
if not opts.movie_type:
opts.movie_type.append( 'mpg' )
#-------------------------------------------------
if opts.verbose:
print "generating white noise (in the freq domain)"
(freqs, wFreqDom1), (times, wTimeDom1) = waveforms.whiteNoise( opts.duration, opts.sampling_rate )
(freqs, wFreqDom2), (times, wTimeDom2) = waveforms.whiteNoise( opts.duration, opts.sampling_rate )
#-------------------------------------------------
dt = opts.D_over_c * np.cos( opts.theta*np.pi/180 )
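# time-of-arrival difference between the two detectors: the baseline light-travel
# time D/c projected with cos(theta)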
to = opts.duration/2
if opts.verbose:
print "generating injection with to=%.3f"%(to)
hTimeDom1 = waveforms.chirpSineGaussianT( times, 1.0, opts.freq, opts.freqDot, opts.tau, to+dt/2 )
hFreqDom1 = waveforms.chirpSineGaussianF( freqs, 1.0, opts.freq, opts.freqDot, opts.tau, to+dt/2 )
hTimeDom2 = waveforms.chirpSineGaussianT( times, 1.0, opts.freq, opts.freqDot, opts.tau, to-dt/2 )
hFreqDom2 = waveforms.chirpSineGaussianF( freqs, 1.0, opts.freq, opts.freqDot, opts.tau, to-dt/2 )
#-------------------------------------------------
if opts.verbose:
print "computing optimal SNR and scaling injection"
### for white-gaussian noise with unit-variance in the frequency domain
snr = ( 2 * np.sum( hFreqDom1.real**2 + hFreqDom1.imag**2 + hFreqDom2.real**2 + hFreqDom2.imag**2 ) / opts.duration )**0.5
scaling = opts.SNR/snr
hTimeDom1 *= scaling
hFreqDom1 *= scaling
hTimeDom2 *= scaling
hFreqDom2 *= scaling
#-------------------------------------------------
if opts.verbose:
print "compute logBSN as a function of theta"
dataF1 = wFreqDom1 + hFreqDom1
dataT1 = wTimeDom1 + hTimeDom1
dataF2 = wFreqDom2 + hFreqDom2
dataT2 = wTimeDom2 + hTimeDom2
ylim = 1.1*max(np.max(np.abs(dataT2)), np.max(np.abs(dataT1)))
ylim = (-ylim, ylim)
#snr = 2 * np.sum( dataF1.real**2 + dataF1.imag**2 + dataF2.real**2 + dataF2.imag**2 ) / opts.duration + np.fft.ifft( 2 * np.fft.ifftshift( dataF1 * np.conj(dataF2) ) ).real * opts.sampling_rate ### ifft normalizes the sum by 1/n = 1/(s*T) and we want to normalize by 1/T to approximate the integral
#SNR = snr**0.5 ### this is the "coherent snr"
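# cross-correlate the two data streams in the frequency domain; the inverse FFT gives
# the correlated energy as a function of the relative time shift between detectors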
SNR = np.fft.ifft( 2 * np.fft.ifftshift( dataF1 * np.conj(dataF2) ) ).real * opts.sampling_rate ### ifft normalizes the sum by 1/n = 1/(s*T) and we want to normalize by 1/T to approximate the integral
#-------------------------------------------------
if opts.verbose:
print "plotting sanity check of injection and noise"
fig = plt.figure(figsize=(15,10))
### IFO1 raw data
ax = plt.subplot(2,3,1)
ax.plot( times, dataT1, 'm-', linewidth=1, alpha=0.75, label='$\mathrm{noise_1+signal_1}$' )
ax.plot( times-dt/2, dataT1, 'b-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ noise_1+signal_1}$' )
if not opts.hide_signal:
ax.plot( times-dt/2, hTimeDom1, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{shifted\ signal_1}$' )
ax.legend(loc='best')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\mathrm{time}$')
ax.set_ylabel('$d_1(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
ax.set_ylim(ylim)
### IFO1 strain data
ax = plt.subplot(2,3,2)
ax.plot( times, hTimeDom1, 'm-', linewidth=1, alpha=0.75, label='$\mathrm{signal_1}$' )
ax.plot( times-dt/2, hTimeDom1, 'b-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ signal_1}$' )
ax.set_ylim(ylim)
#ax.legend(loc='best')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\mathrm{time}$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_ylabel('$h_1(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
plt.annotate(s='', xy=(to+dt/2,np.min(hTimeDom1)), xytext=(to,np.min(hTimeDom1)), arrowprops=dict(arrowstyle='<-'))
#plt.annotate(s='$\\tau$', xy=(to+dt/4,np.min(hTimeDom1)*1.1), xytext=(to+dt/4,np.min(hTimeDom1)*1.1) )
ax.plot( [to]*2, ylim, 'k--', alpha=0.5, linewidth=1 )
ax.set_ylim(ylim)
### IFO2 raw data
ax = plt.subplot(2,3,4)
ax.plot( times, dataT2, 'c-', linewidth=1, alpha=0.75, label='$\mathrm{noise_2+signal_2}$' )
ax.plot( times+dt/2, dataT2, 'r-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ noise_2+signal_2}$' )
if not opts.hide_signal:
ax.plot( times+dt/2, hTimeDom2, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{shifted\ signal_2}$' )
ax.legend(loc='best')
ax.set_xlabel('$\mathrm{time}$')
ax.set_ylabel('$d_2(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
ax.set_ylim(ylim)
### IFO2 strain data
ax = plt.subplot(2,3,5)
ax.plot( times, hTimeDom2, 'c-', linewidth=1, alpha=0.75, label='$\mathrm{signal_2}$' )
ax.plot( times+dt/2, hTimeDom2, 'r-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ signal_2}$' )
ax.set_ylim(ylim)
#ax.legend(loc='best')
ax.set_xlabel('$\mathrm{time}$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_ylabel('$h_2(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
plt.annotate(s='', xy=(to-dt/2,np.max(hTimeDom2)), xytext=(to,np.max(hTimeDom2)), arrowprops=dict(arrowstyle='<-'))
#plt.annotate(s='$\\tau$', xy=(to-dt/4,np.max(hTimeDom2)*1.1), xytext=(to-dt/4,np.max(hTimeDom2)*1.1) )
ax.plot( [to]*2, ylim, 'k--', alpha=0.5, linewidth=1 )
ax.set_ylim(ylim)
### ray-plot
ax = plt.subplot(3,3,6)
truth = times<=opts.D_over_c
ax.plot( times[truth], SNR[truth], 'g-', linewidth=1, alpha=0.5, label='$\mathrm{freq-domain}$\n$\mathrm{computation}$' )
truth = times[-1]-times < opts.D_over_c
ax.plot( times[truth]-times[-1], SNR[truth], 'g-', linewidth=1, alpha=0.5, label='$\mathrm{freq-domain}$\n$\mathrm{computation}$' )
ylim_ray = ax.get_ylim()
ax.plot( [dt]*2, ylim_ray, 'k--', linewidth=1, alpha=0.5 )
ax.set_ylim(ylim_ray)
#ax.legend(loc='best')
ax.set_xlabel('$\\tau$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
#ax.set_ylabel('$\\rho(\\tau)$')
ax.set_ylabel('$\mathrm{correlated\ Energy}\sim\\rho^2$')
ax.set_xlim(xmin=0, xmax=opts.D_over_c)
ax = ax.twiny()
thetas = [-90, -45, -30, -15, 0, 15, 30, 45, 90]
ax.set_xticks([opts.D_over_c*np.sin(theta*np.pi/180) for theta in thetas])
ax.set_xticklabels(["$%d^\circ$"%theta for theta in thetas])
ax.set_xlim(xmin=0, xmax=opts.D_over_c)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\\theta$')
plt.subplots_adjust(hspace=0.1, wspace=0.1)
figname = "sanityCheck%s.png"%(opts.tag)
if opts.verbose:
print " %s"%figname
fig.savefig( figname, dpi=opts.dpi )
plt.close( fig )
if opts.sanity_check:
import sys
sys.exit(0)
#-------------------------------------------------
if opts.verbose:
print "making movie frames"
shifts = np.arange(0, opts.D_over_c, 1.0/opts.sampling_rate)
N = len(shifts)
frame_step = int( 1.0*N / opts.num_frames )
frameNo = 0
### plot an openning frame
fig = plt.figure(figsize=(15,10))
### IFO1 raw data
ax = plt.subplot(2,3,1)
ax.plot( times, dataT1, 'm-', linewidth=1, alpha=0.50, label='$\mathrm{noise_1+signal_1}$' )
ax.plot( times, dataT1, 'b-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ noise_1+signal_1}$' )
if not opts.hide_signal:
ax.plot( times, hTimeDom1, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{shifted\ signal_1}$' )
ax.legend(loc='upper left')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\mathrm{time}$')
ax.set_ylabel('$d_1(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
ax.set_ylim(ylim)
### IFO1 strain data
ax = plt.subplot(2,3,2)
if not opts.hide_noisy_reconstruction:
ax.plot( times, 0.5*(dataT1 + dataT2), 'm-', linewidth=1, alpha=0.75, label='$\mathrm{reconstructed\ signal_1}$')
if not opts.hide_noiseless_reconstruction:
ax.plot( times, 0.5*(hTimeDom1 + hTimeDom2), 'b-', linewidth=1, alpha=0.90, label='$\mathrm{zero\ noise}$\n$\mathrm{reconstructed\ signal_1}$')
if not opts.hide_signal:
ax.plot( times, hTimeDom1, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{signal_1}$' )
ax.set_ylim(ylim)
ax.legend(loc='upper right')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\mathrm{time}$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_ylabel('$h_1(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
### IFO2 raw data
ax = plt.subplot(2,3,4)
ax.plot( times, dataT2, 'c-', linewidth=1, alpha=0.50, label='$\mathrm{noise_2+signal_2}$' )
ax.plot( times, dataT2, 'r-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ noise_2+signal_2}$' )
if not opts.hide_signal:
ax.plot( times, hTimeDom2, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{shifted\ signal_2}$' )
ax.legend(loc='lower left')
ax.set_xlabel('$\mathrm{time}$')
ax.set_ylabel('$d_2(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
ax.set_ylim(ylim)
### IFO2 strain data
ax = plt.subplot(2,3,5)
if not opts.hide_noisy_reconstruction:
ax.plot( times, 0.5*(dataT1 + dataT2), 'c-', linewidth=1, alpha=0.75, label='$\mathrm{reconstructed\ signal_2}$')
if not opts.hide_noiseless_reconstruction:
ax.plot( times, 0.5*(hTimeDom1 + hTimeDom2), 'r-', linewidth=1, alpha=0.90, label='$\mathrm{zero\ noise}$\n$\mathrm{reconstructed\ signal_2}$')
if not opts.hide_signal:
ax.plot( times, hTimeDom2, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{signal_2}$' )
ax.set_ylim(ylim)
ax.legend(loc='lower right')
ax.set_xlabel('$\mathrm{time}$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_ylabel('$h_2(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
### ray-plot
ax = plt.subplot(3,3,6)
#ax.legend(loc='best')
ax.set_xlabel('$\\tau$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
#ax.set_ylabel('$\\rho(\\tau)$')
ax.set_ylabel('$\mathrm{correlated\ Energy}\sim\\rho^2$')
ax.set_xlim(xmin=0, xmax=opts.D_over_c)
ax.set_ylim(ymin=1.1*np.min(SNR), ymax=1.1*np.max(SNR))
ax = ax.twiny()
thetas = [-90, -45, -30, -15, 0, 15, 30, 45, 90]
ax.set_xticks([opts.D_over_c*np.sin(theta*np.pi/180) for theta in thetas])
ax.set_xticklabels(["$%d^\circ$"%theta for theta in thetas])
ax.set_xlim(xmin=0, xmax=opts.D_over_c)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\\theta$')
plt.subplots_adjust(hspace=0.1, wspace=0.1)
figname = "frame%s-%04d.png"%(opts.tag, frameNo)
if opts.verbose:
print " %s"%figname
fig.savefig( figname, dpi=opts.dpi )
plt.close(fig)
frameNo += 1
### plot the rest of the frames
ind = 0
while ind < N:
shift = shifts[ind]
fig = plt.figure(figsize=(15,10))
### IFO1 raw data
ax = plt.subplot(2,3,1)
ax.plot( times, dataT1, 'm-', linewidth=1, alpha=0.50, label='$\mathrm{noise_1+signal_1}$' )
ax.plot( times-shift/2, dataT1, 'b-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ noise_1+signal_1}$' )
if not opts.hide_signal:
ax.plot( times-shift/2, hTimeDom1, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{shifted\ signal_1}$' )
if shift:
plt.annotate(s='', xy=(to+dt/2,np.min(dataT1)), xytext=(to+dt/2-shift/2,np.min(dataT1)), arrowprops=dict(arrowstyle='<-'))
ax.legend(loc='upper left')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\mathrm{time}$')
ax.set_ylabel('$d_1(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
ax.set_ylim(ylim)
### IFO1 strain data
ax = plt.subplot(2,3,2)
if not opts.hide_noisy_reconstruction:
ax.plot( times[ind:], 0.5*(dataT1[ind:] + dataT2[:len(hTimeDom1)-ind]), 'm-', linewidth=1, alpha=0.75, label='$\mathrm{reconstructed\ signal_1}$')
if not opts.hide_noiseless_reconstruction:
ax.plot( times[ind:], 0.5*(hTimeDom1[ind:] + hTimeDom2[:len(hTimeDom1)-ind]), 'b-', linewidth=1, alpha=0.90, label='$\mathrm{zero\ noise}$\n$\mathrm{reconstructed\ signal_1}$')
if not opts.hide_signal:
ax.plot( times, hTimeDom1, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{signal_1}$' )
ax.set_ylim(ylim)
ax.legend(loc='upper right')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\mathrm{time}$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_ylabel('$h_1(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
### IFO2 raw data
ax = plt.subplot(2,3,4)
ax.plot( times, dataT2, 'c-', linewidth=1, alpha=0.50, label='$\mathrm{noise_2+signal_2}$' )
ax.plot( times+shift/2, dataT2, 'r-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ noise_2+signal_2}$' )
if not opts.hide_signal:
ax.plot( times+shift/2, hTimeDom2, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{shifted\ signal_2}$' )
if shift:
plt.annotate(s='', xy=(to-dt/2,np.max(dataT2)), xytext=(to-dt/2+shift/2,np.max(dataT2)), arrowprops=dict(arrowstyle='<-'))
ax.legend(loc='lower left')
ax.set_xlabel('$\mathrm{time}$')
ax.set_ylabel('$d_2(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
ax.set_ylim(ylim)
### IFO2 strain data
ax = plt.subplot(2,3,5)
if not opts.hide_noisy_reconstruction:
ax.plot( times[:len(hTimeDom2)-ind], 0.5*(dataT1[ind:] + dataT2[:len(hTimeDom2)-ind]), 'c-', linewidth=1, alpha=0.75, label='$\mathrm{reconstructed\ signal_2}$')
if not opts.hide_noiseless_reconstruction:
ax.plot( times[:len(hTimeDom2)-ind], 0.5*(hTimeDom1[ind:] + hTimeDom2[:len(hTimeDom2)-ind]), 'r-', linewidth=1, alpha=0.90, label='$\mathrm{zero\ noise}$\n$\mathrm{reconstructed\ signal_2}$')
if not opts.hide_signal:
ax.plot( times, hTimeDom2, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{signal_2}$' )
ax.set_ylim(ylim)
ax.legend(loc='lower right')
ax.set_xlabel('$\mathrm{time}$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_ylabel('$h_2(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
### ray-plot
ax = plt.subplot(3,3,6)
truth = times <= shift
ax.plot( times[truth], SNR[truth], 'g-', linewidth=1, alpha=0.5, label='$\mathrm{freq-domain}$\n$\mathrm{computation}$' )
# truth = times[-1]-times < shift
# ax.plot( times[truth]-times[-1], SNR[truth], 'g-', linewidth=1, alpha=0.5, label='$\mathrm{freq-domain}$\n$\mathrm{computation}$' )
# ax.legend(loc='best')
ax.set_xlabel('$\\tau$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
# ax.set_ylabel('$\\rho(\\tau)$')
ax.set_ylabel('$\mathrm{correlated\ Energy}\sim\\rho^2$')
# ax.set_xlim(xmin=-opts.D_over_c, xmax=opts.D_over_c)
ax.set_xlim(xmin=0, xmax=opts.D_over_c)
ax.set_ylim(ymin=1.1*np.min(SNR), ymax=1.1*np.max(SNR))
ax = ax.twiny()
thetas = [-90, -45, -30, -15, 0, 15, 30, 45, 90]
ax.set_xticks([opts.D_over_c*np.sin(theta*np.pi/180) for theta in thetas])
ax.set_xticklabels(["$%d^\circ$"%theta for theta in thetas])
ax.set_xlim(xmin=0, xmax=opts.D_over_c)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\\theta$')
plt.subplots_adjust(hspace=0.1, wspace=0.1)
figname = "frame%s-%04d.png"%(opts.tag, frameNo)
if opts.verbose:
print " %s"%figname
fig.savefig( figname, dpi=opts.dpi )
plt.close(fig)
frameNo += 1
ind += frame_step
### plot the final frame
shift = opts.D_over_c
ind = N
fig = plt.figure(figsize=(15,10))
### IFO1 raw data
ax = plt.subplot(2,3,1)
ax.plot( times, dataT1, 'm-', linewidth=1, alpha=0.50, label='$\mathrm{noise_1+signal_1}$' )
ax.plot( times-shift/2, dataT1, 'b-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ noise_1+signal_1}$' )
if not opts.hide_signal:
ax.plot( times-shift/2, hTimeDom1, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{shifted\ signal_1}$' )
plt.annotate(s='', xy=(to+dt/2,np.min(dataT1)), xytext=(to+dt/2-shift/2,np.min(dataT1)), arrowprops=dict(arrowstyle='<-'))
ax.legend(loc='upper left')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\mathrm{time}$')
ax.set_ylabel('$d_1(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
ax.set_ylim(ylim)
### IFO1 strain data
ax = plt.subplot(2,3,2)
if not opts.hide_noisy_reconstruction:
ax.plot( times[ind:], 0.5*(dataT1[ind:] + dataT2[:len(hTimeDom2)-ind]), 'm-', linewidth=1, alpha=0.75, label='$\mathrm{reconstructed\ signal_1}$')
if not opts.hide_noiseless_reconstruction:
ax.plot( times[ind:], 0.5*(hTimeDom1[ind:] + hTimeDom2[:len(hTimeDom2)-ind]), 'b-', linewidth=1, alpha=0.90, label='$\mathrm{zero\ noise}$\n$\mathrm{reconstructed\ signal_1}$')
if not opts.hide_signal:
ax.plot( times, hTimeDom1, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{signal_1}$' )
ax.set_ylim(ylim)
ax.legend(loc='upper right')
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\mathrm{time}$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_ylabel('$h_1(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
### IFO2 raw data
ax = plt.subplot(2,3,4)
ax.plot( times, dataT2, 'c-', linewidth=1, alpha=0.50, label='$\mathrm{noise_2+signal_2}$' )
ax.plot( times+shift/2, dataT2, 'r-', linewidth=1, alpha=0.90, label='$\mathrm{shifted\ noise_2+signal_2}$' )
if not opts.hide_signal:
ax.plot( times+shift/2, hTimeDom2, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{shifted\ signal_2}$' )
plt.annotate(s='', xy=(to-dt/2,np.max(dataT2)), xytext=(to-dt/2+shift/2,np.max(dataT2)), arrowprops=dict(arrowstyle='<-'))
ax.legend(loc='lower left')
ax.set_xlabel('$\mathrm{time}$')
ax.set_ylabel('$d_2(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
ax.set_ylim(ylim)
### IFO2 strain data
ax = plt.subplot(2,3,5)
if not opts.hide_noisy_reconstruction:
ax.plot( times[:len(hTimeDom2)-ind], 0.5*(dataT1[ind:] + dataT2[:len(hTimeDom2)-ind]), 'c-', linewidth=1, alpha=0.75, label='$\mathrm{reconstructed\ signal_2}$')
if not opts.hide_noiseless_reconstruction:
ax.plot( times[:len(hTimeDom2)-ind], 0.5*(hTimeDom1[ind:] + hTimeDom2[:len(hTimeDom2)-ind]), 'r-', linewidth=1, alpha=0.90, label='$\mathrm{zero\ noise}$\n$\mathrm{reconstructed\ signal_2}$')
if not opts.hide_signal:
ax.plot( times, hTimeDom2, 'k-', linewidth=1, alpha=0.5, label='$\mathrm{signal_2}$' )
ax.set_ylim(ylim)
ax.legend(loc='lower right')
ax.set_xlabel('$\mathrm{time}$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_ylabel('$h_2(t)$')
ax.set_xlim(xmin=opts.D_over_c, xmax=opts.duration-opts.D_over_c)
### ray-plot
ax = plt.subplot(3,3,6)
truth = times <= shift
ax.plot( times[truth], SNR[truth], 'g-', linewidth=1, alpha=0.5, label='$\mathrm{freq-domain}$\n$\mathrm{computation}$' )
#truth = times[-1]-times < shift
#ax.plot( times[truth]-times[-1], SNR[truth], 'g-', linewidth=1, alpha=0.5, label='$\mathrm{freq-domain}$\n$\mathrm{computation}$' )
#ax.legend(loc='best')
ax.set_xlabel('$\\tau$')
ax.yaxis.tick_right()
ax.yaxis.set_label_position('right')
#ax.set_ylabel('$\\rho(\\tau)$')
ax.set_ylabel('$\mathrm{correlated\ Energy}\sim\\rho^2$')
#ax.set_xlim(xmin=-opts.D_over_c, xmax=opts.D_over_c)
ax.set_xlim(xmin=0, xmax=opts.D_over_c)
ax.set_ylim(ymin=1.1*np.min(SNR), ymax=1.1*np.max(SNR))
ax = ax.twiny()
thetas = [-90, -45, -30, -15, 0, 15, 30, 45, 90]
ax.set_xticks([opts.D_over_c*np.sin(theta*np.pi/180) for theta in thetas])
ax.set_xticklabels(["$%d^\circ$"%theta for theta in thetas])
ax.set_xlim(xmin=0, xmax=opts.D_over_c)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
ax.set_xlabel('$\\theta$')
plt.subplots_adjust(hspace=0.1, wspace=0.1)
figname = "frame%s-%04d.png"%(opts.tag, frameNo)
if opts.verbose:
print " %s"%figname
fig.savefig( figname, dpi=opts.dpi )
plt.close(fig)
#-------------------------------------------------
for movie_type in opts.movie_type:
cmd = "ffmpeg -r %d -i frame%s-%s04d.png coherentLikelihood%s.%s"%(opts.frames_per_sec, opts.tag, "%", opts.tag, movie_type)
if opts.verbose:
print "wrapping into a movie:\n\t%s"%(cmd)
sp.Popen(cmd.split()).wait()
|
mit
| -2,642,448,682,214,172,000
| 35.103286
| 299
| 0.650975
| false
| 2.560204
| false
| false
| false
|
Gorbagzog/StageIAP
|
HorizonPhotometricNumpy.py
|
1
|
82759
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""H-AGN LightCone photometric Catalog.
Load catalog and make a match with the true lightcone catalog.
"""
import numpy as np
import matplotlib.pyplot as plt
import pyfits
# from scipy.spatial import cKDTree
# from timeit import default_timer as timer
import numpy.lib.recfunctions as rfn
# import matplotlib.mlab as mlab
import matplotlib as mpl
from scipy.optimize import curve_fit
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.ticker as ticker
from matplotlib.gridspec import GridSpec
"""Load true galdata from the H-AGN Lightcone"""
zbins_Cone = np.array([0, 1, 2, 3, 6])
numzbin = np.size(zbins_Cone)-1
galdata = []
for i in range(np.size(zbins_Cone)-1):
hdulist = pyfits.open('../Data/HorizonAGNLaigleCatalogs/Galaxies_' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+'.fits')
galdata.append(hdulist[1].data)
hdulist.close()
# cols = hdulist[1].columns
# cols.info()
"""It looks like the good catalogs to use are the Haloes and not the Halos"""
halodata = []
for i in range(np.size(zbins_Cone)-1):
hdulist2 = pyfits.open('../Data/HorizonAGNLaigleCatalogs/Haloes_' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+'.fits')
halodata.append(hdulist2[1].data)
hdulist2.close()
"""Load Horizon-AGN Lightcone Photometric catalog."""
col_names = ['Id', 'Ra', 'Dec', 'zphot', 'zphot_err', 'Mass', 'Mass_err', 'mag_u', 'magerr_u',
'mag_B', 'magerr_B', 'mag_V', 'magerr_V', 'mag_r', 'magerr_r', 'mag_i', 'magerr_i',
'mag_z', 'magerr_z', 'mag_Y', 'magerr_Y', 'mag_J', 'magerr_J', 'mag_H', 'magerr_H',
'mag_K', 'magerr_K', 'SFR']
galphot = np.genfromtxt(
'../Data/HorizonAGNLightconePhotometric/Salp_0.0-3.0_dust_v15c.in_Init_Small',
names=col_names)
"""Load catalog matching halos to their central galaxies"""
# Contains the IDs (starts at 1) of the central galaxy of each halo
hal_centgal = [[] for x in range(numzbin)]
for i in range(numzbin-1):
hal_centgal[i] = np.loadtxt('../Data/HorizonAGNLaigleCatalogs/Cat_' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+'_Hal_CentralGal_newb.txt',
dtype='i4')
# np.loadtxt('../Data/HorizonAGNLaigleCatalogs/Cat_' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+'_Hal_CentralGal_new.txt',
# dtype='i4'))
# New match with Yohan algorithm to find galaxies by decreasing spheres.
"""Load halos environment.
Header is #dens dfil dnod1 dnod2.
"dens" is an estimate of the local density (based on the Delaunay tessellation)
smoothed at 3 Mpc, "dfil" is the distance to the nearest filament, "dnod1" is the
distance to the nearest node, and "dnod2" the distance to the nearest node following
the filament. Distances are in Mpc.
To start with, you could separate the haloes according to their distance
to the filament and to the node, e.g.:
Nodes: dnod1 < 5 Mpc
Filaments: dfil < 2 Mpc
Walls/voids: the rest of the galaxies """
haloes_env = []
for i in range(3):
haloes_env.append(
np.loadtxt('../Data/HorizonAGNLaigleCatalogs/Haloes_' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+'_env.txt'))
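# The environment split quoted in the docstring above is not applied anywhere below.
# A minimal sketch of how it could be done, assuming the header column order
# #dens dfil dnod1 dnod2 and the 5 Mpc / 2 Mpc thresholds quoted above:
env_masks = []
for i in range(3):
    dfil = haloes_env[i][:, 1]
    dnod1 = haloes_env[i][:, 2]
    nodes = dnod1 < 5.0                               # haloes close to a node
    filaments = np.logical_and(~nodes, dfil < 2.0)    # close to a filament but not a node
    walls_voids = np.logical_and(~nodes, ~filaments)  # everything else
    env_masks.append({'nodes': nodes, 'filaments': filaments, 'walls_voids': walls_voids})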
"""Algorithm to find nearest value using a KDTree.
We match each observed galaxy to the nearest true galaxy in projection on the sky.
We could also take the third dimension into account to get a better match, but that
would give more weight to the redshift errors of the observed catalog."""
# galdata_allz = np.concatenate((galdata[0], galdata[1], galdata[2]))
# start = timer()
# kdtree = cKDTree(np.transpose([galdata_allz['Ra'], galdata_allz['Dec']]))
# obstotrue = np.apply_along_axis(kdtree.query, 0, [galphot['Ra'], galphot['Dec']])
# obstotrue[1][:] = obstotrue[1][:].astype('int')
# # add index of true gal corresponding to each observed gal
# galphot = rfn.append_fields(galphot, ['Distance', 'True_gal_idx'], obstotrue, usemask=False)
# # add index of observed gal to each true gal
# truetoobs = np.empty(galdata_allz.shape)
# truetoobs[:] = np.nan
# for idx_obs in range(len(obstotrue[0])):
# truetoobs[obstotrue[1][idx_obs].astype('int')] = idx_obs
# galdata_allz = rfn.append_fields(galdata_allz, 'Obs_gal_idx', truetoobs, usemask=False)
# end = timer()
# print('Positional match took :' + str(end - start))
"""Use the match Catalog of Clotilde"""
galdata_allz = np.concatenate((galdata[0], galdata[1], galdata[2]))
# Load the 2 columns matching catalog, first column is the ID of the galaxy in the Photo catalog,
# the second is the ID in the original catalog, concatenated in one big catalog
# Galaxies_0-1.fits, Galaxies_1-2.fits, Galaxies_2-3.fits.
obstotrue = np.loadtxt('../Data/HorizonAGNLightconePhotometric/Match.dat')
# Work with indices (starting at 0) rather than IDs (starting at 1); the first column is
# redundant because it is just the position in the array.
# galdata_allz[obstotrue[i]] = original galaxy corresponding to galphot[i]
obstotrue = obstotrue[:, 1] - 1
# add index of observed gal to each true gal
truetoobs = np.empty(galdata_allz.shape)
truetoobs[:] = -1
for idx_obs in range(len(obstotrue)):
truetoobs[obstotrue[idx_obs].astype('int')] = idx_obs
galdata_allz = rfn.append_fields(galdata_allz, 'Obs_gal_idx', truetoobs, usemask=False)
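# Note: the loop above could be replaced by an equivalent vectorised assignment,
#   truetoobs[obstotrue.astype('int')] = np.arange(len(obstotrue))
# left here only as a comment so that the behaviour of the script is unchanged.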
"""Plot MsObserved vs MsTrue"""
plt.figure()
x = np.arange(5, 13)
plt.hist2d(
np.log10(galdata_allz['Mass'][galdata_allz['Obs_gal_idx'] > 0]*10**11),
galphot['Mass'][galdata_allz[galdata_allz['Obs_gal_idx'] > 0]['Obs_gal_idx'].astype('int')],
cmin=1, bins=100, range=[[9, 12], [9, 12]], norm=mpl.colors.LogNorm(), cmap='jet'
)
plt.colorbar()
# plt.plot(x, x, lab='y=x')
plt.xlabel('Original Mass', size=12)
plt.ylabel('Photometric mass', size=12)
plt.title('H-AGN, stellar photometric mass dispersion')
"""Compute median, average and percentiles for masses."""
# For true catalog
stellarmassbins = np.linspace(9, 12, num=100)
avHMperSM = np.full([numzbin, np.size(stellarmassbins)-1], np.nan)
medHMperSM = np.full([numzbin, np.size(stellarmassbins)-1], np.nan)
stdHMperSM = np.full([numzbin, np.size(stellarmassbins)-1], np.nan)
for i in range(numzbin - 1):
for j in range(np.size(stellarmassbins)-1):
m1 = stellarmassbins[j]
m2 = stellarmassbins[j+1]
# select indices of central galaxies with a mass
# between m1 and m2 :
indices = np.where(
np.logical_and(
np.logical_and(
np.log10(galdata[i]['Mass'][hal_centgal[i]-1]*10**11) > m1,
np.log10(galdata[i]['Mass'][hal_centgal[i]-1]*10**11) <= m2
),
hal_centgal[i] > 0
)
)
if np.size(indices) > 2:
avHMperSM[i, j] = np.average(np.log10(halodata[i]['Mass'][indices] * 10**11))
medHMperSM[i, j] = np.median(np.log10(halodata[i]['Mass'][indices] * 10**11))
stdHMperSM[i, j] = np.std(np.log10(halodata[i]['Mass'][indices] * 10**11))
# For photometric catalog
stellarmassbins = np.linspace(9, 12, num=100)
avHMperSMPhot = np.full([numzbin, np.size(stellarmassbins)-1], np.nan)
medHMperSMPhot = np.full([numzbin, np.size(stellarmassbins)-1], np.nan)
stdHMperSMPhot = np.full([numzbin, np.size(stellarmassbins)-1], np.nan)
for i in range(numzbin-1):
for j in range(np.size(stellarmassbins)-1):
m1 = stellarmassbins[j]
m2 = stellarmassbins[j+1]
# select indices of central galaxies with a mass
# between m1 and m2 :
indices = np.where(
np.logical_and(
np.logical_and(
hal_centgal[i] > 0,
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
] > 0
),
np.logical_and(
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 +
sum(len(galdata[j]) for j in range(i))
].astype('int')
] > m1,
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 +
sum(len(galdata[j]) for j in range(i))
].astype('int')
] <= m2
),
)
)
if np.size(indices) > 2:
# print(np.size(indices))
avHMperSMPhot[i, j] = np.average(np.log10(halodata[i]['Mass'][indices] * 10**11))
medHMperSMPhot[i, j] = np.median(np.log10(halodata[i]['Mass'][indices] * 10**11))
stdHMperSMPhot[i, j] = np.std(np.log10(halodata[i]['Mass'][indices] * 10**11))
# stellarmassbins = np.linspace(8.1, 12, num=100)
# first_per = np.zeros([numzbin, np.size(stellarmassbins)-1])
# last_per = np.zeros([numzbin, np.size(stellarmassbins)-1])
# for i in range(numzbin):
# for j in range(np.size(stellarmassbins)-1):
# m1 = stellarmassbins[j]
# m2 = stellarmassbins[j+1]
# # select indices of central galaxies with a mass
# # between m1 and m2 :
# indices = np.where(
# np.logical_and(
# np.logical_and(
# np.log10(galdata[i]['Mass'][hal_centgal[i]-1]*10**11) > m1,
# np.log10(galdata[i]['Mass'][hal_centgal[i]-1]*10**11) <= m2
# ),
# hal_centgal[i] > 0
# )
# )
# if indices[0].size : #check if the array is not empty
# first_per[i,j] = np.percentile(np.log10(
# halodata[i]['Mass'][gal_mainhaloes[i][centGalInCentHalo[i][indices]]-1]*10**11), 10)
# last_per[i,j] = np.percentile(np.log10(
# halodata[i]['Mass'][gal_mainhaloes[i][centGalInCentHalo[i][indices]]-1]*10**11), 90)
# else:
# first_per[i,j] = numpy.nan
# last_per[i,j] = numpy.nan
"""Compute average and median Ms for a given Mh"""
massbins = np.linspace(10, 15, num=100)
avSMperHM = np.zeros([numzbin, np.size(massbins)-1])
medSMperHM = np.zeros([numzbin, np.size(massbins)-1])
for i in range(numzbin-1):
for j in range(np.size(massbins)-1):
m1 = massbins[j]
m2 = massbins[j+1]
# select indices of galaxies contained in the haloes with a mass
# between m1 and m2 :
indices = np.where(np.logical_and(
np.log10(halodata[i]['Mass']*10**11) > m1,
np.log10(halodata[i]['Mass']*10**11) <= m2))[0]
# indices_cent = np.intersect1d(indices, halodata[i]['level'] == 1)
if len(indices) > 0:
avSMperHM[i, j] = np.average(
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11))
medSMperHM[i, j] = np.median(
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11))
else:
avSMperHM[i, j] = np.nan
medSMperHM[i, j] = np.nan
"""Fit the Behroozi 2010 relation on Mh(Ms)"""
def boo_MhMs(Ms, M1, Ms0, beta, delta, gamma):
"""Behroozi et al. 2010 Mh(Ms) relation
All masses are in logscale"""
return M1+beta*(Ms-Ms0)+10**(delta*(Ms-Ms0))/(1+10**(-gamma*(Ms-Ms0)))-0.5
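# Written out (all masses in log10):
#   Mh(Ms) = M1 + beta*(Ms - Ms0) + 10**(delta*(Ms - Ms0)) / (1 + 10**(-gamma*(Ms - Ms0))) - 1/2
# so that Mh(Ms0) = M1: Ms0 is the stellar mass at which the halo mass equals M1.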
boo_fit_true = np.empty([numzbin-1, 5])
boo_cov_true = np.empty([numzbin-1, 5, 5])
for i in range(numzbin-1):
print(i)
indices = np.where(
np.logical_and(
np.log10(galdata[i]['Mass'][hal_centgal[i]-1]*10**11) > 9,
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1)
)
)
boo_fit_true[i], boo_cov_true[i] = curve_fit(
boo_MhMs,
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
np.log10(halodata[i]['Mass'][indices]*10**11),
bounds=[[10, 8, 0, 0, 0], [13, 11, 5, 5, 5]],
method='trf')
print(boo_fit_true)
boo_fit_phot = np.empty([numzbin-1, 5])
boo_cov_phot = np.empty([numzbin-1, 5, 5])
for i in range(numzbin-1):
print(i)
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
np.logical_and(
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
] > 0,
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
] > 9)
)
)
boo_fit_phot[i], boo_cov_phot[i] = curve_fit(
boo_MhMs,
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
],
np.log10(halodata[i]['Mass'][indices]*10**11),
bounds=[[10, 8, 0, 0, 0], [13, 11, 5, 5, 5]],
method='trf')
print(boo_fit_phot)
"""Plot Ms(Mh) for true galaxies and level 1 halos"""
boofitsSMbins = np.linspace(9, 12, num=100)
for i in range(numzbin-1):
plt.figure()
indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
# verification that all galaxies selected are central
# print(galdata[i]['level'][hal_centgal[i][indices]-1].min())
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
bins=100, cmin=1)
plt.colorbar()
# plt.scatter((massbins[:-1]+massbins[1:])/2, avSMperHM[i][:], color='red',
# label='Average SM for a given HM')
# plt.scatter((massbins[:-1]+massbins[1:])/2, medSMperHM[i][:],
# color='green', label='Median SM for a given HM')
plt.scatter(avHMperSM[i][:], (stellarmassbins[:-1]+stellarmassbins[1:])/2,
color='black', label='Average HM for a given SM')
plt.scatter(medHMperSM[i][:], (stellarmassbins[:-1]+stellarmassbins[1:])/2,
color='pink', label='Median HM for a given SM')
# Plot Behroozi fit
# plt.plot(boo_MhMs(boofitsSMbins, *boo_fit_true[i]), boofitsSMbins,
# label=str('True Behroozi function fit'), c='r')
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Log($M_{*}$) [Log($M_{\odot}$)]', size=12)
plt.title('HorizonAGN, Central gal, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatchBis/TrueMass_HaloMass_Boofit' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Plot Ms(Mh) on the same figure"""
fig, ax = plt.subplots(2, 2)
# fig.suptitle('Horizon AGN CONE, WARNING !! colorbars not homogeneous')
for i in range(3):
indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
ax1 = ax[i//2, i % 2]
if i == 0:
counts, xedges, yedges, im = ax1.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
bins=100, cmin=1)
else:
_, _, _, im = ax1.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
bins=(xedges, yedges), cmin=1)
# Put the colorbar inside of the axes.
axins1 = inset_axes(
ax1,
width="5%", # width = 10% of parent_bbox width
height="35%", # height : 50%
loc=8,
bbox_to_anchor=[0.5, 0, 0.5, 1],
bbox_transform=ax1.transAxes
# bbox_transform=ax1.transAxes,
# bbox_to_anchor=(1, 1)
)
cbar = fig.colorbar(im, cax=axins1)
ax1.set_xlabel('Log($M_{h}/M_{\odot}$)', size=12)
ax1.set_ylabel('Log($M_{*}/M_{\odot}$)', size=12)
cbar.ax.tick_params(labelsize=9)
tick_locator = ticker.MaxNLocator(nbins=5)
cbar.locator = tick_locator
cbar.update_ticks()
ax1.plot(boo_MhMs(boofitsSMbins, *boo_fit_true[i]), boofitsSMbins,
label=str('Behroozi function fit'), c='r')
plt.text(0.1, 0.8, str(zbins_Cone[i])+'<z<'+str(zbins_Cone[i+1]),
size=12, transform=ax1.transAxes, bbox=dict(boxstyle='round', facecolor='white'))
fig.tight_layout()
# plt.subplots_adjust(top=0.95)
plt.show()
"""Plot Ms_observed(Mh) and level 1 halos"""
boofitsSMbins = np.linspace(9, 12, num=100)
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
] > 0
)
)
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
],
bins=100, cmin=1, range=[[10, 14], [9, 12]])
plt.colorbar()
# plt.errorbar(avHMperSMPhot[i][:], (stellarmassbins[:-1]+stellarmassbins[1:])/2,
# xerr=stdHMperSMPhot[i],
# color='red', label='Average HM for a given SM')
# plt.scatter(medHMperSMPhot[i][:], (stellarmassbins[:-1]+stellarmassbins[1:])/2,
# color='pink', label='Median HM for a given SM')
# Plot Behroozi fit
plt.plot(boo_MhMs(boofitsSMbins, *boo_fit_phot[i]), boofitsSMbins,
label=str('Behroozi function fit'), c='r')
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Log($M_{*}$) Photometric [Log($M_{\odot}$)]', size=12)
plt.title('HorizonAGN photo, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatchBis/PhotoMass_HaloMass_Boofit' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Plot Ms/Mh vs Mh for true and photometric catalogs"""
plt.figure()
# cmap = ['blue', 'green', 'red']
# marker = ['v', '>', '^']
for i in range(numzbin-1):
# plt.scatter(
# medHMperSM[i],
# (stellarmassbins[:-1]+stellarmassbins[1:]) / 2 - medHMperSM[i],
# label='True catalog, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]),
# edgecolors=cmap[i], facecolors='none'
# )
plt.plot(boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
boofitsSMbins - boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
label='z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.scatter(
# medHMperSMPhot[i],
# (stellarmassbins[:-1]+stellarmassbins[1:]) / 2 - medHMperSMPhot[i],
# label='Phot catalog, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]),
# edgecolors=cmap[i], facecolors=cmap[i]
# )
plt.legend()
plt.xlabel('Log($M_{h} / M_{\odot}$)', size=15)
plt.ylabel('Log($M_{s}/M_{h}$)', size=15)
"""Plot Ms/Mh histogram for true catalog"""
for i in range(numzbin-1):
plt.figure()
indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
# verification that all galaxies selected are central
# print(galdata[i]['level'][hal_centgal[i][indices]-1].min())
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]/halodata[i]['Mass'][indices]),
bins=100, cmin=1)
plt.colorbar()
plt.scatter(
medHMperSM[i],
(stellarmassbins[:-1]+stellarmassbins[1:]) / 2 - medHMperSM[i],
label='Phot catalog, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]),
facecolors='none', color='red'
)
# Plot Behroozi fit
plt.plot(boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
boofitsSMbins - boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
label=str('Behroozi function fit'), c='r')
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Log($M_{*}$) [Log($M_{\odot}$)]', size=12)
plt.title('HorizonAGN, Central gal, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
"""Plot Ms/Mh for photometric catalog and with median found with Ms(Mh)"""
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
np.logical_and(
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
] > 0,
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
] > 9
)
)
)
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
] - np.log10(halodata[i]['Mass'][indices]*10**11),
bins=100, cmin=1, range=[[10, 14], [-2, 1]]
)
# plt.plot(
# medHMperSMPhot[i],
# (stellarmassbins[:-1]+stellarmassbins[1:]) / 2 - medHMperSMPhot[i],
# label='Phot catalog, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]),
# color='red'
# )
# plt.plot(
# boo_MhMs(boofitsSMbins, *boo_fit_phot[i]),
# boofitsSMbins - boo_MhMs(boofitsSMbins, *boo_fit_phot[i]),
# label=str('phot Behroozi function fit'), c='black')
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=15)
plt.ylabel('Log($M_{s}/M_{h}$)', size=15)
plt.title('H-AGN, Central gal and level 1 halos')
""""With gas mass"""
# plt.hist2d(
# np.log10(halodata[i]['Mass'][indices]*10**11),
# np.log10(10**galphot['Mass'][
# galdata_allz['Obs_gal_idx'][
# hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
# ].astype('int')
# ] + gas_mass[galdata_allz['Obs_gal_idx'][
# hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
# ].astype('int')]) - np.log10(halodata[i]['Mass'][indices]*10**11),
# bins=100, cmin=1, range=[[10, 14], [-2, 1]]
# )
"""Fit the Yang relation on the M*/Mh relation"""
def mstar_over_mh_yang(x, A, m1, beta, gamma):
"""Yang et al. 2012 function, see Moster et al. 2010."""
return 2.0 * A * ((x / m1)**(-beta) + (x / m1)**gamma)**(-1)
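# Here x is the halo mass in linear units (unlike boo_MhMs above, which works in log10).
# The double power law equals A at x = m1 and falls off roughly as (x/m1)**beta below
# and (x/m1)**(-gamma) above the characteristic mass m1.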
# yang_fit_true = np.empty([numzbin, 4])
# yang_cov_true = np.empty([numzbin, 4, 4])
# for i in range(numzbin-1):
# yang_fit_true[i], yang_cov_true[i] = curve_fit(
# mstar_over_mh_yang,
# 10**medHMperSM[i][~np.isnan(medHMperSM[i])],
# 10**(((stellarmassbins[:-1]+stellarmassbins[1:]) / 2)[~np.isnan(medHMperSM[i])] -
# medHMperSM[i][~np.isnan(medHMperSM[i])]),
# sigma=stdHMperSM[i][~np.isnan(medHMperSM[i])],
# p0=[0.01, 10**12, 0.1, 0.1],
# bounds=[[0, 10**9, 0, 0], [0.5, 10**14, 5, 5]], method='trf')
# yang_fit_phot = np.empty([numzbin-1, 4])
# yang_cov_phot = np.empty([numzbin-1, 4, 4])
# for i in range(numzbin-1):
# yang_fit_phot[i], yang_cov_phot[i] = curve_fit(
# mstar_over_mh_yang,
# 10**medHMperSMPhot[i][~np.isnan(medHMperSMPhot[i])],
# 10**(((stellarmassbins[:-1]+stellarmassbins[1:]) / 2)[~np.isnan(medHMperSMPhot[i])] -
# medHMperSMPhot[i][~np.isnan(medHMperSMPhot[i])]),
# sigma=stdHMperSMPhot[i][~np.isnan(medHMperSMPhot[i])],
# p0=[0.01, 10**12, 0.5, 0.1],
# bounds=[[0, 10**10, 0, 0], [0.5, 10**13, 5, 5]], method='trf')
# print(yang_fit_phot)
yang_fit_true = np.empty([numzbin-1, 4])
yang_cov_true = np.empty([numzbin-1, 4, 4])
for i in range(numzbin-1):
print(i)
indices = np.where(
np.logical_and(
np.logical_and(
np.log10(galdata[i]['Mass'][hal_centgal[i]-1]*10**11) > 9,
np.log10(halodata[i]['Mass']*10**11) > 10.8),
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1)
)
)
yang_fit_true[i], yang_cov_true[i] = curve_fit(
mstar_over_mh_yang,
halodata[i]['Mass'][indices]*10**11,
galdata[i]['Mass'][hal_centgal[i][indices]-1] / halodata[i]['Mass'][indices],
p0=[0.01, 10**12, 0.1, 0.1],
bounds=[[0, 10**9, 0, 0], [0.5, 10**14, 5, 5]], method='trf')
print(yang_fit_true)
yang_fit_phot = np.empty([numzbin-1, 4])
yang_cov_phot = np.empty([numzbin-1, 4, 4])
for i in range(numzbin-1):
yang_fit_phot[i], yang_cov_phot[i] = curve_fit(
mstar_over_mh_yang,
10**medHMperSMPhot[i][~np.isnan(medHMperSMPhot[i])],
10**(((stellarmassbins[:-1]+stellarmassbins[1:]) / 2)[~np.isnan(medHMperSMPhot[i])] -
medHMperSMPhot[i][~np.isnan(medHMperSMPhot[i])]),
sigma=stdHMperSMPhot[i][~np.isnan(medHMperSMPhot[i])],
p0=[0.01, 10**12, 0.5, 0.1],
bounds=[[0, 10**10, 0, 0], [0.5, 10**13, 5, 5]], method='trf')
print(yang_fit_phot)
"""Plot Yang fit"""
x = np.logspace(10, 14, num=1000)
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(
np.log10(galdata[i]['Mass'][hal_centgal[i]-1]*10**11) > 9,
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1)
)
)
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1] / halodata[i]['Mass'][indices]),
bins=100, cmin=1, range=[[10.3, 13], [-2.5, -0.5]]
)
p = plt.plot(
np.log10(x), np.log10(mstar_over_mh_yang(x, *yang_fit_true[i])),
label=str('Moster et al. fit'), c='b')
plt.plot(boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
boofitsSMbins - boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
label=str('Behroozi et al. fit'), c='r')
plt.xlabel('Log($M_{h} / M_{\odot}$)', size=15)
plt.ylabel('Log($M_{s}/M_{h}$)', size=15)
plt.legend()
plt.text(0.1, 0.1, str(zbins_Cone[i])+'<z<'+str(zbins_Cone[i+1]),
size=12, transform=plt.gca().transAxes, bbox=dict(boxstyle='round', facecolor='white'))
# (use the current axes' transform: ax1 here would refer to an axes of a previous figure)
plt.tight_layout()
plt.savefig('../Plots/HAGN_Matching/ClotMatchBis/True_MsonMH_fits' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
x = np.logspace(10, 14, num=1000)
plt.figure()
for i in range(numzbin-1):
# p = plt.plot(np.log10(x), np.log10(mstar_over_mh_yang(x, *yang_fit_true[i])))
# plt.plot(np.log10(x), np.log10(mstar_over_mh_yang(x, *yang_fit_phot[i])),
# color=p[0].get_color())
# plt.scatter(
# medHMperSM[i],
# (stellarmassbins[:-1]+stellarmassbins[1:]) / 2 - medHMperSM[i],
# facecolors='none', edgecolors=p[0].get_color(),
# label='True catalog, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])
# )
# plt.scatter(
# medHMperSM[i],
# (stellarmassbins[:-1]+stellarmassbins[1:]) / 2 - medHMperSMPhot[i],
# facecolors='none', edgecolors=p[0].get_color(),
# label='Photo catalog, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])
# )
# Plot Behroozi fit
p = plt.plot(
boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
boofitsSMbins - boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
label='z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
plt.plot(boo_MhMs(boofitsSMbins, *boo_fit_phot[i]),
boofitsSMbins - boo_MhMs(boofitsSMbins, *boo_fit_phot[i]),
linestyle ='--', color=p[0].get_color())
plt.legend()
plt.xlabel('Log($M_{h} / M_{\odot}$)', size=15)
plt.ylabel('Log($M_{s}/M_{h}$)', size=15)
plt.show()
"""Find MhPeak with the Yang fit"""
MhaloPeak_true = np.zeros(numzbin-1)
for i in range(numzbin-1):
MhaloPeak_true[i] = np.log10(x[np.argmax(mstar_over_mh_yang(x, *yang_fit_true[i]))])
MhaloPeak_phot = np.zeros(numzbin-1)
for i in range(numzbin-1):
MhaloPeak_phot[i] = np.log10(x[np.argmax(mstar_over_mh_yang(x, *yang_fit_phot[i]))])
"""Find MhPeak with Behroozi fit"""
MhaloPeak_true_boo = np.zeros(numzbin-1)
for i in range(numzbin-1):
idx_max = np.argmax(boofitsSMbins - boo_MhMs(boofitsSMbins, *boo_fit_true[i]))
MhaloPeak_true_boo[i] = boo_MhMs(boofitsSMbins[idx_max], *boo_fit_true[i])
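# i.e. the peak is taken where log10(Ms/Mh) = Ms - Mh(Ms) is maximal along the
# stellar-mass grid, and the corresponding halo mass is read off through the fitted
# Behroozi relation (so the resolution is set by the boofitsSMbins grid).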
# MhaloPeak_phot_boo = np.zeros(numzbin-1)
# for i in range(numzbin-1):
# MhaloPeak_phot_boo[i] = np.log10(x[np.argmax(mstar_over_mh_yang(x, *yang_fit_phot[i]))])
"""Plot MhaloPeak versus z"""
# Leauthaud et al. 2011 use a different cosmology with H0=72
redshiftLeauthaud = np.array([(0.22 + 0.48) / 2, (0.48 + 0.74) / 2, (0.74 + 1) / 2])
MhaloPeakLeauthaud = np.log10(np.array([9.5 * 10**11, 1.45 * 10**12, 1.4 * 10**12]))
MhaloSigmaLeauthaud = np.log10(np.array(
[1.05 * 10**12, 1.55 * 10**12, 1.5 * 10**12])) - MhaloPeakLeauthaud
# Load Coupon+17 draft Peak values
# We use PeakPosMCMCMean and PeakPosMCMCstd
# Values are given in Log10(Mh*h^-1 Msun)
redshiftCoupon17 = np.array([0.34, 0.52, 0.70, 0.90, 1.17, 1.50,
1.77, 2.15, 2.75, 3.37, 3.96, 4.83])
MhaloPeakCoupon17 = np.zeros([np.size(redshiftCoupon17)])
MhaloSigmaCoupon17 = np.zeros([np.size(redshiftCoupon17)])
for i in range(len(redshiftCoupon17)):
MhaloPeakCoupon17[i], MhaloSigmaCoupon17[i] = np.loadtxt(
'../Data/Coupon17/peak/peak_{:1.2f}.ascii'.format(redshiftCoupon17[i]),
usecols=(2, 3))
plt.figure()
# plt.plot((zbins_Cone[1:-1]+zbins_Cone[:-2])/2, MhaloPeak_true, 'd',
# label='Original Catalog')
# plt.plot((zbins_Cone[1:-1]+zbins_Cone[:-2])/2, MhaloPeak_phot, 'd',
# label='Photometric Catalog')
# Coming From AM__COSMOSIari_BolshoiPlanc.py
plt.errorbar((redshifts[1:] + redshifts[:-1]) / 2, MhaloPeak + np.log10(67.74/70),
yerr=np.transpose(MhaloPeakSigma),
fmt='o', color='red', capsize=5, label='Cosmos AM')
plt.errorbar(
(zbins_Cone[1:-1]+zbins_Cone[:-2])/2, MhaloPeak_true_boo,
yerr=0.1, fmt='o', capsize=5, c='g',
label='Horizon-AGN Lightcone')
plt.errorbar(redshiftCoupon17, MhaloPeakCoupon17 - np.log10(0.7),
yerr=MhaloSigmaCoupon17, c='b',
fmt='o', capsize=5, label='Coupon et al. 2017 Draft')
plt.errorbar(redshiftLeauthaud, MhaloPeakLeauthaud + np.log10(72/70),
yerr=MhaloSigmaLeauthaud, c='black',
fmt='o', capsize=5, label='Leauthaud et al. 2011')
plt.ylabel('Log($M_{halo}^{peak}/ M_{\odot}$)', size=15)
plt.xlabel('Redshift', size=15)
plt.legend(loc=2)
# plt.title('Horizon-AGN, MhaloPeak')
plt.tight_layout()
"""Plot sSFR vs Mh for true catalogs"""
for i in range(numzbin-1):
plt.figure()
indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
# verification that all galaxies selected are central
# print(galdata[i]['level'][hal_centgal[i][indices]-1].min())
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
# np.log10(galdata[i]['SFRCorr'][hal_centgal[i][indices]-1] /
# (galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11)),
np.log10(galdata[i]['SFRCorr'][hal_centgal[i][indices]-1]),
bins=100, cmin=1, range=[[10, 14], [-2, 2]])
plt.colorbar()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Log(SFR) [Log($yr^{-1}$)]', size=12)
plt.title('HorizonAGN, Central gal, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatchBis/TrueSpecificSFR_HaloMass' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
# TODO : compute median sSFR for true and photo galaxies
"""Plot SFR vs Mh for photo catalogs"""
# TODO plot only for Ms > 10**9
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
] > 0
)
)
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galphot['SFR'][  # use the 'SFR' column: this section plots SFR, not the Ra coordinate
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
]),
bins=100, cmin=1)
plt.colorbar()
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Log(SFR) Photometric [Log($M_{\odot}/yr$)]', size=12)
plt.title('HorizonAGN, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatch/PhotoSFR_HaloMass' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""PLot sSFR vs Mh for photo cat"""
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
np.logical_and(
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
] > 0,
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')] > 9
)
)
)
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galphot['SFR'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
] / 10**(galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')])
),
bins=100, cmin=1, range=[[10, 14], [-13.5, -6.5]])
plt.colorbar()
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Log(sSFR) Photometric [Log($yr^{-1}$)]', size=12)
plt.title('HorizonAGN, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
plt.savefig('../Plots/HAGN_Matching/ClotMatchBis/PhotoSpecificSFR_HaloMass' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Gas Met vs Mh for photo catalog"""
# Load gas mass and gas met
gas_mass, gas_met = np.loadtxt('../Data/HorizonAGNLightconePhotometric/GasInfo.dat', unpack=True)
# Add a column with gas mass and metalicity in galphot catalog
galphot = rfn.append_fields(galphot, 'Gas_mass', gas_mass, usemask=False)
galphot = rfn.append_fields(galphot, 'Gas_met', gas_met, usemask=False)
def boost(z):
"""Boost the metalicity of gas and stars because of the low resolution of H-AGN."""
return 4.08430 - 0.213574 * z - 0.111197 * z**2
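# For reference, boost(0) ~ 4.08, boost(1) ~ 3.76, boost(2) ~ 3.21: the correction
# factor applied to the metallicities decreases with redshift.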
# Compute boosted Metalicity for photometric catalog
gas_met_boost = np.empty(gas_met.shape)
for idx_phot in range(len(gas_met_boost)):
gas_met_boost[idx_phot] = gas_met[idx_phot] * boost(
galdata_allz['z'][obstotrue[idx_phot].astype('int')])
# Add a column on gal_phot
galphot = rfn.append_fields(galphot, 'Gas_met_boost', gas_met_boost, usemask=False)
plt.close('all')
"""Compute Median Metalicity per halo mass and 68% interval."""
massbins = np.linspace(10, 15, num=100)
medMetperHMPhot = np.zeros([numzbin, np.size(massbins)-1])
avMetperHMPhot = np.zeros([numzbin, np.size(massbins)-1])
stdMetperHMPhot = np.zeros([numzbin, np.size(massbins)-1])
# supMetperHM = np.zeros([numzbin, np.size(massbins)-1])
# infMetperHM = np.zeros([numzbin, np.size(massbins)-1])
for i in range(numzbin-1):
indices_selec = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
np.logical_and(
np.logical_and(
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
] > 0,
galphot['Gas_met_boost'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
]
),
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
] > 9
)
)
)
gal_gasmet = galphot['Gas_met_boost'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')]
for j in range(np.size(massbins)-1):
m1 = massbins[j]
m2 = massbins[j+1]
indices = np.where(np.logical_and(
np.log10(halodata[i]['Mass']*10**11) > m1,
np.log10(halodata[i]['Mass']*10**11) <= m2))[0]
indices = np.intersect1d(indices_selec, indices)
if len(indices) > 4:
avMetperHMPhot[i, j] = np.average(gal_gasmet[indices])
medMetperHMPhot[i, j] = np.median(gal_gasmet[indices])
stdMetperHMPhot[i, j] = np.std(gal_gasmet[indices])
else:
avMetperHMPhot[i, j] = np.nan
medMetperHMPhot[i, j] = np.nan
stdMetperHMPhot[i, j] = np.nan
"""Plot Gas metalicity vs Mh for photo galaxies"""
# TODO: problem with certain galaxies having a gas metalicity of 0
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
np.logical_and(
np.logical_and(
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
] > 0,
galphot['Gas_met_boost'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
]
),
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
] > 9
)
)
)
plt.hist2d(
#np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(haloes_env[i][indices, 0][0]),
galphot['Gas_met_boost'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
],
bins=100, cmin=1)
plt.colorbar()
# plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMPhot[i],
# color='red', label='Average Metalicity for a given HM, $\pm 1\sigma$')
# plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMPhot[i] + stdMetperHMPhot[i],
# color='red', linestyle='--')
# plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMPhot[i] - stdMetperHMPhot[i],
# color='red', linestyle='--')
# plt.errorbar((massbins[:-1]+massbins[1:])/2, avMetperHMPhot[i][:],
# color='red', yerr=stdMetperHMPhot[i],
# label='Average Metalicity for a given HM')
# plt.scatter((massbins[:-1]+massbins[1:])/2, medMetperHMPhot[i][:],
# color='green', label='Median Metalicity for a given HM')
plt.legend()
plt.xlabel('Log(Halo local density)', size=12)  # the active x variable above is the local density, not the halo mass
plt.ylabel('Gas Metalicity', size=12)
plt.title('Photometric HorizonAGN, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatchBis/GasMet/gasmet_' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Evolution of photometric Gas metalicity with redshift"""
plt.figure()
for i in range(numzbin-1):
plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMPhot[i],
label='z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
plt.fill_between(
(massbins[:-1]+massbins[1:])/2, avMetperHMPhot[i] + stdMetperHMPhot[i],
avMetperHMPhot[i] - stdMetperHMPhot[i], alpha=0.3,
linestyle='--')
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Gas Metalicity', size=12)
plt.title('Photometric HorizonAGN Gas metalicity')
plt.tight_layout()
"""Boost stellar metalicity in True catalog"""
stellar_met_boost = np.empty(galdata_allz['met'].shape)
for idx_true in range(len(stellar_met_boost)):
stellar_met_boost[idx_true] = galdata_allz['met'][idx_true] * boost(
galdata_allz['z'][idx_true])
galdata_allz = rfn.append_fields(galdata_allz, 'Stellar_met_boost',
stellar_met_boost, usemask=False)
"""Compute average of stellar metalicity and standard deviation"""
massbins = np.linspace(10, 15, num=100)
medMetperHMtrue = np.zeros([numzbin, np.size(massbins)-1])
avMetperHMtrue = np.zeros([numzbin, np.size(massbins)-1])
stdMetperHMtrue = np.zeros([numzbin, np.size(massbins)-1])
# supMetperHM = np.zeros([numzbin, np.size(massbins)-1])
# infMetperHM = np.zeros([numzbin, np.size(massbins)-1])
for i in range(numzbin-1):
indices_selec = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
gal_stemet = galdata_allz['Stellar_met_boost'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))]
for j in range(np.size(massbins)-1):
m1 = massbins[j]
m2 = massbins[j+1]
indices = np.where(np.logical_and(
np.log10(halodata[i]['Mass']*10**11) > m1,
np.log10(halodata[i]['Mass']*10**11) <= m2))[0]
indices = np.intersect1d(indices_selec, indices)
if len(indices) > 0:
avMetperHMtrue[i, j] = np.average(gal_stemet[indices])
medMetperHMtrue[i, j] = np.median(gal_stemet[indices])
stdMetperHMtrue[i, j] = np.std(gal_stemet[indices])
else:
avMetperHMtrue[i, j] = np.nan
medMetperHMtrue[i, j] = np.nan
stdMetperHMtrue[i, j] = np.nan
"""Plot Stellar Met vs Mh for photo catalogs"""
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
)
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
galdata_allz['Stellar_met_boost'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))],
bins=100, cmin=1
)
plt.colorbar()
plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMtrue[i],
color='red', label='Average Metalicity for a given HM, $\pm 1\sigma$')
plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMtrue[i] + stdMetperHMtrue[i],
color='red', linestyle='--')
plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMtrue[i] - stdMetperHMtrue[i],
color='red', linestyle='--')
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Stellar Metalicity', size=12)
plt.title('Original HorizonAGN, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatch/StellarMet/stellarmet_' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Evolution of stellar metalicity with redshift"""
plt.figure()
for i in range(numzbin-1):
plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMtrue[i],
label='z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
plt.fill_between(
(massbins[:-1]+massbins[1:])/2, avMetperHMtrue[i] + stdMetperHMtrue[i],
avMetperHMtrue[i] - stdMetperHMtrue[i], alpha=0.3,
linestyle='--')
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Stellar Metalicity', size=12)
plt.title('Original HorizonAGN Stellar metalicity')
plt.tight_layout()
"""Compare Photometric Gas Metalicity and Original Stellar Metalicity"""
for i in range(numzbin-1):
plt.figure()
plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMPhot[i],
color='green', label='Photometric Gas Metalicity $\pm 1\sigma$')
plt.fill_between(
(massbins[:-1]+massbins[1:])/2, avMetperHMPhot[i] + stdMetperHMPhot[i],
avMetperHMPhot[i] - stdMetperHMPhot[i], alpha=0.3,
color='green', linestyle='--')
plt.plot((massbins[:-1]+massbins[1:])/2, avMetperHMtrue[i],
color='red', label='True Stellar Metalicity $\pm 1\sigma$')
plt.fill_between(
(massbins[:-1]+massbins[1:])/2, avMetperHMtrue[i] + stdMetperHMtrue[i],
avMetperHMtrue[i] - stdMetperHMtrue[i], alpha=0.3,
color='red', linestyle='--')
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Metalicity', size=12)
plt.title('HorizonAGN, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatch/Gas+StellarMet/gas+stellarmet_' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
plt.close('all')
"""Compute average stellar met for a given halo local density"""
densbins = np.linspace(-2.5, 1, num=100)
medMetperHDtrue = np.zeros([numzbin, np.size(densbins)-1])
avMetperHDtrue = np.zeros([numzbin, np.size(densbins)-1])
stdMetperHDtrue = np.zeros([numzbin, np.size(densbins)-1])
# supMetperHM = np.zeros([numzbin, np.size(massbins)-1])
# infMetperHM = np.zeros([numzbin, np.size(massbins)-1])
for i in range(numzbin-1):
indices_selec = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
gal_stemet = galdata_allz['Stellar_met_boost'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))]
for j in range(np.size(densbins)-1):
d1 = densbins[j]
d2 = densbins[j+1]
indices = np.where(np.logical_and(
np.log10(haloes_env[i][:, 0]) > d1,
np.log10(haloes_env[i][:, 0]) <= d2))[0]
indices = np.intersect1d(indices_selec, indices)
if len(indices) > 0:
avMetperHDtrue[i, j] = np.average(gal_stemet[indices])
medMetperHDtrue[i, j] = np.median(gal_stemet[indices])
stdMetperHDtrue[i, j] = np.std(gal_stemet[indices])
else:
avMetperHDtrue[i, j] = np.nan
medMetperHDtrue[i, j] = np.nan
stdMetperHDtrue[i, j] = np.nan
"""Evolution of stellar metalicity with environment density"""
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
)
plt.hist2d(
np.log10(haloes_env[i][indices, 0][0]),
galdata_allz['Stellar_met_boost'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))],
bins=100, cmin=1
)
plt.colorbar()
plt.plot((densbins[:-1]+densbins[1:])/2, avMetperHDtrue[i],
color='red', label='Average Original Stellar Metalicity $\pm 1\sigma$')
plt.fill_between(
(densbins[:-1]+densbins[1:])/2, avMetperHDtrue[i] + stdMetperHDtrue[i],
avMetperHDtrue[i] - stdMetperHDtrue[i], alpha=0.3,
color='red', linestyle='--')
plt.legend()
plt.xlabel('Halo local density smoothed at 3Mpc (log)', size=12)
plt.ylabel('Stellar Metalicity', size=12)
plt.title('Original HorizonAGN, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
plt.tight_layout()
# plt.savefig('../Plots/HAGN_Matching/ClotMatch/StellarMet/Stellarmet_Density_' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Density of haloes versus halo mass"""
for i in range(numzbin-1):
plt.figure()
# Comment this if you want to plot all haloes and not only central haloes with central galaxies
indices = np.where(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
)
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(haloes_env[i][indices, 0][0]),
bins=100, cmin=1
)
plt.colorbar()
plt.legend()
plt.xlabel('Halo Mass', size=12)
plt.ylabel('Halo local density', size=12)
plt.title('Original HorizonAGN, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
plt.tight_layout()
"""Original Ms/Mh versus density"""
# compute average and std deviation
densbins = np.linspace(-2.5, 1, num=100)
medMSMHperHDtrue = np.zeros([numzbin, np.size(densbins)-1])
avMSMHperHDtrue = np.zeros([numzbin, np.size(densbins)-1])
stdMSMHperHDtrue = np.zeros([numzbin, np.size(densbins)-1])
# supMetperHM = np.zeros([numzbin, np.size(massbins)-1])
# infMetperHM = np.zeros([numzbin, np.size(massbins)-1])
for i in range(numzbin-1):
indices_selec = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
for j in range(np.size(densbins)-1):
d1 = densbins[j]
d2 = densbins[j+1]
indices = np.where(np.logical_and(
np.log10(haloes_env[i][:, 0]) > d1,
np.log10(haloes_env[i][:, 0]) <= d2))[0]
indices = np.intersect1d(indices_selec, indices)
if len(indices) > 0:
avMSMHperHDtrue[i, j] = np.average(
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1] /
halodata[i]['Mass'][indices]))
medMSMHperHDtrue[i, j] = np.median(
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1] /
halodata[i]['Mass'][indices]))
stdMSMHperHDtrue[i, j] = np.std(
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1] /
halodata[i]['Mass'][indices]))
else:
avMSMHperHDtrue[i, j] = np.nan
medMSMHperHDtrue[i, j] = np.nan
stdMSMHperHDtrue[i, j] = np.nan
"""Plot Original Ms/Mh versus density"""
for i in range(numzbin-1):
plt.figure()
indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
# indices = np.where(hal_centgal[i] > 0)
plt.hist2d(
np.log10(haloes_env[i][indices, 0][0]),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1] /
halodata[i]['Mass'][indices]),
bins=100, cmin=1)
plt.colorbar()
plt.plot((densbins[:-1]+densbins[1:])/2, avMSMHperHDtrue[i],
color='red', label='Average $\pm 1\sigma$')
plt.fill_between(
(densbins[:-1]+densbins[1:])/2, avMSMHperHDtrue[i] + stdMSMHperHDtrue[i],
avMSMHperHDtrue[i] - stdMSMHperHDtrue[i], alpha=0.3,
color='red', linestyle='--')
plt.legend()
plt.xlabel('Log(Halo density)', size=12)
plt.ylabel('Log($M_{*}/M_{h}$)', size=12)
plt.title('Original HorizonAGN, Central gal, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatch/Density/dens_msmh' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Plot Hexbins of everything for original lightcone catalog"""
# Trace a line for node distance vs halo mass
# x = np.linspace(10, 14)
# y = 0.375*x - 4.75
for i in range(numzbin-1):
plt.figure()
indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
# indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] > 1))
# indices = np.where(hal_centgal[i] > 0)
plt.hexbin(
# np.log10(halodata[i]['mass'][indices]*10**11),
# np.log10(galdata[i]['mass'][hal_centgal[i][indices]-1]*10**11),
np.log10(halodata[i]['Mass'][indices]*10**11),
# np.log10(halodata[i]['mvir'][indices]*10**11),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
# np.log10(haloes_env[i][indices, 1][0]),
C=np.log10(galdata[i]['SFRcorr'][hal_centgal[i][indices]-1] /
(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11)),
# C=np.log10(galdata[i]['spin'][hal_centgal[i][indices]-1]),
# C=np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]/halodata[i]['Mass'][indices]),
# C=np.log10(haloes_env[i][indices, 1][0]),
# C=np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]),
gridsize=60, mincnt=50, cmap='jet', extent=[10, 14, 8, 12]
)
cb = plt.colorbar()
cb.set_label('Log(sSFR)', size=12)
plt.xlabel('Log(Halo Mass)', size=12)
plt.ylabel('Log(Stellar Mass)', size=12)
plt.title('Original HorizonAGN, Central haloes, z=' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HorizonAGN/Hexbins/NodesFilaments/HM_Fil_MsMh_' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Plot Hexbins on one fig"""
boofitsSMbins = np.linspace(8, 12, num=100)
fig = plt.figure(figsize=(12, 4))
gs = GridSpec(1, 3, width_ratios=[1, 1, 1])
for i in range(3):
ax1 = plt.subplot(gs[i])
indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
# indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] > 1))
# indices = np.where(hal_centgal[i] > 0)
im = ax1.hexbin(
halodata[i]['z'][indices],
np.log10(halodata[i]['mass'][indices]*10**11),
# np.log10(galdata[i]['mass'][hal_centgal[i][indices]-1]*10**11),
# np.log10(halodata[i]['Mass'][indices]*10**11),
# np.log10(halodata[i]['mvir'][indices]*10**11),
# np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
# np.log10(haloes_env[i][indices, 0][0]),
C=np.log10(galdata[i]['SFRcorr'][hal_centgal[i][indices]-1] /
(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11)),
# C=np.log10(galdata[i]['SFRcorr'][hal_centgal[i][indices]-1]),
# C=np.log10(galdata[i]['spin'][hal_centgal[i][indices]-1]),
# C=np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]/halodata[i]['Mass'][indices]),
# C=np.log10(haloes_env[i][indices, 1][0]),
# C=np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]),
# gridsize=60, mincnt=50, cmap='jet', extent=[10, 14, 8, 12]
gridsize=60, mincnt=50, cmap='jet', # vmin=-10.3, vmax=-8.7,
extent=[zbins_Cone[i], zbins_Cone[i+1], 10, 12.5]
)
# ax1.plot(boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
# boofitsSMbins,
# label=str('Behroozi function fit'), c='black')
# ax1.axvline(x=MhaloPeak_true_boo[i], linestyle='--')
axins1 = inset_axes(
ax1,
width="10%", # width = 10% of parent_bbox width
height="35%", # height : 50%
loc=8,
bbox_to_anchor=[0.5, 0, 0.5, 1],
bbox_transform=ax1.transAxes
)
cbar = fig.colorbar(im, cax=axins1)
# cbar.set_label('Log(sSFR)', size=10)
# ax1.set_xlabel('Log($M_{h}/M_{\odot}$)', size=12)
# ax1.set_ylabel('Log($M_{*}/M_{\odot}$)', size=12)
# cbar.set_label('Log(Ms)', size=10)
cbar.set_label('Log(sSFR)', size=10)  # the active C value above is SFRcorr/Mass, i.e. a specific SFR
# ax1.set_xlabel('Log($M_{h}/M_{\odot}$)', size=12)
ax1.set_xlabel('Redshift', size=12)
# ax1.set_ylabel('Log($d_{node}/Mpc$)', size=12)
# ax1.set_ylabel('Log($n(halo)/Mpc^{-3}$)', size=12)
ax1.set_ylabel('Log($M_{h}/M_{\odot}$)', size=12)
cbar.ax.tick_params(labelsize=9)
tick_locator = ticker.MaxNLocator(nbins=5)
cbar.locator = tick_locator
cbar.update_ticks()
plt.text(0.7, 0.9, str(zbins_Cone[i])+'<z<'+str(zbins_Cone[i+1]),
size=12, transform=ax1.transAxes, bbox=dict(boxstyle='round', facecolor='white'))
fig.tight_layout()
"""Plot sSFR hexbins for photo catalog"""
fig = plt.figure(figsize=(12, 4))
gs = GridSpec(1, 3, width_ratios=[1, 1, 1])
for i in range(3):
ax1 = plt.subplot(gs[i])
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
np.logical_and(
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
] > 0,
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')] > 9
)
)
)
# indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] > 1))
# indices = np.where(hal_centgal[i] > 0)
im = ax1.hexbin(
np.log10(halodata[i]['Mass'][indices]*10**11),
galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
],
C=np.log10(galphot['SFR'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
] / 10**(galphot['Mass'][
galdata_allz['Obs_gal_idx'][
hal_centgal[i][indices] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')])
),
gridsize=60, mincnt=50, cmap='jet', extent=[10, 14, 8, 12],
)
axins1 = inset_axes(
ax1,
width="10%", # width = 10% of parent_bbox width
height="35%", # height : 50%
loc=8,
bbox_to_anchor=[0.5, 0, 0.5, 1],
bbox_transform=ax1.transAxes
)
cbar = fig.colorbar(im, cax=axins1)
# cbar.set_label('Log(sSFR)', size=10)
ax1.set_xlabel('Log($M_{h}/M_{\odot}$)', size=12)
ax1.set_ylabel('Log($M_{*}/M_{\odot}$)', size=12)
cbar.ax.tick_params(labelsize=9)
tick_locator = ticker.MaxNLocator(nbins=5)
cbar.locator = tick_locator
cbar.update_ticks()
plt.text(0.1, 0.9, str(zbins_Cone[i])+'<z<'+str(zbins_Cone[i+1]),
size=12, transform=ax1.transAxes, bbox=dict(boxstyle='round', facecolor='white'))
fig.tight_layout()
"""Plot sSFR versus Halo mass"""
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
np.log10(halodata[i]['Mass']*10**11) > 0
)
)
plt.hist2d(
np.log10(galdata[i]['SFRcorr'][hal_centgal[i][indices]-1] /
(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11)),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1] / halodata[i]['Mass'][indices]),
range=[[-12, -8], [-4, 0]], bins=100, cmin=1
)
    plt.xlabel('Log(sSFR)', size=20)
    plt.ylabel('Log(Ms/Mh)', size=20)
"""Plot sSFR vs SM/HM"""
for i in range(numzbin):
plt.figure()
indices = np.where(np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1))
plt.hist2d(
np.log10(galdata[i]['SFRcorr'][hal_centgal[i][indices]-1]),
galdata[i]['Mass'][hal_centgal[i][indices]-1]/halodata[i]['Mass'][indices],
range=[[-2, 2], [-4, 0]], bins=100, cmin=20
)
plt.colorbar()
plt.xlabel('Log(SFR)', size=20)
plt.ylabel('Log(SM/HM)', size=20)
plt.title('Original HorizonAGN, Central gal, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
plt.tight_layout()
"""Select galaxies with distance to node < 10**-0.5"""
d = 10**-0.5
for i in range(numzbin-1):
plt.figure()
# plot histogram for halos with distance to node > 10**-0.5 Mpc
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
haloes_env[i][:, 2] > d
)
)
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
bins=100, cmin=1)
plt.colorbar()
    # add a scatter for haloes with d(node) < 10**-0.5 Mpc
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
haloes_env[i][:, 2] < d
)
)
print('N haloes close to nodes : ' + str(len(indices[0])))
plt.scatter(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
c='red', label=('Haloes with d(Node)<10**-0.5 Mpc'))
plt.legend()
plt.xlabel('Log(Halo Mass)', size=12)
plt.ylabel('Log(Stellar Mass)', size=12)
plt.title('Original HorizonAGN, Central gal, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HorizonAGN/Hexbins/NodesFilaments/Ms_Mh_distanceSeparation' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Plot Hexbins for the photometric catalog"""
# selection of relevant galaxies (central with level 1 halo and matched)
indices_allz = []
galphotselec = []
for i in range(numzbin-1):
indices_allz.append(np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))] > 0
)
))
galphotselec.append(galphot[
galdata_allz['Obs_gal_idx'][
hal_centgal[i][:] - 1 + sum(len(galdata[j]) for j in range(i))
].astype('int')
])
for i in range(numzbin-1):
plt.figure()
indices = np.intersect1d(indices_allz[i], np.where(galphotselec[i]['Mass'] > 9))
plt.hexbin(
galphotselec[i]['Mass'][indices],
# galphotselec[i]['mag_u'][indices],
galphotselec[i]['mag_J'][indices],
C=galphotselec[i]['Mass'][indices] - np.log10(halodata[i]['Mass'][indices]*10**11),
# np.log10(haloes_env[i][indices, 2][0]),
# galphotselec[i]['Mass'][indices],
# C=np.log10(galphotselec[i]['SFR'][indices]/(galphotselec[i]['Mass'][indices]*10**11)),
# C=np.log10(galphotselec[i]['SFR'][indices]),
# C=np.log10(haloes_env[i][indices, 2][0]),
# galphotselec[i]['mag_K'][indices],
# C=galphotselec[i]['mag_J'][indices]-galphotselec[i]['mag_u'][indices],
gridsize=60, mincnt=20, cmap='jet', extent=[9, 12, 20, 30]
)
cb = plt.colorbar()
cb.set_label('Log(Ms/Mh)', size=12)
plt.xlabel('Stellar mass', size=12)
plt.ylabel('Mag J', size=12)
plt.title('Photometric HorizonAGN, Central gal, z=' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatch/Hexbins/Colors/J_U_MsMH_' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
"""Plot gas mass vs Halo mass"""
for i in range(numzbin-1):
plt.figure()
indices = indices_allz[i]
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galphotselec[i]['Gas_mass'][indices] / (halodata[i]['Mass'][indices]*10**11)),
bins=50, cmin=20, range=[[10, 12], [-1.5, -0.5]]
)
plt.xlabel('Log(Halo mass)', size=12)
    plt.ylabel('Log(Gas mass / Halo mass)', size=12)
    plt.title('Photometric HorizonAGN, Central gal, z=' +
              str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
""" Compute average gas mass per halo mass"""
def averageperHM(data, data_name, indices_selec, numzbin, massbins):
"""Retun average, median and standard eviation of the data per halo mass.
Routine to compute useful info on the data.
Warning : it is full of particular cases, as for instance for gas mass I take only
positiove masses, and I compute them on a logscale.
"""
medperHM = np.zeros([numzbin, np.size(massbins)-1])
avperHM = np.zeros([numzbin, np.size(massbins)-1])
stdperHM = np.zeros([numzbin, np.size(massbins)-1])
for i in range(numzbin):
for j in range(np.size(massbins)-1):
m1 = massbins[j]
m2 = massbins[j+1]
indices = np.where(np.logical_and(
np.logical_and(
np.log10(halodata[i]['Mass']*10**11) > m1,
np.log10(halodata[i]['Mass']*10**11) <= m2),
data[i][data_name] > 0
))[0]
indices = np.intersect1d(indices_selec[i], indices)
if len(indices) > 0:
avperHM[i, j] = np.average(np.log10(data[i][data_name][indices]))
medperHM[i, j] = np.median(np.log10(data[i][data_name][indices]))
stdperHM[i, j] = np.std(np.log10(data[i][data_name][indices]))
else:
avperHM[i, j] = np.nan
medperHM[i, j] = np.nan
stdperHM[i, j] = np.nan
return avperHM, medperHM, stdperHM
massbins = np.linspace(10, 13, num=20)
avGMperHM, medGMperHM, stdGMperHM = averageperHM(galphotselec, 'Gas_mass',
indices_allz, 3, massbins)
"""Plot Gas_mass versus Halo_mass"""
for i in range(numzbin-1):
plt.figure()
indices = indices_allz[i]
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galphotselec[i]['Gas_mass'][indices]) / np.log10(
halodata[i]['Mass'][indices]*10**11),
bins=100, cmin=1, range=[[10, 13], [0.6, 1.1]]
)
plt.colorbar()
# plt.errorbar(
# (massbins[:-1]+massbins[1:])/2, avGMperHM[i],
# yerr=stdGMperHM[i], color='red'
# )
plt.xlabel('Log(Halo virial mass)', size=12)
plt.ylabel('Log(Gas virial mass)/Log(Halo Mass)', size=12)
plt.title('Photometric HorizonAGN, Central gal, z=' +
              str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatch/Hexbins/GasMass/logGMonlogHVM_' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
for i in range(numzbin-1):
plt.figure()
indices = np.intersect1d(indices_allz[i], np.where(galphotselec[i]['Mass'] > 0))
plt.hexbin(
np.log10(halodata[i]['mvir'][indices]*10**11),
np.log10(galphotselec[i]['Gas_mass'][indices]) / np.log10(
halodata[i]['mvir'][indices]*10**11),
# C=galphotselec[i]['Mass'][indices] - np.log10(
# halodata[i]['Mass'][indices]*10**11) ,
gridsize=60, mincnt=10, cmap='jet', extent=[10, 13, 0.6, 1.1]
)
cb = plt.colorbar()
cb.set_label('Log(Ms/Mh)', size=12)
plt.xlabel('Log(Halo mass)', size=12)
plt.ylabel('Log(Gas mass)/Log(Halo Mass)', size=12)
plt.title('Photometric HorizonAGN, Central gal, z=' +
              str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatch/Hexbins/GasMass/logGMonlogHM_' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
cut = 0.85
for i in range(numzbin-1):
plt.figure()
    # plot histogram for all selected haloes (central, level 1, matched, Ms > 9)
indices = indices_allz[i]
indices = np.intersect1d(indices, np.where(galphotselec[i]['Mass'] > 9))
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
galphotselec[i]['Mass'][indices],
bins=100, cmin=1)
plt.colorbar()
    # add a scatter for haloes below the gas-mass cut
indices = np.intersect1d(indices,
np.where(np.log10(galphotselec[i]['Gas_mass'][:]) / np.log10(
halodata[i]['mvir'][:]*10**11) < cut)
)
    print('N haloes below the cut: ' + str(len(indices)))
plt.scatter(
np.log10(halodata[i]['Mass'][indices]*10**11),
galphotselec[i]['Mass'][indices],
        c='red', label=('Haloes with log(Mg)/log(Mh) < ' + str(cut)))
plt.legend()
plt.xlabel('Log(Halo Mass)', size=12)
plt.ylabel('Log(Stellar Mass)', size=12)
    plt.title('Photometric HorizonAGN, Central gal, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HorizonAGN/Hexbins/NodesFilaments/Ms_Mh_distanceSeparation' +
"""Plot colors"""
for i in range(numzbin-1):
plt.figure()
indices = np.intersect1d(indices_allz[i], np.where(galphotselec[i]['Mass'] > 9))
plt.hist2d(
# galphotselec[i]['Mass'][indices],
np.log10(halodata[i]['Mass'][indices]*10**11),
galphotselec[i]['mag_u'][indices],
cmin=1, bins=50
)
"""Test de faire des corner plot"""
from getdist import plots, MCSamples
i = 0
indices = indices_allz[i]
indices = np.intersect1d(indices_allz[i], np.where(galphotselec[i]['Mass'] > 9))
indices = np.intersect1d(indices, np.where(galphotselec[i]['Gas_mass'] > 0) )
# names = ['Ms', 'Mh', 'Ms/Mh', 'J-U', 'U-R']
# data = [
# galphotselec[i]['Mass'][indices],
# np.log10(halodata[i]['Mass'][indices]*10**11),
# galphotselec[i]['Mass'][indices] - np.log10(halodata[i]['Mass'][indices]*10**11),
# galphotselec[i]['mag_J'][indices] - galphotselec[i]['mag_u'][indices],
# galphotselec[i]['mag_u'][indices] - galphotselec[i]['mag_r'][indices],
# ]
names = ['Ms', 'Mh', 'Mg', 'log(Mg)/log(Mh)']
data = [
galphotselec[i]['Mass'][indices],
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galphotselec[i]['Gas_mass'][indices]),
np.log10(galphotselec[i]['Gas_mass'][indices])/np.log10(halodata[i]['Mass'][indices]*10**11),
]
samples = MCSamples(samples=data, names=names)
# If one wants to change the confidence regions of the plots,
# by default they are the 65% and 95% confidence regions
samples.contours = np.array([0.68, 0.95, 0.99])
samples.updateBaseStatistics()
g = plots.getSubplotPlotter()
g.settings.num_plot_contours = 3
g.triangle_plot(samples, filled=True, contours=0.2)
#g.export('statistiques')
#plt.close('all')
"""Try to do Principal component analysis on the data"""
i=2
indices = np.intersect1d(indices_allz[i], np.where(galphotselec[i]['Mass'] > 9))
indices = np.intersect1d(indices, np.where(galphotselec[i]['Gas_mass'] > 0) )
data = np.transpose(np.array([
galphotselec[i]['Mass'][indices],
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galphotselec[i]['Gas_mass'][indices]),
]))
# result = mlab.PCA(data)
# from mpl_toolkits.mplot3d import Axes3D
# x = []
# y = []
# z = []
# for item in result.Y:
# x.append(item[0])
# y.append(item[1])
# z.append(item[2])
# plt.close('all') # close all latent plotting windows
# fig1 = plt.figure() # Make a plotting figure
# ax = Axes3D(fig1) # use the plotting figure to create a Axis3D object.
# pltData = [x,y,z]
# ax.scatter(pltData[0], pltData[1], pltData[2], 'bo') # make a scatter plot of blue dots from the
# data
# # make simple, bare axis lines through space:
# xAxisLine = ((min(pltData[0]), max(pltData[0])), (0, 0), (0,0)) # 2 points make the x-axis line
# at the data extrema along x-axis
# ax.plot(xAxisLine[0], xAxisLine[1], xAxisLine[2], 'r') # make a red line for the x-axis.
# yAxisLine = ((0, 0), (min(pltData[1]), max(pltData[1])), (0,0)) # 2 points make the y-axis line
# at the data extrema along y-axis
# ax.plot(yAxisLine[0], yAxisLine[1], yAxisLine[2], 'r') # make a red line for the y-axis.
# zAxisLine = ((0, 0), (0,0), (min(pltData[2]), max(pltData[2]))) # 2 points make the z-axis line
# at the data extrema along z-axis
# ax.plot(zAxisLine[0], zAxisLine[1], zAxisLine[2], 'r') # make a red line for the z-axis.
# # label the axes
# ax.set_xlabel("x-axis label")
# ax.set_ylabel("y-axis label")
# ax.set_zlabel("y-axis label")
# ax.set_title("The title of the plot")
# plt.show() # show the plot
from sklearn.decomposition import PCA
sk_pca = PCA(n_components=2)
sklearn_result = sk_pca.fit_transform(data)
plt.plot(sklearn_result[:, 0], sklearn_result[:, 1], '.')
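# A minimal sanity check (an addition, not in the original script): how much
# variance do the two retained principal components capture?
# explained_variance_ratio_ is a standard attribute of a fitted sklearn PCA object.
print(sk_pca.explained_variance_ratio_)
print('Total variance explained: {:.3f}'.format(np.sum(sk_pca.explained_variance_ratio_)))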
""""PLot Ms/Mh separating halos with environment"""
# Fit Boo on it :
boo_fit_hd = np.empty([numzbin-1, 5])
boo_cov_hd = np.empty([numzbin-1, 5, 5])
for i in range(numzbin-1):
print(i)
indices = np.where(
np.logical_and(
np.logical_and(
np.log10(galdata[i]['Mass'][hal_centgal[i]-1]*10**11) > 9,
np.log10(haloes_env[i][:, 0]) > -0.5
),
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1)
)
)
boo_fit_hd[i], boo_cov_hd[i] = curve_fit(
boo_MhMs,
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
np.log10(halodata[i]['Mass'][indices]*10**11),
bounds=[[10, 8, 0, 0, 0], [13, 11, 5, 5, 5]],
method='trf')
print(boo_fit_hd)
boofitsSMbins = np.linspace(9, 12, num=100)
for i in range(numzbin-1):
plt.figure()
indices = np.where(
np.logical_and(
np.logical_and(hal_centgal[i] > 0, halodata[i]['level'] == 1),
np.log10(haloes_env[i][:, 0]) > 0
)
)
plt.hist2d(
np.log10(halodata[i]['Mass'][indices]*10**11),
np.log10(galdata[i]['Mass'][hal_centgal[i][indices]-1]*10**11),
bins=100, cmin=1)
plt.colorbar()
# Plot Behroozi fit
# plt.plot(boo_MhMs(boofitsSMbins, *boo_fit_true[i]), boofitsSMbins,
# label=str('True Behroozi function fit'), c='r')
plt.legend()
plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=12)
plt.ylabel('Log($M_{*}$) [Log($M_{\odot}$)]', size=12)
    plt.title('HorizonAGN, Central gal, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.savefig('../Plots/HAGN_Matching/ClotMatchBis/TrueMass_HaloMass_Boofit' +
# str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) + '.pdf')
plt.figure()
for i in range(3):
p = plt.plot(
boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
boofitsSMbins-boo_MhMs(boofitsSMbins, *boo_fit_true[i]),
label=str('All '+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])))
plt.plot(
boo_MhMs(boofitsSMbins, *boo_fit_hd[i]),
boofitsSMbins-boo_MhMs(boofitsSMbins, *boo_fit_hd[i]),
label=str('HD '+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])),
color=p[0].get_color(), linestyle='--')
plt.plot(
boo_MhMs(boofitsSMbins, *boo_fit_ld[i]),
boofitsSMbins-boo_MhMs(boofitsSMbins, *boo_fit_ld[i]),
label=str('LD '+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])),
color=p[0].get_color(), linestyle=':')
plt.legend()
plt.xlabel('Log($M_{h}/M_{\odot}$)', size=12)
plt.ylabel('Log($M_{*}/M_{h}$)', size=12)
"""Load data to compute number of galaxies per halo"""
# Main halos
gal_mainhaloes = []
mainHaloMass = []
for i in range(np.size(zbins_Cone)-2):
gal_mainhaloes.append(
np.loadtxt('../Data/HorizonAGNLaigleCatalogs/Cat_' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+'_Gal_MainHaloes_newb.txt',
dtype='i4'))
# Sub halos
gal_subhaloes = []
subHaloMass = []
for i in range(np.size(zbins_Cone)-2):
gal_subhaloes.append(
np.loadtxt('../Data/HorizonAGNLaigleCatalogs/Cat_' +
str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+'_Gal_SubHaloes_newb.txt',
dtype='i4'))
"""Number of galaxies per halo"""
# Number of halo with minimal mass
# minimum = min(10**11*halodata['Mass'])
# indices = [i for i, v in enumerate(10**11*halodata['Mass']) if v == minimum]
# np.size(indices)
# gal_subhaloes gives the index of the closest halo -> not relevant
# nbgalaxiesperhalos = []
# for i in range(numzbin-1):
# # index j of nbgalaxiesperhalos gives the number of galaxies in the halo of
# # ID = j+1
# nbgalaxiesperhalos.append(np.zeros(np.size(halodata[i]['Mass'])))
# for j in gal_subhaloes[i].astype(int):
# nbgalaxiesperhalos[i][j-1] += 1
nbgalaxiesperhalos_main = []
for i in range(numzbin-1):
# index j of nbgalaxiesperhalos gives the number of galaxies in the halo of
# ID = j+1
nbgalaxiesperhalos_main.append(np.zeros(np.size(halodata[i]['Mass'])))
indices = np.where(gal_mainhaloes[i] > 0)
for j in gal_mainhaloes[i][indices].astype(int):
nbgalaxiesperhalos_main[i][j-1] += 1
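# Equivalent vectorised sketch (an addition, not from the original script); it
# assumes the same convention that halo IDs start at 1. Kept commented out so the
# explicit loop above remains the reference implementation:
# for i in range(numzbin-1):
#     ids = gal_mainhaloes[i][gal_mainhaloes[i] > 0].astype(int)
#     nbgalaxiesperhalos_main[i] = np.bincount(
#         ids - 1, minlength=np.size(halodata[i]['Mass'])).astype(float)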
# nb_centralgalaxies_per_mainhalo = []
# for i in range(numzbin-1):
# print(i)
# nb_centralgalaxies_per_mainhalo.append(np.zeros(np.size(halodata[i]['Mass'])))
# indices = np.where(np.logical_and(
# galdata[i]['level'] == 1,
# gal_mainhaloes[i] > 0))
# for j in gal_mainhaloes[i][indices]:
# nb_centralgalaxies_per_mainhalo[i][j-1] += 1
# WARNING: central galaxies are to be associated using hal_centgal, where only one galaxy is
# associated to each halo
nb_centralgalaxies_per_mainhalo = []
for i in range(numzbin-1):
nb_centralgalaxies_per_mainhalo.append(np.zeros(np.size(halodata[i]['Mass'])))
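    # a halo counts exactly one central galaxy if it has a matched central
    # galaxy (hal_centgal > 0) and is itself a level-1 (main) halo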
nb_centralgalaxies_per_mainhalo[i][(hal_centgal[i] > 0) & (halodata[i]['level'] == 1)] = 1
nb_levelMore1_galaxies_per_mainhalo = []
for i in range(numzbin-1):
print(i)
nb_levelMore1_galaxies_per_mainhalo.append(np.zeros(np.size(halodata[i]['Mass'])))
indices = np.where(np.logical_and(
galdata[i]['level'] >= 1,
gal_mainhaloes[i] > 0))
for j in gal_mainhaloes[i][indices]:
nb_levelMore1_galaxies_per_mainhalo[i][j-1] += 1
nb_level1galaxies_per_mainhalo = []
for i in range(numzbin-1):
print(i)
nb_level1galaxies_per_mainhalo.append(np.zeros(np.size(halodata[i]['Mass'])))
indices = np.where(np.logical_and(
galdata[i]['level'] == 1,
gal_mainhaloes[i] > 0))
for j in gal_mainhaloes[i][indices]:
nb_level1galaxies_per_mainhalo[i][j-1] += 1
nb_level1galaxies_per_mainhalo = []
for i in range(numzbin-1):
print(i)
nb_level1galaxies_per_mainhalo.append(np.zeros(np.size(halodata[i]['Mass'])))
    indices = np.array(sorted(
        set(np.where(gal_mainhaloes[i] > 0)[0]).difference(set(hal_centgal[i]-1))), dtype=int)
for j in gal_mainhaloes[i][indices]:
nb_level1galaxies_per_mainhalo[i][j-1] += 1
"""Plot"""
# for i in range(4):
# plt.hist(nbgalaxiesperhalos[i], bins=range(nbgalaxiesperhalos[i].max().astype(int)))
# plt.yscale('log')
# plt.show()
"""Number galaxy per halo versus Halo Mass"""
# Compute Average mass of halos for a given number of galaxies in the halo
# averageHaloMassPerNgal = []
# for i in range(numzbin-1):
# averageHaloMassPerNgal.append(np.empty(nbgalaxiesperhalos_main[i].astype(int).max()+1))
# for j in range(nbgalaxiesperhalos_main[i].astype(int).max()+1):
# averageHaloMassPerNgal[i][j] = np.mean(
# halodata[i]['Mass'][nbgalaxiesperhalos_main[i] == j])
# Compute average number of galaxies in halos given a halo mass interval
massbins = np.linspace(10, 15, num=100)
averageNgalperHaloMass = np.zeros([numzbin-1, np.size(massbins)-1])
av_centralgalaxies_per_mainhalo = np.zeros([numzbin-1, np.size(massbins)-1])
av_levelMore1_galaxies_per_mainhalo = np.zeros([numzbin-1, np.size(massbins)-1])
av_level1galaxies_per_mainhalo = np.zeros([numzbin-1, np.size(massbins)-1])
for i in range(numzbin-1):
for j in range(np.size(massbins)-1):
m1 = massbins[j]
m2 = massbins[j+1]
averageNgalperHaloMass[i][j] = np.average(
nbgalaxiesperhalos_main[i][
np.logical_and(
np.log10(halodata[i]['Mass']*10**11) > m1,
np.log10(halodata[i]['Mass']*10**11) < m2)
])
av_centralgalaxies_per_mainhalo[i][j] = np.average(
nb_centralgalaxies_per_mainhalo[i][
np.logical_and(
np.log10(halodata[i]['Mass']*10**11) > m1,
np.log10(halodata[i]['Mass']*10**11) < m2)
])
av_levelMore1_galaxies_per_mainhalo[i][j] = np.average(
nb_levelMore1_galaxies_per_mainhalo[i][
np.logical_and(
np.log10(halodata[i]['Mass']*10**11) > m1,
np.log10(halodata[i]['Mass']*10**11) < m2)
])
av_level1galaxies_per_mainhalo[i][j] = np.average(
nb_level1galaxies_per_mainhalo[i][
np.logical_and(
np.log10(halodata[i]['Mass']*10**11) > m1,
np.log10(halodata[i]['Mass']*10**11) < m2)
])
# massbins = np.linspace(10, 15, num=100)
# averageNgalperSubHaloMass = np.zeros([numzbin, np.size(massbins)-1])
# for i in range(numzbin-1):
# for j in range(np.size(massbins)-1):
# m1 = massbins[j]
# m2 = massbins[j+1]
# averageNgalperSubHaloMass[i][j] = np.average(
# nbgalaxiesperhalos[i][
# np.logical_and(
# np.log10(halodata[i]['Mass']*10**11) > m1,
# np.log10(halodata[i]['Mass']*10**11) < m2)
# ])
"""Plot"""
# plt.hist2d(np.log10(halodata[0]['Mass'][nbgalaxiesperhalos_main[0]>0]*10**11),
# nbgalaxiesperhalos_main[0][nbgalaxiesperhalos_main[0]>0], bins=100, cmin=1)
# for i in range(4):
# fig = plt.figure()
# plt.yscale('log')
# plt.scatter(np.log10(halodata[i]['Mass'][nbgalaxiesperhalos_main[i]>0]*10**11),
# nbgalaxiesperhalos_main[i][nbgalaxiesperhalos_main[i]>0],
# marker='.')
# # plt.scatter(np.log10(averageHaloMassPerNgal[i][1:]*10**11),
# # np.arange(1, nbgalaxiesperhalos_main[i].astype(int).max()+1), label='Average Mass')
# plt.title('HorizonAGN, z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]) +
# ', match gal-Mainhalo')
# plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]')
# plt.ylabel('Number of galaxies in the halo')
# plt.legend()
# plt.show()
# for i in range(numzbin-1):
# plt.scatter(
# (massbins[:-1]+massbins[1:])/2, av_levelMore1_galaxies_per_mainhalo[i][:],
# label='z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1]))
# plt.yscale('log')
# plt.ylabel('Average number of galaxies per halo', size=15)
# plt.xlabel('Log($M_{h}$) [Log($M_{\odot}$)]', size=15)
fig = plt.figure(figsize=(12, 4))
gs = GridSpec(1, 3, width_ratios=[1, 1, 1])
for i in range(numzbin-1):
ax1 = plt.subplot(gs[i])
ax1.scatter((massbins[:-1]+massbins[1:])/2, averageNgalperHaloMass[i][:],
label='z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+', all', marker='d')
ax1.scatter((massbins[:-1]+massbins[1:])/2, av_centralgalaxies_per_mainhalo[i][:],
label='z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+', central', marker='.')
ax1.scatter((massbins[:-1]+massbins[1:])/2, av_levelMore1_galaxies_per_mainhalo[i][:],
label='z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+', level>1', marker='+')
ax1.scatter((massbins[:-1]+massbins[1:])/2, av_level1galaxies_per_mainhalo[i][:],
label='z='+str(zbins_Cone[i])+'-'+str(zbins_Cone[i+1])+', level=1', marker='*')
ax1.set_yscale('log')
ax1.legend()
ax1.set_ylabel('Average number of galaxies per halo')
ax1.set_xlabel('Log($M_{h}/M_{\odot}$)')
plt.tight_layout()
|
gpl-3.0
| -2,726,468,782,197,244,400
| 39.112942
| 99
| 0.574976
| false
| 2.674023
| false
| false
| false
|
gdietz/OpenMEE
|
imputation/ui_mice_parameters_page.py
|
1
|
5170
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mice_parameters_page.ui'
#
# Created: Fri Mar 7 09:30:08 2014
# by: PyQt4 UI code generator 4.10.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_WizardPage(object):
def setupUi(self, WizardPage):
WizardPage.setObjectName(_fromUtf8("WizardPage"))
WizardPage.resize(391, 288)
self.verticalLayout = QtGui.QVBoxLayout(WizardPage)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(WizardPage)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.m_spinBox = QtGui.QSpinBox(WizardPage)
self.m_spinBox.setMinimum(1)
self.m_spinBox.setMaximum(20)
self.m_spinBox.setProperty("value", 5)
self.m_spinBox.setObjectName(_fromUtf8("m_spinBox"))
self.horizontalLayout.addWidget(self.m_spinBox)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label_5 = QtGui.QLabel(WizardPage)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.horizontalLayout_2.addWidget(self.label_5)
self.maxit_spinBox = QtGui.QSpinBox(WizardPage)
self.maxit_spinBox.setMinimum(1)
self.maxit_spinBox.setMaximum(20)
self.maxit_spinBox.setProperty("value", 5)
self.maxit_spinBox.setObjectName(_fromUtf8("maxit_spinBox"))
self.horizontalLayout_2.addWidget(self.maxit_spinBox)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.defaultMethod_groupBox = QtGui.QGroupBox(WizardPage)
self.defaultMethod_groupBox.setObjectName(_fromUtf8("defaultMethod_groupBox"))
self.gridLayout = QtGui.QGridLayout(self.defaultMethod_groupBox)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_2 = QtGui.QLabel(self.defaultMethod_groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 0, 0, 1, 1)
self.numeric_comboBox = QtGui.QComboBox(self.defaultMethod_groupBox)
self.numeric_comboBox.setObjectName(_fromUtf8("numeric_comboBox"))
self.gridLayout.addWidget(self.numeric_comboBox, 0, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.defaultMethod_groupBox)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 1, 0, 1, 1)
self.factor_2_levels_comboBox = QtGui.QComboBox(self.defaultMethod_groupBox)
self.factor_2_levels_comboBox.setObjectName(_fromUtf8("factor_2_levels_comboBox"))
self.gridLayout.addWidget(self.factor_2_levels_comboBox, 1, 1, 1, 1)
self.label_4 = QtGui.QLabel(self.defaultMethod_groupBox)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 2, 0, 1, 1)
self.factor_gt_2_levels_comboBox = QtGui.QComboBox(self.defaultMethod_groupBox)
self.factor_gt_2_levels_comboBox.setObjectName(_fromUtf8("factor_gt_2_levels_comboBox"))
self.gridLayout.addWidget(self.factor_gt_2_levels_comboBox, 2, 1, 1, 1)
self.verticalLayout.addWidget(self.defaultMethod_groupBox)
spacerItem2 = QtGui.QSpacerItem(20, 50, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem2)
self.retranslateUi(WizardPage)
QtCore.QMetaObject.connectSlotsByName(WizardPage)
def retranslateUi(self, WizardPage):
WizardPage.setWindowTitle(_translate("WizardPage", "WizardPage", None))
self.label.setText(_translate("WizardPage", "# of multiple imputations:", None))
self.label_5.setText(_translate("WizardPage", "# of iterations:", None))
self.defaultMethod_groupBox.setTitle(_translate("WizardPage", "Imputation Methods", None))
self.label_2.setText(_translate("WizardPage", "numeric covariates:", None))
self.label_3.setText(_translate("WizardPage", "categorical with 2 levels:", None))
self.label_4.setText(_translate("WizardPage", "categorical with \n"
"more than 2 levels:", None))
|
gpl-3.0
| 7,559,098,870,301,518,000
| 51.755102
| 103
| 0.706576
| false
| 3.751814
| false
| false
| false
|
RenolY2/battalion-tools
|
bw_archive/bw_archive_base.py
|
1
|
3311
|
import io
import struct
from array import array
from rxet.helper import read_uint32
class BWResource(object):
def __init__(self, name, size, memview):
self.name = name
self._size = size
self._data = memview
self._fileobj = io.BytesIO(self._data)
@property
def fileobj(self):
return self._fileobj
# File object and data object should be kept up to date together when
# one of them is changed.
@fileobj.setter
def fileobj(self, fobj):
self._fileobj.close()
self._fileobj = fobj
self._data = fobj.getbuffer()
@property
def data(self):
return self._data
@data.setter
def data(self, data):
self._fileobj.close()
self._data = data
self._fileobj = io.BytesIO(self._data)
def pack(self):
#data = self.fileobj.read()
data = self._data#self.fileobj.getbuffer()
#print(self.name, len(data))
return self.name, len(data), data
# Interpret a data entry as a section. If cls is given, an instance of that will be returned.
# When using cls, offset is unused.
def as_section(self, offset=0, cls=None):
if cls is None:
return BWSection(self.name, self._size, self._data, section_offset=offset)
else:
return cls(self.name, self._size, self._data)
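# Illustrative usage sketch for BWResource.as_section (names and values below are
# hypothetical, not taken from this repository):
#   res = BWResource(b"SECT", size, memview)
#   plain = res.as_section(offset=8)      # view the same bytes as a BWSection
#   name, length, data = res.pack()       # serialise back to (name, size, bytes)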
class BWSection(BWResource):
def __init__(self, name, size, memview, section_offset=0):
super().__init__(name, size, memview)
self.entries = []
self._header = self._data[0:section_offset]
self._fileobj.seek(section_offset)
while self._fileobj.tell() < self._size:
name, size, entry_memview = read_section(self._fileobj, memview)
res_obj = BWResource(name, size, entry_memview)
self.entries.append(res_obj)
def pack(self):
packed = io.BytesIO()
packed.write(self._header)
section_size = len(self._header)
for entry in self.entries:
name, size, data = entry.pack()
packed.write(name)
assert size == len(data)
packed.write(struct.pack("I", size))
packed.write(data)
# 4 bytes for the ID, 4 bytes for the length, and the rest is from the data
section_size += 4 + 4 + len(data)
packed_data = packed.getvalue()
packed.close()
return self.name, section_size, packed_data
def as_section(self, offset=0):
return self
class BWArchiveBase(BWSection):
# f should be a file open in binary mode
def __init__(self, f):
# We read the content of the file into memory and put it in a bytearray,
# which is necessary so the content can be modified.
file_content = bytearray(f.read())
#file_content = array("B", f.read())
super().__init__(name=None, size=len(file_content), memview=file_content)
def write(self, f):
unused, size, data = self.pack()
f.write(data)
def read_section(f, memview):
name = f.read(4)
size = read_uint32(f)
offset = f.tell()
data = memoryview(memview[offset:(offset+size)])#f.read(data_len)
f.seek(size, io.SEEK_CUR)
#print(len(memview), len(f.getbuffer()))
return name, size, data
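# Minimal usage sketch for BWArchiveBase (the file name is illustrative):
#   with open("archive.bw", "rb") as f:
#       archive = BWArchiveBase(f)
#   with open("repacked.bw", "wb") as g:
#       archive.write(g)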
|
mit
| 4,123,272,925,999,109,000
| 26.363636
| 97
| 0.595892
| false
| 3.724409
| false
| false
| false
|
Rfam/rfam-production
|
scripts/support/mirnas/auto_commit.py
|
1
|
5214
|
import os
import sys
import argparse
import subprocess
import json
import time
from datetime import date
from subprocess import Popen, PIPE
search_dirs = ["/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch1_chunk1_searches",
"/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch1_chunk2_searches",
"/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch2/searches"]
# ---------------------------------------------------------------------------------------------
def check_desc_ga(DESC, cut_ga):
"""
"""
process = Popen(['grep', "GA", DESC], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
if output.find("%.2f"%float(cut_ga)) == -1:
return False
return True
# ---------------------------------------------------------------------------------------------
def check_family_passes_qc(family_dir):
dir_elements = os.path.split(family_dir)
search_dir = dir_elements[0]
os.chdir(search_dir)
process = Popen(["rqc-all.pl", dir_elements[1]], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output = process.communicate()[1]
if output.find("Family passed with no serious errors") == -1:
return False
return True
# ---------------------------------------------------------------------------------------------
def commit_family(family_dir, mirna_name):
dir_elements = os.path.split(family_dir)
os.chdir(dir_elements[0])
family_dir = dir_elements[1]
process = Popen(['rfnew.pl', '-m', "\"Adding new miRNA family %s \""% (mirna_name), family_dir], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output = process.communicate()[1]
if output.find("This family has been assigned the accession") == -1:
return False
return True
# ---------------------------------------------------------------------------------------------
def calculate_progress(num_to_commit, num_processed):
    return num_processed*100/num_to_commit
# ---------------------------------------------------------------------------------------------
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--mirna-ids", help="A .json file with miRNAs to commit", action="store")
parser.add_argument("--skip", help="A list of miRNA ids to skip", action="store", default=None)
parser.add_argument("--log-dir", help="Log file destination", action="store", default=os.getcwd())
parser.add_argument("--verbose", help="Display progress messages", action="store_true", default=False)
parser.add_argument("--no-qc", help="Skips QC step", action="store_true", default=False)
return parser
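# Example invocation (file and directory names are illustrative):
#   python auto_commit.py --mirna-ids mirnas_to_commit.json --log-dir logs --verbose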
# ---------------------------------------------------------------------------------------------
if __name__=='__main__':
parser = parse_arguments()
args = parser.parse_args()
fp = open(args.mirna_ids, 'r')
miRNA_accessions = json.load(fp)
fp.close()
existing_fams = {}
if args.skip is not None:
fp = open("/hps/nobackup/production/xfam/rfam/RELEASES/14.3/input/existing_mirna_families.json", 'r')
existing_fams = json.load(fp)
fp.close()
committed = {}
num_to_commit = len(miRNA_accessions.keys())
count_processed = 0
#skip = ["MIPF0001496__mir-6012", "MIPF0001508__mir-4504", "MIPF0001511__mir-4803"]
#skip = []
#for miRNA in skip:
# del(miRNA_accessions[miRNA])
fp = open(os.path.join(args.log_dir, 'failed_mirna_commits_'+str(date.today())+'.log'), 'w')
for accession in miRNA_accessions.keys():
if accession not in committed:
dir_label = ''
if accession.find("_relabelled")==-1:
dir_label = accession+"_relabelled"
for search_dir in search_dirs:
family_dir_loc = os.path.join(search_dir, dir_label)
if os.path.exists(family_dir_loc):
desc_file = os.path.join(family_dir_loc, "DESC")
if check_desc_ga(desc_file, miRNA_accessions[accession]) is True:
check = False
if args.no_qc is True:
mirna_name = ""
if accession[0:2]=='MI':
mirna_name = accession.split("_")[2]
else:
mirna_name = accession.split("_")[0]
check = commit_family(family_dir_loc, mirna_name)
elif check_family_passes_qc(family_dir_loc) is True:
mirna_name = ""
if accession[0:2]=='MI':
mirna_name = accession.split("_")[2]
else:
mirna_name = accession.split("_")[0]
check = commit_family(family_dir_loc, mirna_name)
if check is True:
committed[accession] = ""
print ("Family %s committed" % (accession))
else:
fp.write(accession+'\n')
count_processed += 1
else:
continue
#if args.verbose:
# print ("%s%s families processed"%(calculate_progress(num_to_commit, count_processed)))
# close log file
fp.close()
# create a json dump with all successful family commits
print ("\nDumping committed family list...")
fp = open(os.path.join(args.log_dir,"committed_mirnas_"+str(date.today())+".json"), 'w')
json.dump(committed, fp)
fp.close()
print ("\nDone!\n")
|
apache-2.0
| 9,119,662,882,263,894,000
| 29.313953
| 135
| 0.561949
| false
| 3.314685
| false
| false
| false
|
mscuthbert/abjad
|
abjad/tools/documentationtools/ReSTAutosummaryDirective.py
|
1
|
2421
|
# -*- encoding: utf-8 -*-
from abjad.tools import datastructuretools
from abjad.tools.documentationtools.ReSTDirective import ReSTDirective
class ReSTAutosummaryDirective(ReSTDirective):
r'''A ReST Autosummary directive.
::
>>> toc = documentationtools.ReSTAutosummaryDirective()
>>> for item in ['foo.Foo', 'bar.Bar', 'baz.Baz']:
... toc.append(documentationtools.ReSTAutosummaryItem(text=item))
...
>>> toc
ReSTAutosummaryDirective(
children=(
ReSTAutosummaryItem(
text='foo.Foo'
),
ReSTAutosummaryItem(
text='bar.Bar'
),
ReSTAutosummaryItem(
text='baz.Baz'
),
),
directive='autosummary'
)
::
>>> print(toc.rest_format)
.. autosummary::
<BLANKLINE>
foo.Foo
bar.Bar
baz.Baz
'''
### CLASS VARIABLES ###
__documentation_section__ = 'reStructuredText'
### SPECIAL METHODS ###
def __setitem__(self, i, expr):
r'''Sets item `i` to `expr`.
Returns none.
'''
from abjad.tools import documentationtools
newexpr = []
for x in expr:
if isinstance(x, str):
newexpr.append(documentationtools.ReSTAutosummaryItem(text=x))
else:
newexpr.append(x)
datastructuretools.TreeContainer.__setitem__(self, i, newexpr)
### PRIVATE PROPERTIES ###
@property
def _children_rest_format_contributions(self):
result = ['']
for child in self.children:
contribution = child._rest_format_contributions
for x in contribution:
if x:
result.append(' ' + x)
else:
result.append(x)
return result
### PUBLIC PROPERTIES ###
@property
def directive(self):
        r'''Directive of ReST autosummary directive.
Returns ``'autosummary'``.
'''
return 'autosummary'
@property
def node_class(self):
r'''Node class of ReST autosummary directive.
'''
from abjad.tools import documentationtools
return (
documentationtools.ReSTAutosummaryItem,
)
|
gpl-3.0
| 665,987,143,855,517,400
| 25.043011
| 78
| 0.518381
| false
| 4.5
| false
| false
| false
|
apdavison/elephant
|
elephant/test/test_spike_train_dissimilarity.py
|
1
|
29313
|
# -*- coding: utf-8 -*-
"""
Tests for the spike train dissimilarity measures module.
:copyright: Copyright 2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
from neo import SpikeTrain
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import scipy.integrate as spint
from quantities import ms, s, Hz
import elephant.kernels as kernels
import elephant.spike_train_generation as stg
import elephant.spike_train_dissimilarity as stds
class TimeScaleDependSpikeTrainDissimMeasures_TestCase(unittest.TestCase):
def setUp(self):
self.st00 = SpikeTrain([], units='ms', t_stop=1000.0)
self.st01 = SpikeTrain([1], units='ms', t_stop=1000.0)
self.st02 = SpikeTrain([2], units='ms', t_stop=1000.0)
self.st03 = SpikeTrain([2.9], units='ms', t_stop=1000.0)
self.st04 = SpikeTrain([3.1], units='ms', t_stop=1000.0)
self.st05 = SpikeTrain([5], units='ms', t_stop=1000.0)
self.st06 = SpikeTrain([500], units='ms', t_stop=1000.0)
self.st07 = SpikeTrain([12, 32], units='ms', t_stop=1000.0)
self.st08 = SpikeTrain([32, 52], units='ms', t_stop=1000.0)
self.st09 = SpikeTrain([42], units='ms', t_stop=1000.0)
self.st10 = SpikeTrain([18, 60], units='ms', t_stop=1000.0)
self.st11 = SpikeTrain([10, 20, 30, 40], units='ms', t_stop=1000.0)
self.st12 = SpikeTrain([40, 30, 20, 10], units='ms', t_stop=1000.0)
self.st13 = SpikeTrain([15, 25, 35, 45], units='ms', t_stop=1000.0)
self.st14 = SpikeTrain([10, 20, 30, 40, 50], units='ms', t_stop=1000.0)
self.st15 = SpikeTrain([0.01, 0.02, 0.03, 0.04, 0.05],
units='s', t_stop=1000.0)
self.st16 = SpikeTrain([12, 16, 28, 30, 42], units='ms', t_stop=1000.0)
self.st21 = stg.homogeneous_poisson_process(50*Hz, 0*ms, 1000*ms)
self.st22 = stg.homogeneous_poisson_process(40*Hz, 0*ms, 1000*ms)
self.st23 = stg.homogeneous_poisson_process(30*Hz, 0*ms, 1000*ms)
self.rd_st_list = [self.st21, self.st22, self.st23]
self.st31 = SpikeTrain([12.0], units='ms', t_stop=1000.0)
self.st32 = SpikeTrain([12.0, 12.0], units='ms', t_stop=1000.0)
self.st33 = SpikeTrain([20.0], units='ms', t_stop=1000.0)
self.st34 = SpikeTrain([20.0, 20.0], units='ms', t_stop=1000.0)
self.array1 = np.arange(1, 10)
self.array2 = np.arange(1.2, 10)
self.qarray1 = self.array1 * Hz
self.qarray2 = self.array2 * Hz
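        # each timescale tau_k below is paired with the Victor-Purpura cost
        # parameter q_k = 1 / tau_k used throughout the tests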
self.tau0 = 0.0 * ms
self.q0 = np.inf / ms
self.tau1 = 0.000000001 * ms
self.q1 = 1.0 / self.tau1
self.tau2 = 1.0 * ms
self.q2 = 1.0 / self.tau2
self.tau3 = 10.0 * ms
self.q3 = 1.0 / self.tau3
self.tau4 = 100.0 * ms
self.q4 = 1.0 / self.tau4
self.tau5 = 1000000000.0 * ms
self.q5 = 1.0 / self.tau5
self.tau6 = np.inf * ms
self.q6 = 0.0 / ms
self.tau7 = 0.01 * s
self.q7 = 1.0 / self.tau7
self.t = np.linspace(0, 200, 20000001) * ms
def test_wrong_input(self):
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.array1, self.array2], self.q3)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.qarray1, self.qarray2], self.q3)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.qarray1, self.qarray2], 5.0 * ms)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.array1, self.array2], self.q3,
algorithm='intuitive')
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.qarray1, self.qarray2], self.q3,
algorithm='intuitive')
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.qarray1, self.qarray2], 5.0 * ms,
algorithm='intuitive')
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.array1, self.array2], self.tau3)
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.qarray1, self.qarray2], self.tau3)
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.qarray1, self.qarray2], 5.0 * Hz)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.st11, self.st13], self.tau2)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.st11, self.st13], 5.0)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.st11, self.st13], self.tau2,
algorithm='intuitive')
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.st11, self.st13], 5.0,
algorithm='intuitive')
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.st11, self.st13], self.q4)
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.st11, self.st13], 5.0)
self.assertRaises(NotImplementedError, stds.victor_purpura_dist,
[self.st01, self.st02], self.q3,
kernel=kernels.Kernel(2.0 / self.q3))
self.assertRaises(NotImplementedError, stds.victor_purpura_dist,
[self.st01, self.st02], self.q3,
kernel=kernels.SymmetricKernel(2.0 / self.q3))
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st02], self.q1,
kernel=kernels.TriangularKernel(
2.0 / (np.sqrt(6.0) * self.q2)))[0, 1],
stds.victor_purpura_dist(
[self.st01, self.st02], self.q3,
kernel=kernels.TriangularKernel(
2.0 / (np.sqrt(6.0) * self.q2)))[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st02],
kernel=kernels.TriangularKernel(
2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0)
self.assertNotEqual(stds.victor_purpura_dist(
[self.st01, self.st02],
kernel=kernels.AlphaKernel(
2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0)
self.assertRaises(NameError, stds.victor_purpura_dist,
[self.st11, self.st13], self.q2, algorithm='slow')
def test_victor_purpura_distance_fast(self):
# Tests of distances of simplest spike trains:
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st00], self.q2)[0, 1], 0.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st01], self.q2)[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st00], self.q2)[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st01], self.q2)[0, 1], 0.0)
# Tests of distances under elementary spike operations
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st02], self.q2)[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st03], self.q2)[0, 1], 1.9)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st04], self.q2)[0, 1], 2.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st05], self.q2)[0, 1], 2.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st07], self.q2)[0, 1], 2.0)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st08], self.q4)[0, 1], 0.4)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st10], self.q3)[0, 1], 0.6 + 2)
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st14], self.q2)[0, 1], 1)
# Tests on timescales
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st14], self.q1)[0, 1],
stds.victor_purpura_dist(
[self.st11, self.st14], self.q5)[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q0)[0, 1], 6.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q1)[0, 1], 6.0)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q5)[0, 1], 2.0, 5)
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q6)[0, 1], 2.0)
# Tests on unordered spiketrains
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st13], self.q4)[0, 1],
stds.victor_purpura_dist(
[self.st12, self.st13], self.q4)[0, 1])
self.assertNotEqual(stds.victor_purpura_dist(
[self.st11, self.st13], self.q4,
sort=False)[0, 1],
stds.victor_purpura_dist(
[self.st12, self.st13], self.q4,
sort=False)[0, 1])
# Tests on metric properties with random spiketrains
# (explicit calculation of second metric axiom in particular case,
# because from dist_matrix it is trivial)
dist_matrix = stds.victor_purpura_dist(
[self.st21, self.st22, self.st23], self.q3)
for i in range(3):
for j in range(3):
self.assertGreaterEqual(dist_matrix[i, j], 0)
if dist_matrix[i, j] == 0:
assert_array_equal(self.rd_st_list[i], self.rd_st_list[j])
assert_array_equal(stds.victor_purpura_dist(
[self.st21, self.st22], self.q3),
stds.victor_purpura_dist(
[self.st22, self.st21], self.q3))
self.assertLessEqual(dist_matrix[0, 1],
dist_matrix[0, 2] + dist_matrix[1, 2])
self.assertLessEqual(dist_matrix[0, 2],
dist_matrix[1, 2] + dist_matrix[0, 1])
self.assertLessEqual(dist_matrix[1, 2],
dist_matrix[0, 1] + dist_matrix[0, 2])
# Tests on proper unit conversion
self.assertAlmostEqual(
stds.victor_purpura_dist([self.st14, self.st16], self.q3)[0, 1],
stds.victor_purpura_dist([self.st15, self.st16], self.q3)[0, 1])
self.assertAlmostEqual(
stds.victor_purpura_dist([self.st16, self.st14], self.q3)[0, 1],
stds.victor_purpura_dist([self.st16, self.st15], self.q3)[0, 1])
self.assertEqual(
stds.victor_purpura_dist([self.st01, self.st05], self.q3)[0, 1],
stds.victor_purpura_dist([self.st01, self.st05], self.q7)[0, 1])
# Tests on algorithmic behaviour for equal spike times
self.assertEqual(
stds.victor_purpura_dist([self.st31, self.st34], self.q3)[0, 1],
0.8 + 1.0)
self.assertEqual(
stds.victor_purpura_dist([self.st31, self.st34], self.q3)[0, 1],
stds.victor_purpura_dist([self.st32, self.st33], self.q3)[0, 1])
self.assertEqual(
stds.victor_purpura_dist(
[self.st31, self.st33], self.q3)[0, 1] * 2.0,
stds.victor_purpura_dist(
[self.st32, self.st34], self.q3)[0, 1])
        # Tests on spike train list lengths smaller than 2
self.assertEqual(stds.victor_purpura_dist(
[self.st21], self.q3)[0, 0], 0)
self.assertEqual(len(stds.victor_purpura_dist([], self.q3)), 0)
def test_victor_purpura_distance_intuitive(self):
# Tests of distances of simplest spike trains
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st00], self.q2,
algorithm='intuitive')[0, 1], 0.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st01], self.q2,
algorithm='intuitive')[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st00], self.q2,
algorithm='intuitive')[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st01], self.q2,
algorithm='intuitive')[0, 1], 0.0)
# Tests of distances under elementary spike operations
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st02], self.q2,
algorithm='intuitive')[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st03], self.q2,
algorithm='intuitive')[0, 1], 1.9)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st04], self.q2,
algorithm='intuitive')[0, 1], 2.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st05], self.q2,
algorithm='intuitive')[0, 1], 2.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st07], self.q2,
algorithm='intuitive')[0, 1], 2.0)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st08], self.q4,
algorithm='intuitive')[0, 1], 0.4)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st10], self.q3,
algorithm='intuitive')[0, 1], 2.6)
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st14], self.q2,
algorithm='intuitive')[0, 1], 1)
# Tests on timescales
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st14], self.q1,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st11, self.st14], self.q5,
algorithm='intuitive')[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q0,
algorithm='intuitive')[0, 1], 6.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q1,
algorithm='intuitive')[0, 1], 6.0)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q5,
algorithm='intuitive')[0, 1], 2.0, 5)
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q6,
algorithm='intuitive')[0, 1], 2.0)
# Tests on unordered spiketrains
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st13], self.q4,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st12, self.st13], self.q4,
algorithm='intuitive')[0, 1])
self.assertNotEqual(stds.victor_purpura_dist(
[self.st11, self.st13], self.q4,
sort=False, algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st12, self.st13], self.q4,
sort=False, algorithm='intuitive')[0, 1])
# Tests on metric properties with random spiketrains
# (explicit calculation of second metric axiom in particular case,
# because from dist_matrix it is trivial)
dist_matrix = stds.victor_purpura_dist(
[self.st21, self.st22, self.st23],
self.q3, algorithm='intuitive')
for i in range(3):
for j in range(3):
self.assertGreaterEqual(dist_matrix[i, j], 0)
if dist_matrix[i, j] == 0:
assert_array_equal(self.rd_st_list[i], self.rd_st_list[j])
assert_array_equal(stds.victor_purpura_dist(
[self.st21, self.st22], self.q3,
algorithm='intuitive'),
stds.victor_purpura_dist(
[self.st22, self.st21], self.q3,
algorithm='intuitive'))
self.assertLessEqual(dist_matrix[0, 1],
dist_matrix[0, 2] + dist_matrix[1, 2])
self.assertLessEqual(dist_matrix[0, 2],
dist_matrix[1, 2] + dist_matrix[0, 1])
self.assertLessEqual(dist_matrix[1, 2],
dist_matrix[0, 1] + dist_matrix[0, 2])
# Tests on proper unit conversion
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st14, self.st16], self.q3,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st15, self.st16], self.q3,
algorithm='intuitive')[0, 1])
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st16, self.st14], self.q3,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st16, self.st15], self.q3,
algorithm='intuitive')[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st05], self.q3,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st01, self.st05], self.q7,
algorithm='intuitive')[0, 1])
# Tests on algorithmic behaviour for equal spike times
self.assertEqual(stds.victor_purpura_dist(
[self.st31, self.st34], self.q3,
algorithm='intuitive')[0, 1],
0.8 + 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st31, self.st34], self.q3,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st32, self.st33], self.q3,
algorithm='intuitive')[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st31, self.st33], self.q3,
algorithm='intuitive')[0, 1] * 2.0,
stds.victor_purpura_dist(
[self.st32, self.st34], self.q3,
algorithm='intuitive')[0, 1])
        # Tests on spike train list lengths smaller than 2
self.assertEqual(stds.victor_purpura_dist(
[self.st21], self.q3,
algorithm='intuitive')[0, 0], 0)
self.assertEqual(len(stds.victor_purpura_dist(
[], self.q3, algorithm='intuitive')), 0)
def test_victor_purpura_algorithm_comparison(self):
assert_array_almost_equal(
stds.victor_purpura_dist([self.st21, self.st22, self.st23],
self.q3),
stds.victor_purpura_dist([self.st21, self.st22, self.st23],
self.q3, algorithm='intuitive'))
def test_van_rossum_distance(self):
# Tests of distances of simplest spike trains
self.assertEqual(stds.van_rossum_dist(
[self.st00, self.st00], self.tau2)[0, 1], 0.0)
self.assertEqual(stds.van_rossum_dist(
[self.st00, self.st01], self.tau2)[0, 1], 1.0)
self.assertEqual(stds.van_rossum_dist(
[self.st01, self.st00], self.tau2)[0, 1], 1.0)
self.assertEqual(stds.van_rossum_dist(
[self.st01, self.st01], self.tau2)[0, 1], 0.0)
# Tests of distances under elementary spike operations
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st01, self.st02], self.tau2)[0, 1],
float(np.sqrt(2*(1.0-np.exp(-np.absolute(
((self.st01[0]-self.st02[0]) /
self.tau2).simplified))))))
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st01, self.st05], self.tau2)[0, 1],
float(np.sqrt(2*(1.0-np.exp(-np.absolute(
((self.st01[0]-self.st05[0]) /
self.tau2).simplified))))))
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st01, self.st05], self.tau2)[0, 1],
np.sqrt(2.0), 1)
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st01, self.st06], self.tau2)[0, 1],
np.sqrt(2.0), 20)
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st00, self.st07], self.tau1)[0, 1],
np.sqrt(0 + 2))
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st07, self.st08], self.tau4)[0, 1],
float(np.sqrt(2*(1.0-np.exp(-np.absolute(
((self.st07[0]-self.st08[-1]) /
self.tau4).simplified))))))
f_minus_g_squared = (
(self.t > self.st08[0]) * np.exp(
-((self.t-self.st08[0])/self.tau3).simplified) +
(self.t > self.st08[1]) * np.exp(
-((self.t-self.st08[1])/self.tau3).simplified) -
(self.t > self.st09[0]) * np.exp(
-((self.t-self.st09[0])/self.tau3).simplified))**2
distance = np.sqrt(2.0 * spint.cumtrapz(
y=f_minus_g_squared, x=self.t.magnitude)[-1] /
self.tau3.rescale(self.t.units).magnitude)
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st08, self.st09], self.tau3)[0, 1], distance, 5)
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st11, self.st14], self.tau2)[0, 1], 1)
# Tests on timescales
self.assertAlmostEqual(
stds.van_rossum_dist([self.st11, self.st14], self.tau1)[0, 1],
stds.van_rossum_dist([self.st11, self.st14], self.tau5)[0, 1])
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st11], self.tau0)[0, 1],
np.sqrt(len(self.st07) + len(self.st11)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st14], self.tau0)[0, 1],
np.sqrt(len(self.st07) + len(self.st14)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st11], self.tau1)[0, 1],
np.sqrt(len(self.st07) + len(self.st11)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st14], self.tau1)[0, 1],
np.sqrt(len(self.st07) + len(self.st14)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st11], self.tau5)[0, 1],
np.absolute(len(self.st07) - len(self.st11)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st14], self.tau5)[0, 1],
np.absolute(len(self.st07) - len(self.st14)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st11], self.tau6)[0, 1],
np.absolute(len(self.st07) - len(self.st11)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st14], self.tau6)[0, 1],
np.absolute(len(self.st07) - len(self.st14)))
# Tests on unordered spiketrains
self.assertEqual(
stds.van_rossum_dist([self.st11, self.st13], self.tau4)[0, 1],
stds.van_rossum_dist([self.st12, self.st13], self.tau4)[0, 1])
self.assertNotEqual(
stds.van_rossum_dist([self.st11, self.st13],
self.tau4, sort=False)[0, 1],
stds.van_rossum_dist([self.st12, self.st13],
self.tau4, sort=False)[0, 1])
# Tests on metric properties with random spiketrains
# (explicit calculation of second metric axiom in particular case,
# because from dist_matrix it is trivial)
dist_matrix = stds.van_rossum_dist(
[self.st21, self.st22, self.st23], self.tau3)
for i in range(3):
for j in range(3):
self.assertGreaterEqual(dist_matrix[i, j], 0)
if dist_matrix[i, j] == 0:
assert_array_equal(self.rd_st_list[i], self.rd_st_list[j])
assert_array_equal(
stds.van_rossum_dist([self.st21, self.st22], self.tau3),
stds.van_rossum_dist([self.st22, self.st21], self.tau3))
self.assertLessEqual(dist_matrix[0, 1],
dist_matrix[0, 2] + dist_matrix[1, 2])
self.assertLessEqual(dist_matrix[0, 2],
dist_matrix[1, 2] + dist_matrix[0, 1])
self.assertLessEqual(dist_matrix[1, 2],
dist_matrix[0, 1] + dist_matrix[0, 2])
# Tests on proper unit conversion
self.assertAlmostEqual(
stds.van_rossum_dist([self.st14, self.st16], self.tau3)[0, 1],
stds.van_rossum_dist([self.st15, self.st16], self.tau3)[0, 1])
self.assertAlmostEqual(
stds.van_rossum_dist([self.st16, self.st14], self.tau3)[0, 1],
stds.van_rossum_dist([self.st16, self.st15], self.tau3)[0, 1])
self.assertEqual(
stds.van_rossum_dist([self.st01, self.st05], self.tau3)[0, 1],
stds.van_rossum_dist([self.st01, self.st05], self.tau7)[0, 1])
# Tests on algorithmic behaviour for equal spike times
f_minus_g_squared = (
(self.t > self.st31[0]) * np.exp(
-((self.t-self.st31[0])/self.tau3).simplified) -
(self.t > self.st34[0]) * np.exp(
-((self.t-self.st34[0])/self.tau3).simplified) -
(self.t > self.st34[1]) * np.exp(
-((self.t-self.st34[1])/self.tau3).simplified))**2
distance = np.sqrt(2.0 * spint.cumtrapz(
y=f_minus_g_squared, x=self.t.magnitude)[-1] /
self.tau3.rescale(self.t.units).magnitude)
self.assertAlmostEqual(stds.van_rossum_dist([self.st31, self.st34],
self.tau3)[0, 1],
distance, 5)
self.assertEqual(stds.van_rossum_dist([self.st31, self.st34],
self.tau3)[0, 1],
stds.van_rossum_dist([self.st32, self.st33],
self.tau3)[0, 1])
self.assertEqual(stds.van_rossum_dist([self.st31, self.st33],
self.tau3)[0, 1] * 2.0,
stds.van_rossum_dist([self.st32, self.st34],
self.tau3)[0, 1])
        # Tests on spike train list lengths smaller than 2
self.assertEqual(stds.van_rossum_dist([self.st21], self.tau3)[0, 0], 0)
self.assertEqual(len(stds.van_rossum_dist([], self.tau3)), 0)
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| -2,688,211,261,997,703,700
| 55.371154
| 79
| 0.495821
| false
| 3.430026
| true
| false
| false
|
icsnju/nap-core
|
nap_rest/orchestration/test/createFromTable.py
|
1
|
1102
|
from orchestration.nap_api.project_create import create_project_from_table
table = []
t = {}
t['name'] = 'master'
t['cpu_shares'] = '1024'
t['mem_limit'] = '32m'
t['command'] = '/usr/sbin/sshd -D'
t['image'] = 'docker.iwanna.xyz:5000/hmonkey/mpi:v1'
t['volumes'] = [{'container_path': '/data', 'host_path': '/va', 'mode': 'rw'}, {'container_path': '/datass', 'host_path': '/vagr', 'mode': 'ro'}]
t['ports'] = [{'container_port': '3200', 'host_port': '32400', 'protocol': 'tcp'}, {'container_port': '3300', 'host_port': '32401', 'protocol': 'udp'}]
table.append(t)
t = {}
t['name'] = 'slave'
t['cpu_shares'] = '1024'
t['mem_limit'] = '32m'
t['command'] = '/usr/sbin/sshd -D'
t['image'] = 'docker.iwanna.xyz:5000/hmonkey/mpi:v1'
t['volumes'] = [{'container_path': '/data', 'host_path': '/va', 'mode': 'rw'}, {'container_path': '/datass', 'host_path': '/vagr', 'mode': 'ro'}]
t['ports'] = [{'container_port': '3200', 'host_port': '32400', 'protocol': 'tcp'}, {'container_port': '3300', 'host_port': '32401', 'protocol': 'udp'}]
table.append(t)
print create_project_from_table('bana', 'tabless', table)
|
apache-2.0
| -3,719,459,185,290,209,000
| 44.916667
| 151
| 0.596189
| false
| 2.562791
| false
| true
| false
|
simphony/simphony-common
|
simphony/io/tests/test_h5_cuds.py
|
1
|
9192
|
import unittest
import os
from contextlib import closing
import shutil
import tempfile
import tables
from simphony.core import CUBA
from simphony.core.data_container import DataContainer
from simphony.io.h5_cuds import H5CUDS
from simphony.io.h5_mesh import H5Mesh
from simphony.io.h5_particles import H5Particles
from simphony.io.h5_lattice import H5Lattice
from simphony.cuds import Mesh, Particles
from simphony.cuds.mesh_items import Edge, Face, Cell, Point
from simphony.cuds.lattice import make_cubic_lattice
from simphony.testing.abc_check_engine import (
ParticlesEngineCheck, MeshEngineCheck,
LatticeEngineCheck)
class TestH5CUDS(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_open_with_append_mode(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'a')) as handle:
self.assertTrue(handle.valid())
def test_open_with_write_mode(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'w')) as handle:
self.assertTrue(handle.valid())
def test_open_with_read_only_mode(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'w')) as handle:
self.assertTrue(handle.valid())
with closing(H5CUDS.open(filename, 'r')) as handle:
self.assertTrue(handle.valid())
def test_open_with_compression_off(self):
filters = tables.Filters(complevel=0)
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'w', filters=filters)) as handle:
self.assertTrue(handle.valid())
with closing(H5CUDS.open(filename, 'r', filters=filters)) as handle:
self.assertTrue(handle.valid())
with closing(H5CUDS.open(filename, 'a', filters=filters)) as handle:
self.assertTrue(handle.valid())
def test_init_with_non_file(self):
with self.assertRaises(Exception):
H5CUDS(None)
def test_valid(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'w')) as handle:
self.assertTrue(handle.valid())
self.assertFalse(handle.valid())
with closing(H5CUDS.open(filename, 'a')) as handle:
self.assertTrue(handle.valid())
self.assertFalse(handle.valid())
def test_closed_file_not_usable(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename)) as handle:
handle.add_dataset(Mesh(name="test_1"))
handle.add_dataset(Particles(name="test_2"))
lattice = make_cubic_lattice("test_3", 1.0, (2, 3, 4))
handle.add_dataset(lattice)
test_h1 = handle.get_dataset("test_1")
test_h2 = handle.get_dataset("test_2")
test_h3 = handle.get_dataset("test_3")
with self.assertRaises(Exception):
handle.get_dataset('test_h1')
with self.assertRaises(Exception):
test_h1.name = 'foo'
with self.assertRaises(Exception):
test_h2.name = 'foo'
with self.assertRaises(Exception):
test_h3.name = 'foo'
class TestH5CUDSVersions(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.existing_filename = os.path.join(self.temp_dir, 'test.cuds')
handle = H5CUDS.open(self.existing_filename)
handle.close()
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_version(self):
with closing(tables.open_file(
self.existing_filename, mode="r")) as h5file:
self.assertTrue(isinstance(h5file.root._v_attrs.cuds_version, int))
def test_incorrect_version(self):
with closing(tables.open_file(
self.existing_filename, mode="a")) as h5file:
h5file.root._v_attrs.cuds_version = -1
with self.assertRaises(ValueError):
H5CUDS.open(self.existing_filename)
class TestParticlesCudsOperations(ParticlesEngineCheck, unittest.TestCase):
def setUp(self):
ParticlesEngineCheck.setUp(self)
self.temp_dir = tempfile.mkdtemp()
self.engines = []
def engine_factory(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
engine = H5CUDS.open(filename)
self.engines.append(engine)
return engine
def check_instance_of_dataset(self, ds):
""" Check if a dataset is instance of a class
"""
self.assertTrue(isinstance(ds, H5Particles))
def test_add_get_dataset_with_cuba_keys_argument(self):
engine = self.engine_factory()
items = self.create_dataset_items()
reference = self.create_dataset(name='test')
expected = self.create_dataset(name='test')
# Add some CUBA data
for particle in items:
particle.data = DataContainer({CUBA.VELOCITY: [1, 0, 0]})
expected.add([particle])
particle.data = DataContainer(
{CUBA.VELOCITY: [1, 0, 0], CUBA.MASS: 1})
reference.add([particle])
# Store reference dataset along with its data
engine.add_dataset(reference, {CUBA.PARTICLE: [CUBA.VELOCITY]})
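        # The cuba_keys argument restricts which CUBA attributes are persisted
        # per item type: only VELOCITY is kept here, so the MASS values on
        # ``reference`` are dropped and the reloaded dataset should equal
        # ``expected``.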
# Closing and reopening the file
engine.close()
engine = self.engine_factory()
ds = engine.get_dataset('test')
self.compare_dataset(ds, expected)
def tearDown(self):
for engine in self.engines:
engine.close()
class TestMeshCudsOperations(MeshEngineCheck, unittest.TestCase):
def setUp(self):
MeshEngineCheck.setUp(self)
self.temp_dir = tempfile.mkdtemp()
self.engines = []
def engine_factory(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
engine = H5CUDS.open(filename)
self.engines.append(engine)
return engine
def check_instance_of_dataset(self, ds):
""" Check if a dataset is instance of a class
"""
self.assertTrue(isinstance(ds, H5Mesh))
def test_add_get_dataset_with_cuba_keys_argument(self):
engine = self.engine_factory()
items = self.create_dataset_items()
reference = self.create_dataset(name='test')
expected = self.create_dataset(name='test')
# Add some CUBA data
for point in [p for p in items if isinstance(p, Point)]:
point.data = DataContainer({CUBA.VELOCITY: [1, 0, 0]})
expected.add([point])
point.data = DataContainer(
{CUBA.VELOCITY: [1, 0, 0], CUBA.MASS: 1})
reference.add([point])
for edge in [e for e in items if isinstance(e, Edge)]:
expected.add([edge])
reference.add([edge])
for face in [f for f in items if isinstance(f, Face)]:
expected.add([face])
reference.add([face])
for cell in [c for c in items if isinstance(c, Cell)]:
expected.add([cell])
reference.add([cell])
# Store reference dataset along with its data
engine.add_dataset(reference, {CUBA.POINT: [CUBA.VELOCITY]})
# Closing and reopening the file
engine.close()
engine = self.engine_factory()
ds = engine.get_dataset('test')
self.compare_dataset(ds, expected)
def tearDown(self):
for engine in self.engines:
engine.close()
class TestLatticeCudsOperations(LatticeEngineCheck, unittest.TestCase):
def setUp(self):
LatticeEngineCheck.setUp(self)
self.temp_dir = tempfile.mkdtemp()
self.engines = []
def engine_factory(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
engine = H5CUDS.open(filename)
self.engines.append(engine)
return engine
def check_instance_of_dataset(self, ds):
""" Check if a dataset is instance of a class
"""
self.assertTrue(isinstance(ds, H5Lattice))
def test_add_get_dataset_with_cuba_keys_argument(self):
engine = self.engine_factory()
reference = self.create_dataset(name='test')
expected = self.create_dataset(name='test')
# Add some CUBA data
for node in reference.iter(item_type=CUBA.NODE):
node.data = DataContainer({CUBA.NAME: 'test_container'})
expected.update([node])
node.data = DataContainer({CUBA.NAME: 'test_container',
CUBA.DENSITY: 2})
reference.update([node])
# Store reference dataset along with its data
engine.add_dataset(reference, {CUBA.NODE: [CUBA.NAME]})
# Closing and reopening the file
engine.close()
engine = self.engine_factory()
ds = engine.get_dataset('test')
self.compare_dataset(ds, expected)
def tearDown(self):
for engine in self.engines:
engine.close()
if __name__ == '__main__':
unittest.main()
|
bsd-2-clause
| 6,869,646,592,674,389,000
| 32.794118
| 79
| 0.619017
| false
| 3.633202
| true
| false
| false
|
lindenb/bedtools2
|
docs/conf.py
|
2
|
8228
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('sphinxext'))
sys.path.append(os.path.abspath('pyplots'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bedtools'
copyright = u'2009 - 2017, Aaron R. Quinlan and Neil Kindlon'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.27.0'
# The full version, including alpha/beta/rc tags.
release = '2.27.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'rtd'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = project + " v" + release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'bedtools.swiss.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'bedtools.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'labibi.css'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebar-intro.html', 'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bedtools-docs'
# Google analytics
#googleanalytics_id = "UA-24167610-15"
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bedtools.tex', u'Bedtools Documentation',
u'Quinlan lab @ Univ. of Utah', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bedtools', u'Bedtools Documentation', [u'UU'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'bedtools': ('http://bedtools.readthedocs.org/en/latest/', None)}
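# The Mock class below stands in for heavy optional dependencies so that
# Sphinx can import the documented code (and the plot directive) even when
# these packages are not installed in the doc-build environment.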
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
MOCK_MODULES = ['numpy', 'matplotlib', 'matplotlib.pyplot',
'matplotlib.sphinxext', 'matplotlib.sphinxext.plot_directive']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
|
gpl-2.0
| 4,217,841,412,627,542,500
| 32.311741
| 88
| 0.69227
| false
| 3.689686
| false
| false
| false
|
jbudynk/sherpa
|
sherpa/logposterior.py
|
1
|
3090
|
#
# Copyright (C) 2009 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from sherpa.stats import Likelihood, truncation_value, Stat
from sherpa.models import Parameter
from sherpa.utils import NoNewAttributesAfterInit
from sherpa.utils.err import StatErr
from itertools import izip
import numpy
class Prior(Likelihood):
# Provide a Model-like parameter interface
def __getattr__(self, name):
par = self.__dict__.get(name.lower())
if (par is not None) and isinstance(par, Parameter):
return par
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __setattr__(self, name, val):
par = getattr(self, name.lower(), None)
if (par is not None) and isinstance(par, Parameter):
par.val = val
else:
NoNewAttributesAfterInit.__setattr__(self, name, val)
def __init__(self, statfunc=None, hyperpars={}, pars={}, name='prior'):
# Posterior hyper-parameters
self.hyperpars = []
for key in hyperpars.keys():
val = hyperpars[key]
param = Parameter(name, key, val, alwaysfrozen=True)
self.__dict__[key] = param
self.hyperpars.append(param)
# References to parameters in source model
self.pars = []
for key in pars.keys():
self.__dict__[key] = pars[key]
self.pars.append(pars[key])
self._statfuncset = False
self.statfunc = (lambda x: None)
if statfunc is not None:
self.statfunc = statfunc
self._statfuncset = True
Likelihood.__init__(self, name)
def __str__(self):
s = self.name
hfmt = '\n %-15s %-6s %12s'
s += hfmt % ('Param', 'Type', 'Value')
s += hfmt % ('-'*5, '-'*4, '-'*5)
for p in self.hyperpars:
s += ('\n %-15s %-6s %12g' %
(p.fullname,'frozen', p.val))
return s
def set_statfunc(self, func):
self.statfunc = func
self._statfuncset = True
def calc_stat(self, data, model, staterror=None, syserror=None,
weight=None):
if not self._statfuncset:
raise StatErr('nostat', self.name, 'calc_stat()')
return self.statfunc(self, data, model, staterror, syserror, weight)
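# Minimal usage sketch. The quadratic penalty and the hyper-parameter name
# 'center' below are illustrative placeholders, not part of the sherpa API.
if __name__ == '__main__':
    def quad_penalty(prior, data, model, staterror, syserror, weight):
        # Penalise the squared deviation of the model from the 'center' value.
        return float(numpy.sum((model - prior.center.val) ** 2))
    p = Prior(statfunc=quad_penalty, hyperpars={'center': 1.0}, name='demo')
    print p
    print p.calc_stat(numpy.zeros(3), numpy.ones(3) * 2.0)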
|
gpl-3.0
| 2,770,907,433,389,379,000
| 31.87234
| 76
| 0.609385
| false
| 3.867334
| false
| false
| false
|
jimstorch/python-read-filepro
|
read_filepro/fpdatabase.py
|
1
|
4921
|
#------------------------------------------------------------------------------
# read_filepro/database.py
# Copyright 2010 Jim Storch
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#------------------------------------------------------------------------------
"""
Open and interpret a Filepro database directory.
Not intended for use on files under control of an active Filepro session.
In other words: halt Filepro, copy the data files, and work on the copies.
"""
from read_filepro.fpmap import FPMap
from read_filepro.fpkey import FPKey
from read_filepro.fpdata import FPData
class FPDatabase(object):
def __init__(self, folder):
self.fpmap = FPMap(folder)
self.fpkey = FPKey(folder, self.fpmap)
self.fpdata = FPData(folder, self.fpmap, self.fpkey)
def is_deleted(self, index):
"""
Given a record number, returns True if that record is marked as
deleted.
"""
return self.fpkey.deletes[index]
def get_total_record_count(self):
"""
Return the total number of records, including deleted ones.
"""
return self.fpkey.total_records
def get_active_record_count(self):
"""
Return the number of active records; i.e. total - deleted.
"""
return self.fpkey.active_records
def get_deleted_record_count(self):
"""
Return the number of deleted records.
"""
return self.fpkey.deleted_records
def get_field_count(self):
"""
Return the number of fields/columns in the database.
Omits dummy/placeholder fields with zero length.
"""
return len(self.get_field_names())
def get_field_names(self):
"""
        Return the names of all fields/columns in the database.
Merges key file and data file field names.
Omits dummy/placeholder fields with zero length.
"""
key_fields = [ d[0] for d in self.fpkey.fields ]
data_fields = [ d[0] for d in self.fpdata.fields ]
return key_fields + data_fields
def get_all_records(self):
"""
Return a list of all records, including deleted.
"""
records = []
for x in range(self.fpkey.total_records):
row = self.fpkey.records[x] + self.fpdata.records[x]
records.append(row)
return records
def get_active_records(self):
"""
Return a list of active records, omitting deleted ones.
"""
records = []
for x in range(self.fpkey.total_records):
if not self.is_deleted(x):
row = self.fpkey.records[x] + self.fpdata.records[x]
records.append(row)
return records
def get_deleted_records(self):
"""
Return a list of deleted records, omitting active ones.
"""
records = []
for x in range(self.fpkey.total_records):
if self.is_deleted(x):
row = self.fpkey.records[x] + self.fpdata.records[x]
records.append(row)
return records
def get_record(self, index):
"""
        Given an integer record index, return the corresponding record merged
        from the key and data files.
"""
return self.fpkey.records[index] + self.fpdata.records[index]
def get_record_dict(self, index):
"""
Given an integer value, returns a dictionary of field names mapped
to record values.
"""
fields = self.get_field_names()
columns = self.get_record(index)
combo = zip(fields, columns)
return dict(combo)
def get_field_types(self):
"""
Scans all values in each database column to see if they are numeric
or string values.
        Returns a list containing 'number' or 'string' for each column.
        This is used to decide whether values need quoting when exporting
        to CSV files.
"""
column_types = []
for i in range(self.get_field_count()):
this_type = 'number'
for record in self.get_all_records():
entry = record[i]
if entry:
try:
foo = float(entry)
except ValueError:
this_type = 'string'
column_types.append(this_type)
return column_types
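# Minimal usage sketch; '/path/to/filepro/copy' is a placeholder for a copied
# (not live) Filepro database directory, as recommended in the module docstring.
if __name__ == '__main__':
    db = FPDatabase('/path/to/filepro/copy')
    print 'fields :', db.get_field_names()
    print 'active :', db.get_active_record_count()
    for row in db.get_active_records()[:5]:
        print row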
|
apache-2.0
| 5,153,057,967,458,974,000
| 32.705479
| 79
| 0.58037
| false
| 4.253241
| false
| false
| false
|
sunu/oppia-test-2
|
apps/image/tests.py
|
1
|
1768
|
# coding: utf-8
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Jeremy Emerson'
from oppia.apps.image.models import Image
from django.utils import unittest
from django.core.exceptions import ValidationError
from django.core.files import File
class ImageUnitTests(unittest.TestCase):
"""Test image models."""
def test_image_class(self):
"""Test the Image class."""
# An Image must have the 'raw' property set.
image = Image(id='The hash id')
with self.assertRaises(ValidationError):
image.put()
# TODO: image format validation.
# The 'raw' property must be a valid image.
# with self.assertRaises(AssertionError):
# image.raw = 'The image'
# Set the 'raw' property to be a valid image, then do a put().
with open("oppia/tests/data/img.png") as f:
image.raw = File(f)
image_file_content = image.raw.read()
image.put()
# Retrieve the image.
retrieved_image = Image.objects.get(id='The hash id')
# Read its content
retrieved_content = retrieved_image.raw.read()
self.assertEqual(retrieved_content, image_file_content)
|
apache-2.0
| 6,075,939,482,199,800,000
| 33.666667
| 74
| 0.671946
| false
| 4.027335
| true
| false
| false
|
JohnReid/biopsy
|
Python/gapped_pssms/score_pssms.py
|
1
|
5645
|
#
# Copyright John Reid 2008
#
"""
Code to rank PSSMs by "interesting-ness".
Information content.
Low-order predictability.
Number of sequences with sites.
"""
from gapped_pssms.parse_gapped_format import parse_models
from itertools import imap
from gapped_pssms.pssm_score import *
from cookbook import DictOfLists
import glob, logging, shutil, os, re
import hmm.pssm.logo as logo
def calculate_emissions(model):
emissions = numpy.zeros((model.N, model.M))
for i in xrange(model.N):
assert model.emissions[i][0] == i
emissions[i] = model.emissions[i][1]
return emissions
def calculate_gap_probs(model):
gap_probs = numpy.ones((model.N))
for f, t, p in model.transitions:
gap_probs[t] = p
return gap_probs
class Log(object):
"""
Parses log files.
"""
log_file_name_re = re.compile('(.*).log')
pssm_num_re = re.compile('PSSM ([0-9]+)')
sites_re = re.compile('[Ff]ound ([0-9]+) sites. ([0-9]+)/([0-9]+) sequences have at least one site')
def __init__(self, log_file):
"""
Parses log files:
************** PSSM 4 **************
Seed ctgctgtg with gap at 3 had 79 hits in 72/601 sequences
Seed score: 2528.810289
Found 238 sites. 145/601 sequences have at least one site
Entropy/base : 0.923442
Information content : 10.238500
"""
logging.info('Parsing log file %s', log_file)
self.log_file = log_file
self.site_numbers = dict()
re_match = Log.log_file_name_re.match(os.path.basename(log_file))
self.tag = re_match.group(1)
logging.info('%s: %s', self.log_file, self.tag)
for line in open(log_file):
m = Log.pssm_num_re.search(line)
if m:
pssm_num = int(m.group(1))
# logging.info('PSSM: %d', pssm_num)
m = Log.sites_re.search(line)
if m and -1 == line.find('Trained model'):
num_sites = int(m.group(1))
num_seqs_with_site = int(m.group(2))
num_seqs = int(m.group(3))
# logging.info('# sites: %d; # seqs with sites: %d; # seqs: %d', num_sites, num_seqs_with_site, num_seqs)
self.site_numbers[pssm_num] = (num_sites, num_seqs_with_site, num_seqs)
class Pssm(object):
pssm_file_name_re = re.compile('(.*)-([0-9]+).pssm')
def __init__(self, pssm_file, log):
self.pssm_file = pssm_file
self.png_file = pssm_file.replace('.pssm', '.png')
self.eps_file = pssm_file.replace('.pssm', '.eps')
re_match = Pssm.pssm_file_name_re.match(os.path.basename(pssm_file))
self.tag = re_match.group(1)
self.pssm_idx = int(re_match.group(2))
self.num_sites, self.num_seqs_with_site, self.num_seqs = log.site_numbers[self.pssm_idx]
# logging.info('%s: %s %d %d', self.pssm_file, self.fragment, self.cross_fold, self.pssm_idx)
self.model = parse_models(open(self.pssm_file)).next()
self.emissions = calculate_emissions(self.model)
self.gap_probs = calculate_gap_probs(self.model)
self.first_order_entropy_score = calculate_first_order_entropy_score(self.emissions)
self.information_content_score = calculate_information_content_score(self.emissions)
self.num_seqs_with_site_score = float(self.num_seqs_with_site) / float(self.num_seqs)
self.overall_score = weighted_geometric_mean(
(self.first_order_entropy_score, self.information_content_score, self.num_seqs_with_site_score),
[1.5 , 1. , 1.]
)
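        # Assuming weighted_geometric_mean implements the standard weighted
        # geometric mean, this evaluates to
        # (entropy_score**1.5 * ic_score * seqs_with_site_score) ** (1 / 3.5),
        # i.e. the first-order entropy score is weighted a little more heavily.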
logging.info(
'%s; %8g; %8g; %8g; %8g',
self.pssm_file,
self.first_order_entropy_score,
self.information_content_score,
self.num_seqs_with_site_score,
self.overall_score
)
def write_image(self):
image = logo.pssm_as_image(
self.emissions,
transparencies=self.gap_probs
)
image.save(self.png_file, "PNG")
image.save(self.eps_file, "EPS")
def montage(input_files, output_file):
montage_cmd = 'montage -tile 1x -geometry x240 %s %s' % (' '.join(input_files), output_file)
os.system(montage_cmd)
class PssmSet(object):
def __init__(self, basename):
self.basename = basename
self.tag = os.path.basename(basename)
self.log = Log('%s.log' % self.basename)
self.pssms = dict(
(num, Pssm('%s-%03d.pssm' % (self.basename, num), self.log))
for num in self.log.site_numbers.keys()
)
def sorted_by_score(self):
"""
Returns a list of pssms sorted by score.
"""
sorted_pssms = self.pssms.values()
sorted_pssms.sort(key=lambda p: p.overall_score, reverse=True)
logging.info(' '.join(imap(str, (p.pssm_idx for p in sorted_pssms))))
return sorted_pssms
def montage_by_score(self):
sorted_pssms = self.sorted_by_score()
ranked_files = [p.png_file for p in sorted_pssms]
ranked_file = '%s-ranked.png' % self.basename
montage(ranked_files, ranked_file)
if '__main__' == __name__:
logging.basicConfig(level=logging.DEBUG)
import sys
root_dir = sys.argv[1]
tag_re = re.compile('(T.*).log')
tags = map(lambda m: m.group(1), filter(None, imap(tag_re.search, glob.glob(os.path.join(root_dir, 'T*.log')))))
# tags = ['T00140-3']
for tag in tags:
logging.info(tag)
pssm_set = PssmSet(os.path.join(root_dir, tag))
pssm_set.montage_by_score()
|
mit
| -1,598,437,059,165,579,800
| 33.631902
| 121
| 0.586005
| false
| 3.077972
| false
| false
| false
|
lutianming/leetcode
|
reverse_nodes_in_k_group.py
|
1
|
1097
|
from leetcode import ListNode
class Solution:
# @param head, a ListNode
# @param k, an integer
# @return a ListNode
    def reverseKGroup(self, head, k):
        # Nothing to reverse for an empty/single-node list or k == 1.
        if not head or not head.next or k == 1:
            return head
        newhead = None
        tail = head
        prev = None
        while True:
            # Walk `tail` forward to the last node of the next group of k.
            count = 1
            while tail and tail.next and count < k:
                count += 1
                tail = tail.next
            # Fewer than k nodes remain: leave the rest of the list untouched.
            if count != k:
                break
            # Reverse the k nodes starting at `head` by relinking in place.
            node = head
            next = node.next
            for i in range(k-1):
                tmp = next.next
                next.next = node
                node = next
                next = tmp
            # Splice the reversed group (now headed by `tail`) back in.
            if not prev:
                newhead = tail
            else:
                prev.next = tail
            prev = head       # the old group head is now the group tail
            head.next = tmp   # link to the remainder of the list
            head = tmp        # first node of the next group
            tail = head
        if not newhead:
            newhead = head
        return newhead
a = ListNode.from_list([1,2,3,4,5,6])
s = Solution()
print(s.reverseKGroup(a, 2))
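# Reversing [1, 2, 3, 4, 5, 6] in groups of two yields the node order
# 2 -> 1 -> 4 -> 3 -> 6 -> 5; the exact printed form depends on how the
# ListNode helper renders a list.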
|
mit
| -468,447,601,640,617,300
| 22.340426
| 51
| 0.44485
| false
| 4.318898
| false
| false
| false
|
ChampionZP/DeepLearningImplementations
|
Colorful/src/utils/general_utils.py
|
1
|
4257
|
import os
import numpy as np
from skimage import color
import matplotlib.pylab as plt
def remove_files(files):
"""
Remove files from disk
args: files (str or list) remove all files in 'files'
"""
if isinstance(files, (list, tuple)):
for f in files:
if os.path.isfile(os.path.expanduser(f)):
os.remove(f)
elif isinstance(files, str):
if os.path.isfile(os.path.expanduser(files)):
os.remove(files)
def create_dir(dirs):
"""
Create directory
args: dirs (str or list) create all dirs in 'dirs'
"""
if isinstance(dirs, (list, tuple)):
for d in dirs:
if not os.path.exists(os.path.expanduser(d)):
os.makedirs(d)
elif isinstance(dirs, str):
if not os.path.exists(os.path.expanduser(dirs)):
os.makedirs(dirs)
def setup_logging(model_name):
model_dir = "../../models"
# Output path where we store experiment log and weights
model_dir = os.path.join(model_dir, model_name)
fig_dir = "../../figures"
# Create if it does not exist
create_dir([model_dir, fig_dir])
def plot_batch(color_model, q_ab, X_batch_black, X_batch_color, batch_size, h, w, nb_q, epoch):
# Format X_colorized
X_colorized = color_model.predict(X_batch_black / 100.)[:, :, :, :-1]
X_colorized = X_colorized.reshape((batch_size * h * w, nb_q))
X_colorized = q_ab[np.argmax(X_colorized, 1)]
X_a = X_colorized[:, 0].reshape((batch_size, 1, h, w))
X_b = X_colorized[:, 1].reshape((batch_size, 1, h, w))
X_colorized = np.concatenate((X_batch_black, X_a, X_b), axis=1).transpose(0, 2, 3, 1)
X_colorized = [np.expand_dims(color.lab2rgb(im), 0) for im in X_colorized]
X_colorized = np.concatenate(X_colorized, 0).transpose(0, 3, 1, 2)
X_batch_color = [np.expand_dims(color.lab2rgb(im.transpose(1, 2, 0)), 0) for im in X_batch_color]
X_batch_color = np.concatenate(X_batch_color, 0).transpose(0, 3, 1, 2)
list_img = []
for i, img in enumerate(X_colorized[:min(32, batch_size)]):
arr = np.concatenate([X_batch_color[i], np.repeat(X_batch_black[i] / 100., 3, axis=0), img], axis=2)
list_img.append(arr)
plt.figure(figsize=(20,20))
list_img = [np.concatenate(list_img[4 * i: 4 * (i + 1)], axis=2) for i in range(len(list_img) / 4)]
arr = np.concatenate(list_img, axis=1)
plt.imshow(arr.transpose(1,2,0))
ax = plt.gca()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
plt.tight_layout()
plt.savefig("../../figures/fig_epoch%s.png" % epoch)
plt.clf()
plt.close()
def plot_batch_eval(color_model, q_ab, X_batch_black, X_batch_color, batch_size, h, w, nb_q, T):
# Format X_colorized
X_colorized = color_model.predict(X_batch_black / 100.)[:, :, :, :-1]
X_colorized = X_colorized.reshape((batch_size * h * w, nb_q))
# Reweight probas
X_colorized = np.exp(np.log(X_colorized) / T)
X_colorized = X_colorized / np.sum(X_colorized, 1)[:, np.newaxis]
# Reweighted
q_a = q_ab[:, 0].reshape((1, 313))
q_b = q_ab[:, 1].reshape((1, 313))
X_a = np.sum(X_colorized * q_a, 1).reshape((batch_size, 1, h, w))
X_b = np.sum(X_colorized * q_b, 1).reshape((batch_size, 1, h, w))
X_colorized = np.concatenate((X_batch_black, X_a, X_b), axis=1).transpose(0, 2, 3, 1)
X_colorized = [np.expand_dims(color.lab2rgb(im), 0) for im in X_colorized]
X_colorized = np.concatenate(X_colorized, 0).transpose(0, 3, 1, 2)
X_batch_color = [np.expand_dims(color.lab2rgb(im.transpose(1, 2, 0)), 0) for im in X_batch_color]
X_batch_color = np.concatenate(X_batch_color, 0).transpose(0, 3, 1, 2)
list_img = []
for i, img in enumerate(X_colorized[:min(32, batch_size)]):
arr = np.concatenate([X_batch_color[i], np.repeat(X_batch_black[i] / 100., 3, axis=0), img], axis=2)
list_img.append(arr)
plt.figure(figsize=(20,20))
list_img = [np.concatenate(list_img[4 * i: 4 * (i + 1)], axis=2) for i in range(len(list_img) / 4)]
arr = np.concatenate(list_img, axis=1)
plt.imshow(arr.transpose(1,2,0))
ax = plt.gca()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
plt.tight_layout()
plt.show()
|
mit
| -6,095,970,284,944,061,000
| 34.181818
| 108
| 0.603477
| false
| 2.830452
| false
| false
| false
|
visionegg/visionegg
|
VisionEgg/win32_maxpriority.py
|
1
|
2142
|
# This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _win32_maxpriority
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name) or (name == "thisown"):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
IDLE_PRIORITY_CLASS = _win32_maxpriority.IDLE_PRIORITY_CLASS
NORMAL_PRIORITY_CLASS = _win32_maxpriority.NORMAL_PRIORITY_CLASS
HIGH_PRIORITY_CLASS = _win32_maxpriority.HIGH_PRIORITY_CLASS
REALTIME_PRIORITY_CLASS = _win32_maxpriority.REALTIME_PRIORITY_CLASS
THREAD_PRIORITY_IDLE = _win32_maxpriority.THREAD_PRIORITY_IDLE
THREAD_PRIORITY_LOWEST = _win32_maxpriority.THREAD_PRIORITY_LOWEST
THREAD_PRIORITY_BELOW_NORMAL = _win32_maxpriority.THREAD_PRIORITY_BELOW_NORMAL
THREAD_PRIORITY_NORMAL = _win32_maxpriority.THREAD_PRIORITY_NORMAL
THREAD_PRIORITY_ABOVE_NORMAL = _win32_maxpriority.THREAD_PRIORITY_ABOVE_NORMAL
THREAD_PRIORITY_HIGHEST = _win32_maxpriority.THREAD_PRIORITY_HIGHEST
THREAD_PRIORITY_TIME_CRITICAL = _win32_maxpriority.THREAD_PRIORITY_TIME_CRITICAL
set_self_process_priority_class = _win32_maxpriority.set_self_process_priority_class
set_self_thread_priority = _win32_maxpriority.set_self_thread_priority
|
lgpl-2.1
| 1,918,292,327,711,323,000
| 38.666667
| 84
| 0.737162
| false
| 3.449275
| false
| false
| false
|