text
stringlengths 8
6.05M
|
|---|
#!/usr/bin/python
# Python 2 script: computes and plots the luminosity distance D_L(z) and the
# "chirp-mass distance" K(z) = D_L(z) * (1+z)^(-5/3), used to illustrate
# apparent vs. absolute redshift solutions for gravitational-wave binaries.
import numpy as np
import pylab as py
from COMMON import nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv,kpc,mchirpfun,fmaxlso
from scipy import integrate
from scipy.ndimage import gaussian_filter
import pyPdf,os
#Input parameters:
outputdir='../plots/Distances/' #Destination directory for all output PDFs.
outputfile1=outputdir+'K_binaries.pdf'
outputfile2=outputdir+'K_binaries_A.pdf'
outputfile3=outputdir+'K_binaries_B.pdf'
outputfile4=outputdir+'K_binaries_C.pdf'
maxreds=1e4 #Maximum redshift.
minreds=1e-4 #Minimum redshift.
zbin=1000 #Number of z-bins to construct K(z).
mchvec=np.array([1.,2.,3.,4.,5.]) # NOTE(review): unused in the visible code.
#-----------------------------------------------------------------
#Calculate K(z) and D_L(z) on a log-spaced redshift grid.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbin)
dist_const=light/(hub0*h0)/mpc #Hubble distance c/H0 -- presumably in Mpc given the COMMON constants; confirm units there.
kdistvec=np.zeros(len(reds)) #K(z).
lumdistvec=np.zeros(len(reds)) #D_L(z).
for zi in xrange(len(reds)):
    # Flat LCDM: D_L(z) = (1+z) * (c/H0) * integral_0^z dz' / sqrt(omm*(1+z')^3 + omv).
    lumdistvec[zi]=(1.+reds[zi])*integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5),0,reds[zi])[0]*dist_const
    # K(z) weights D_L by the redshifted chirp-mass factor (1+z)^(-5/3).
    kdistvec[zi]=lumdistvec[zi]*(1.+reds[zi])**(-5./3.)
zpeak=reds[kdistvec.argmax()] #Redshift at which K(z) has a maximum.
print 'Maximum achieved at redshift %.4f' %zpeak
#Choose plotting options that look optimal for the paper.
fig_width = 3.4039 #Single-column figure width in inches.
goldenmean=(np.sqrt(5.)-1.0)/2.0 #Golden-ratio aspect for the figure.
fig_height = fig_width * goldenmean
sizepoints=8
legendsizepoints=8
# NOTE: 'text.fontsize' is an old matplotlib rcParam (later renamed
# 'font.size'), consistent with the Python 2 vintage of this script.
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints,
    'xtick.labelsize': sizepoints,
    'ytick.labelsize': sizepoints,
    'legend.fontsize': legendsizepoints
})
######################################################################
#Plot 1: D_L(z) and K(z) over the full redshift range.
#Define the borders of the plot.
left, right, top, bottom, cb_fraction=0.13, 0.96, 0.96, 0.16, 0.145
xmin,xmax=minreds,maxreds #Edges of the x-axis.
ymin,ymax=1,1e5 #Edges of the y-axis.
fig=py.figure()
fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
ax=fig.gca()
ax.plot(reds,lumdistvec,color='black',linestyle='--',label='$\\mathrm{D_L(z)}$')
ax.plot(reds,kdistvec,color='black',linestyle='-',label='$\\mathrm{K(z)}$')
ax.legend(loc='upper left',handlelength=3.5)
ax.grid()
# (Dead commented-out axis-label/tick alternatives removed for clarity.)
ax.set_ylabel('$\\mathrm{Distance}$')
ax.set_xlabel('$\\mathrm{Redshift}$')
ax.set_xlim(xmin,xmax)
ax.set_ylim(ymin,ymax)
ax.set_xscale('log')
ax.set_yscale('log')
fig.savefig(outputfile1, transparent=True)
######################################################################
#Plot 2: a horizon value H^A below the K(z) maximum gives one apparent
#redshift (from D_L) and two absolute-redshift solutions (from K).
havalue=2000
ha=havalue*np.ones(len(reds))
#Find zapp: redshift where D_L crosses the horizon value.
zapp=reds[abs(lumdistvec-ha).argmin()]
# Split K(z) at its peak: K is monotonic on each side, so each half
# contains exactly one crossing of the horizontal line.
redslow=reds[reds<zpeak]
redsupp=reds[reds>zpeak]
kdistlow=kdistvec[reds<zpeak]
kdistupp=kdistvec[reds>zpeak]
halow=ha[reds<zpeak]
haupp=ha[reds>zpeak]
zabs1=redslow[abs(kdistlow-halow).argmin()]
zabs2=redsupp[abs(kdistupp-haupp).argmin()]
#Define the borders of the plot.
left, right, top, bottom, cb_fraction=0.07, 0.96, 0.96, 0.16, 0.145
xmin,xmax=5e-2,1e2 #Edges of the x-axis.
ymin,ymax=5e2,1e4 #Edges of the y-axis.
fig=py.figure()
fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
ax=fig.gca()
ax.plot(reds,lumdistvec,color='black',linestyle='--',label='$\\mathrm{D_L(z)}$')
ax.plot(reds,kdistvec,color='black',linestyle='-',label='$\\mathrm{K(z)}$')
ax.plot(reds,ha,color='black',linestyle='-.',label='$\\mathcal{H}^A$')
factor=0.75 #Vertical placement of the z-labels, as a fraction of ymin.
ax.text(zapp,ymin*factor,'$z_{\\mathrm{app}}$',fontsize=9,horizontalalignment='center')
ax.text(zabs1,ymin*factor,'$z_{\\mathrm{abs}}^1$',fontsize=9,horizontalalignment='center')
ax.text(zpeak,ymin*factor,'$z_{\\mathrm{peak}}$',fontsize=9,horizontalalignment='center')
ax.text(zabs2,ymin*factor,'$z_{\\mathrm{abs}}^2$',fontsize=9,horizontalalignment='center')
ax.vlines(zapp,ymin,havalue,color='black',linestyle=':')
ax.vlines(zabs1,ymin,havalue,color='black',linestyle=':')
ax.vlines(zpeak,ymin,max(kdistvec),color='black',linestyle=':')
ax.vlines(zabs2,ymin,havalue,color='black',linestyle=':')
ax.legend(loc='upper left',handlelength=3.5)
ax.set_ylabel('$\\mathrm{Distance}$')
ax.set_xlabel('$\\mathrm{Redshift}$',labelpad=15)
ax.set_xlim(xmin,xmax)
ax.set_ylim(ymin,ymax)
ax.set_xscale('log')
ax.set_yscale('log')
#Hide ticks/labels: this panel is a schematic, not a quantitative plot.
ax.tick_params(which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
fig.savefig(outputfile2, transparent=True)
######################################################################
#Plot 3: horizon exactly at the K(z) maximum -> the two absolute
#solutions coincide with the peak.
havalue=max(kdistvec)
ha=havalue*np.ones(len(reds))
#Find zapp.
zapp=reds[abs(lumdistvec-ha).argmin()]
zabs=zpeak #The single absolute solution is the peak itself.
#Define the borders of the plot.
left, right, top, bottom, cb_fraction=0.07, 0.96, 0.96, 0.16, 0.145
xmin,xmax=5e-2,1e2 #Edges of the x-axis.
ymin,ymax=5e2,1e4 #Edges of the y-axis.
fig=py.figure()
fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
ax=fig.gca()
ax.plot(reds,lumdistvec,color='black',linestyle='--',label='$\\mathrm{D_L(z)}$')
ax.plot(reds,kdistvec,color='black',linestyle='-',label='$\\mathrm{K(z)}$')
ax.plot(reds,ha,color='black',linestyle='-.',label='$\\mathcal{H}^B$')
factor=0.75
ax.text(zapp,ymin*factor,'$z_{\\mathrm{app}}$',fontsize=9,horizontalalignment='center')
ax.text(zabs,ymin*factor,'$z_{\\mathrm{abs}}=z_{\\mathrm{peak}}$',fontsize=9,horizontalalignment='center')
ax.vlines(zapp,ymin,havalue,color='black',linestyle=':')
ax.vlines(zabs,ymin,havalue,color='black',linestyle=':')
ax.legend(loc='upper left',handlelength=3.5)
ax.set_ylabel('$\\mathrm{Distance}$')
ax.set_xlabel('$\\mathrm{Redshift}$',labelpad=15)
ax.set_xlim(xmin,xmax)
ax.set_ylim(ymin,ymax)
ax.set_xscale('log')
ax.set_yscale('log')
ax.tick_params(which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
fig.savefig(outputfile3, transparent=True)
######################################################################
#Plot 4: horizon above the K(z) maximum -> no absolute solution at all,
#only the apparent redshift from D_L.
havalue=max(kdistvec)*3
ha=havalue*np.ones(len(reds))
#Find zapp.
zapp=reds[abs(lumdistvec-ha).argmin()]
#Define the borders of the plot.
left, right, top, bottom, cb_fraction=0.07, 0.96, 0.96, 0.16, 0.145
xmin,xmax=5e-2,1e2 #Edges of the x-axis.
ymin,ymax=5e2,1e4 #Edges of the y-axis.
fig=py.figure()
fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
ax=fig.gca()
ax.plot(reds,lumdistvec,color='black',linestyle='--',label='$\\mathrm{D_L(z)}$')
ax.plot(reds,kdistvec,color='black',linestyle='-',label='$\\mathrm{K(z)}$')
ax.plot(reds,ha,color='black',linestyle='-.',label='$\\mathcal{H}^C$')
factor=0.75
ax.text(zapp,ymin*factor,'$z_{\\mathrm{app}}$',fontsize=9,horizontalalignment='center')
ax.text(zpeak,ymin*factor,'$z_{\\mathrm{peak}}$',fontsize=9,horizontalalignment='center')
ax.vlines(zapp,ymin,havalue,color='black',linestyle=':')
ax.vlines(zpeak,ymin,max(kdistvec),color='black',linestyle=':')
ax.legend(loc='upper left',handlelength=3.5)
ax.set_ylabel('$\\mathrm{Distance}$')
ax.set_xlabel('$\\mathrm{Redshift}$',labelpad=15)
ax.set_xlim(xmin,xmax)
ax.set_ylim(ymin,ymax)
ax.set_xscale('log')
ax.set_yscale('log')
ax.tick_params(which='both', bottom='off', top='off', left='off', right='off', labelbottom='off', labelleft='off')
fig.savefig(outputfile4, transparent=True)
|
#Ejercicio 17
"""
Convertir un valor entero de horas a segundos.
"""
def horas_a_segundos(horas):
    """Return the whole number of seconds in `horas` hours.

    :param horas: number of hours (int)
    :return: horas * 3600 (int) -- the redundant `/ 1` and `round(..., 2)` of
        the original produced a float artifact (e.g. 7200.0) for an integer
        conversion; an exact int is the correct result.
    """
    return horas * 3600

if __name__ == '__main__':
    # Prompt only when run as a script, so importing this module does no I/O.
    horas = int(input("Ingrese el valor entero expresados en horas: "))
    segundos = horas_a_segundos(horas)
    # Message fixed: the original claimed the printed value was
    # "expresado en horas" although it printed seconds.
    print("El valor expresado en segundos es:", segundos, "segundos")
|
class Locators:
    """
    Class containing locators from different pages that are being accessed.

    All values are XPath strings used by Selenium page objects (hotel search
    site). They are data-only: nothing here executes. Keep the strings
    byte-exact -- they must match the live page markup.
    """
    # --- FindHotel (search form) locators ---
    popup_xpath = './/div[@class="autopop__wrap makeFlex column defaultCursor"]'
    login_menu_xpath = './/li[@class="makeFlex hrtlCenter font10 makeRelative lhUser userLoggedOut"]'
    city_label_xpath = './/label[@for="city"]'
    city_value_xpath = city_label_xpath + '/input'  # Composed from city_label_xpath.
    # city_textbox_xpath = './/div[@class="react-autosuggest__container react-autosuggest__container--open"]/input'
    city_textbox_xpath = city_label_xpath + '/following-sibling::div//child::input'
    suggestion_li_xpath = './/li[@id="react-autowhatever-1-section-0-item-0"]/div/div/div/p[1]'
    search_button_xpath = '.// button[ @ id = "hsw_search_button"]'
    rooms_guests_xpath = './/div[@class="hsw_inputBox roomGuests "]'
    guests_number_xpath = './/ul[@class="guestCounter font12 darkText"][1]/li'
    children_number_xpath = './/ul[@class="guestCounter font12 darkText"][2]/li'
    apply_button_xpath = './/button[@class="primaryBtn btnApply"]'
    travel_for_xpath = './/div[@class="hsw_inputBox travelFor "]'
    travel_reason_option_xpath = './/ul[@class="travelForPopup"]/li'
    autosearch_options_div_xpath = './/ul[@class="react-autosuggest__suggestions-list"]/li'
    autosearch_options_xpath = './/p[@class="locusLabel appendBottom5"]'
    autosearch_noopt_xpath = './/div[@class="cantFindYouText appendBottom20 noSuggesstions"]'
    guest_number_error_xpath = './/p[@class="redText font11"]'
    child_age_error_xpath = './/p[@class="redText font11 appendBottom10"]'
    selected_travel_reason = './/div[@class="font30 code latoBlack lineHeight36 blackText makeRelative"]/p'
    # --- Date-picker (DayPicker) locators ---
    date_month_body_xpath = './/div[@class="DayPicker-Month"][1]//div[@class="DayPicker-Body"]'
    date_month_body_weeks_xpath = './/div[@class="DayPicker-Week"]'
    today_xpath = './/div[@class="DayPicker-Day DayPicker-Day--today"]'
    past_days_xpath = './/div[@class="DayPicker-Day DayPicker-Day--disabled"]'
    selected_start_day_xpath = './/div[@class="DayPicker-Day DayPicker-Day--start DayPicker-Day--selected"]'
    selected_end_day_xpath = './/div[@class="DayPicker-Day DayPicker-Day--end DayPicker-Day--selected"]'
    future_days_xpath = './/div[@class="DayPicker-Day"]'
    day_xpath = './/div[@role="gridcell"]'
    label_checkin = './/label[@for="checkin"]'
    checkout_header_xpath = './/span[@class="selectedDateField appendBottom8 pointer" and @data-cy="selectCheckOutDate"]'
    child_age_select_xpath = './/select[@class="ageSelectBox"]'
    prev_month_arrow_xpath = './/span[@class="DayPicker-NavButton DayPicker-NavButton--prev"]'
    next_month_arrow_xpath = './/span[@class="DayPicker-NavButton DayPicker-NavButton--next"]'
    current_month_xpath = './/div[@class="DayPicker-Caption"][1]//child::div'
    future_month_xpath = './/div[@class="DayPicker-Caption"]//child::div'
    search_result_end = './/p[@class="appendTop20 appendBottom20 font22 latoBlack blackText textCenter"]'
    # constants used in method FindHotel::check_past_day_click()
    elem_class_attrib = 'class'
    elem_disable_attrib = 'aria-disabled'
    elem_class_outside = 'outside'
    elem_class_disable = 'disable'
    elem_class_future_day = 'DayPicker-Day'
    elem_class_today = 'today'
    elem_class_day_start = 'Day--start'
    elem_class_day_end = 'Day--end'
    # Menu item XPath with name (top navigation bar entries keyed by label).
    mmt_menu = {
        'My Trip': './/li[@class="makeFlex hrtlCenter lhMyTrips"]',
        '24X7 Support': './/li[@class="makeFlex hrtlCenter lhSupport"]',
        'Wallet': './/li[@class="makeFlex hrtlCenter lhMyWallet"]',
        'My Biz': './/li[@class="makeFlex perfectCenter makeRelative myBizIntro"]',
        'Login Account': './/li[@class="makeFlex hrtlCenter font10 makeRelative lhUser userLoggedOut"]',
        'Country': './/li[@class="makeFlex column font10 makeRelative"][1]',
        'Language': './/li[@class="makeFlex column font10 makeRelative whiteText"]',
        'Currency': './/li[@class="makeFlex column font10 makeRelative"][2]'
    }
    # --- Locators for Result page ---
    data_div_xpath = './/div[@class="infinite-scroll-component "]'
    hotels_detail_xpath = '//div[@class="makeFlex flexOne padding20 relative lftCol"]'
    # hotel_rate_details_xpath = './/div[@class="padding20 makeFlex column"]'
    hotel_rate_details_xpath = './/div[@class="priceDetails textRight"]'
    hotel_name_xpath = './/p[@itemprop="name"]/span[1]'
    hotel_rate_xpath = './/p[@id="hlistpg_hotel_shown_price"]'
    hotel_rating_score_xpath = './/span[@class="sprite mmtIcon"]//following-sibling::span'
    hotel_rating_count_xpath = './/span[@itemprop="reviewCount"]'
    # hotel_facilities_xpath = './/div[@class="appendTop10"]//child::div[@class="makeFlex persuasion "]//child::div'
    hotel_facilities_xpath = './/ul[@class="amenList darkText"]'
    # hotel_feature_xpath = './/div[@class="persuasion pc__inclusionsList"]'
    hotel_feature_xpath = hotel_facilities_xpath + '//following-sibling::p/span[2]'
    # result_count_xpath = './/div[@id="hlistpg_sortby_search"]//child::span[2]'
    hotel_feature2_xpath = './/ul[@class="includes"]'
|
from face_recognition.api import face_encodings
import numpy
class Picture():
    """Value object pairing an image's name with its face-recognition data."""

    def __init__(self, name: str, encodings, face_locations) -> None:
        # Store everything verbatim; no copying or validation is performed.
        self.face_locations = face_locations
        self.encodings = encodings
        self.name = name
|
# Script: extract the text of a PDF (sys.argv[2]) to a text file via
# Ghostscript's txtwrite device, then read the lines aloud with pyttsx3.
import subprocess  # NOTE(review): imported but never used in the visible code.
import pyttsx3
import sys
import locale
import ghostscript
engine = pyttsx3.init()
rate = engine.getProperty('rate')
engine.setProperty('rate', 140)  # Slow the voice down from the default.
args = [
    "ps2pdf", #value doesn't matter
    # NOTE(review): Ghostscript switches are case-sensitive; the documented
    # spelling is "-dBATCH" -- confirm "-dBatch" actually enables batch mode.
    "-dNOPAUSE", "-dBatch", "-dSAFER",
    "-sDEVICE=txtwrite",
    "-sOutputFile=" + sys.argv[1],
    # NOTE(review): ".setpdfwrite" is deprecated and removed in
    # Ghostscript >= 9.50; verify against the installed version.
    "-c", ".setpdfwrite",
    "-f", sys.argv[2]
]
# arguments have to be bytes, encode them
encoding = locale.getpreferredencoding()
args = [a.encode(encoding) for a in args]
ghostscript.Ghostscript(*args)
# NOTE(review): reads a hard-coded "output.txt" -- presumably sys.argv[1]
# is expected to be "output.txt"; confirm with the caller.
filename = "output.txt"
with open(filename) as f:
    content = f.readlines()
content = [x.strip() for x in content]
for w in content:
    # Announce each extracted line of text.
    engine.say("here is what I see. I see a")
    engine.say(w)
    engine.runAndWait()
engine.stop()
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import TYPE_CHECKING
from pants.engine.internals.native_engine import EngineError as EngineError # noqa: F401
from pants.engine.internals.native_engine import ( # noqa: F401
IncorrectProductError as IncorrectProductError,
)
from pants.engine.internals.native_engine import IntrinsicError as IntrinsicError # noqa: F401
if TYPE_CHECKING:
from pants.engine.internals.native_engine import PyFailure
class PantsException(Exception):
    """Base exception type for Pants.

    All Pants-specific exceptions below derive from this type so callers can
    catch the whole family with one clause.
    """

class TargetDefinitionException(PantsException):
    """Indicates an invalid target definition.

    :API: public
    """

    def __init__(self, target, msg):
        """
        :param target: the target in question
        :param string msg: a description of the target misconfiguration
        """
        super().__init__(f"Invalid target {target}: {msg}")

class BuildConfigurationError(PantsException):
    """Indicates an error in a pants installation's configuration."""

class BackendConfigurationError(BuildConfigurationError):
    """Indicates a plugin backend with a missing or malformed register module."""

class MappingError(PantsException):
    """Indicates an error mapping addressable objects."""

class RuleTypeError(PantsException):
    """Invalid @rule implementation."""
class NativeEngineFailure(Exception):
    """A wrapper around a `Failure` instance.

    The failure instance being wrapped can come from an exception raised in a rule. When this
    failure is returned to a requesting rule it is first unwrapped so the original exception will be
    presented in the rule, thus the `NativeEngineFailure` exception will not be seen in rule code.
    This is different from the other `EngineError` based exceptions which don't originate from
    rule code.

    TODO: This type is defined in Python because pyo3 doesn't support declaring Exceptions with
    additional fields. See https://github.com/PyO3/pyo3/issues/295
    """

    def __init__(self, msg: str, failure: PyFailure) -> None:
        """
        :param msg: human-readable description of the failure
        :param failure: the native `PyFailure` being wrapped; kept on
            ``self.failure`` so the engine can unwrap the original error
        """
        super().__init__(msg)
        self.failure = failure
|
def removeKthLinkedListNode(head, k):  # Space: O(1) Time: O(n) n = length of linked list
    """Remove the k-th node from the end of a singly linked list.

    Two pointers are kept k+1 apart: when the lead pointer runs off the end,
    the trail pointer sits just BEFORE the node to delete, so it can be
    unlinked in O(1). Assumes k >= 1; if k exceeds the list length the list
    is returned unchanged.

    :param head: first node of the list (or None for an empty list)
    :param k: 1-based position counted from the end of the list
    :return: head of the (possibly modified) list
    """
    # Edge case: empty list -- nothing to remove.
    if head is None:
        return None
    lead = head
    trail = head
    distance = 0       # how far `lead` has travelled
    num_of_nodes = 0   # total length of the list
    while lead is not None:
        distance += 1
        # Once `lead` is k+1 ahead, move `trail` in lock-step so it finishes
        # on the node just before the one to remove.
        if distance > k + 1:
            trail = trail.next
        lead = lead.next
        num_of_nodes += 1
    if num_of_nodes > k:
        # Unlink the k-th node from the end.
        trail.next = trail.next.next
    # BUGFIX: the original used `num_of_nodes is k`, an identity comparison
    # that only works because CPython caches small ints; `==` is correct.
    # (Also removed the dead `output = ''` leftover the author flagged.)
    if num_of_nodes == k:
        # The head itself is the k-th node from the end.
        head = trail.next
    return head
|
# BSD 3-Clause License.
#
# Copyright (c) 2019-2023 Robert A. Milton. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Type and constant definitions. """
from __future__ import annotations
from typing import *
from pathlib import Path
import numpy as np
import tensorflow as tf
import gpflow as gf
import romcomma.gpf as mf
import pandas as pd
from abc import abstractmethod
EFFECTIVELY_ZERO = 1.0E-64 #: Tolerance when testing floats for equality.
def INT() -> Type:
    """ The ``dtype`` of ``int`` in :ref:`romcomma.run.context.Environment`.

    Delegates to the active gpflow configuration, so the value reflects
    whatever default integer dtype gpflow is currently configured with.
    """
    return gf.config.default_int()
def FLOAT() -> Type:
    """ The ``dtype`` of ``float`` in :ref:`romcomma.run.context.Environment`.

    Delegates to the active gpflow configuration, so the value reflects
    whatever default float dtype gpflow is currently configured with.
    """
    return gf.config.default_float()
# noinspection PyPep8Naming
class NP:
""" Extended numpy types."""
Array = np.ndarray
Tensor = np.ndarray # Generic Tensor.
Tensor1 = Tensor # Second Order Tensor, tf.shape = (i,j)
Tensor2 = Tensor # Second Order Tensor, tf.shape = (i,j)
Vector = Tensor2 # First Order Tensor, column vector, tf.shape = (j,1)
Covector = Tensor2 # First Order Tensor, row vector, tf.shape = (1,j)
Matrix = Tensor2 # Second Order Tensor, tf.shape = (i,j)
Tensor3 = Tensor # Third Order Tensor, tf.shape = (i,j,k).
Tensor4 = Tensor # Fourth Order Tensor, tf.shape = (i,j,k,l).
Tensor5 = Tensor
Tensor6 = Tensor
Tensor7 = Tensor
Tensor8 = Tensor
VectorLike = int | float | Sequence[int | float] | Array
MatrixLike = VectorLike | Sequence[VectorLike]
CovectorLike = MatrixLike
ArrayLike = TensorLike = MatrixLike | Sequence[MatrixLike] | Sequence[Sequence[MatrixLike]]
# noinspection PyPep8Naming
class TF:
""" Extended tensorflow types, and constants."""
Array = tf.Tensor
Tensor = tf.Tensor # Generic Tensor.
Tensor1 = Tensor # Second Order Tensor, tf.shape = (i,j)
Tensor2 = Tensor # Second Order Tensor, tf.shape = (i,j)
Vector = Tensor2 # First Order Tensor, column vector, tf.shape = (j,1)
Covector = Tensor2 # First Order Tensor, row vector, tf.shape = (1,j)
Matrix = Tensor2 # Second Order Tensor, tf.shape = (i,j)
Tensor3 = Tensor # Third Order Tensor, tf.shape = (i,j,k).
Tensor4 = Tensor # Fourth Order Tensor, tf.shape = (i,j,k,l).
Tensor5 = Tensor
Tensor6 = Tensor
Tensor7 = Tensor
Tensor8 = Tensor
VectorLike = int | float | Sequence[int | float] | Array
MatrixLike = Union[VectorLike, Sequence[VectorLike]]
CovectorLike = MatrixLike
ArrayLike = TensorLike = MatrixLike | Sequence[MatrixLike] | Sequence[Sequence[MatrixLike]]
Slice = PairOfInts = tf.Tensor #: A slice, for indexing and marginalization.
NaN: TF.Tensor = tf.constant(np.NaN, dtype=FLOAT()) #: A constant Tensor representing NaN.
|
from django.db import models
class ExpenseCode(models.Model):
    """
    Represents a Budget of an order in the system
    """
    number = models.CharField('ExpenseCode Number', max_length=100)  # code number
    type = models.CharField('ExpenseCode Type', max_length=100)  # code type (shadows builtin `type`, but is a public field name)
    project = models.ForeignKey('finance.Project', verbose_name='Finance project', on_delete=models.CASCADE)

    @staticmethod
    def autocomplete_search_fields():
        """Lookups used by the admin autocomplete widget."""
        return (
            "number__icontains",
            'type__icontains',
            'project__name__icontains',
            'project__code__icontains',
            'project__costcenter__name__icontains'
        )

    class Meta:
        verbose_name = "Expense code"
        verbose_name_plural = "Expense codes"
        unique_together = ("number", "project")

    def __str__(self):
        # `CC-PPP-EE: costcenter-project-type`, identical to the old
        # string concatenation but built with an f-string.
        return (f'{self.project.costcenter.code}-{self.project.code}-{self.number}: '
                f'{self.project.costcenter.name}-{self.project.name}-{self.type}')

    def expensecode(self):
        """Same rendering as ``str(self)``.

        BUGFIX (consistency): this used to duplicate the ``__str__`` body
        verbatim; it now delegates so the two can never drift apart.
        """
        return str(self)

    @property
    def abbrv(self):
        """Returns a string with codes only, i.e. `CCCCCCC-PPP-EE`."""
        cost_center = self.project.costcenter.code
        project = self.project.code
        expense_code = self.number
        return f'{cost_center}-{project}-{expense_code}'
|
with open("input.txt") as f:
data = f.readlines()
# Get rid of the newlines
data = [n.strip() for n in data]
FIELD_WIDTH = len(data[0]) -1 # Zero index
FIELD_HEIGHT = len(data) - 1
char_x = 0
char_y = 0
trees_found = 0
tracked_list = [list(data[0])]
# Run until you reach the bottom
while char_y < FIELD_HEIGHT:
# Cater for the fact that the field repeats
if char_x + 3 > FIELD_WIDTH:
char_x = (char_x + 3) - FIELD_WIDTH - 1
else:
char_x += 3
char_y += 1
print("x is " + str(char_x) + " and y is " + str(char_y))
print("Found " + str(trees_found) + " trees so far")
if data[char_y][char_x] == "#":
trees_found += 1
# Record that you went here for debugging
this_row = list(data[char_y])
this_row[char_x] = "@"
tracked_list.append(this_row)
[print(n) for n in tracked_list]
print(trees_found)
|
# -*- coding: utf-8 -*-
import torch
import random
import pickle
import numpy as np
import torch.nn as nn
class DQNSolver(nn.Module):
    """Convolutional Q-network: a stack of frames in, one Q-value per action out."""

    def __init__(self, input_shape, n_actions):
        super(DQNSolver, self).__init__()
        # Attribute names `conv`/`fc` and the layer order are preserved so
        # previously saved state_dicts remain loadable.
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU(),
        )
        flat_features = self._get_conv_out(input_shape)
        self.fc = nn.Sequential(
            nn.Linear(flat_features, 512),
            nn.ReLU(),
            nn.Linear(512, n_actions),
        )

    def _get_conv_out(self, shape):
        # Probe the conv stack with one dummy frame to size the flat output.
        probe = self.conv(torch.zeros(1, *shape))
        return int(probe.numel())

    def forward(self, x):
        features = self.conv(x)
        return self.fc(torch.flatten(features, start_dim=1))
class DQNAgent:
    """Deep Q-Network agent with replay memory and epsilon-greedy exploration.

    Supports plain DQN or double DQN (`double_dq=True`: separate local and
    target networks, with periodic weight copies). When `pretrained` is set,
    network weights and the replay memory are restored from files under
    `path_level`.
    """

    def __init__(self, state_space, action_space, max_memory_size, batch_size, gamma, lr,
                 dropout, exploration_max, exploration_min, exploration_decay, double_dq,
                 pretrained, path_level):
        # Define DQN Layers
        # NOTE(review): `dropout` is accepted but never used in this class.
        self.state_space = state_space
        self.action_space = action_space
        self.double_dq = double_dq
        self.pretrained = pretrained
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        if self.double_dq:
            # Double DQN: policy (`local_net`) and bootstrap (`target_net`) nets.
            self.local_net = DQNSolver(state_space, action_space).to(self.device)
            self.target_net = DQNSolver(state_space, action_space).to(self.device)
            if self.pretrained:
                self.local_net.load_state_dict(torch.load(path_level+"dq1.pt", map_location=torch.device(self.device)))
                self.target_net.load_state_dict(torch.load(path_level+"dq2.pt", map_location=torch.device(self.device)))
            # Only the local (policy) net is trained; the target net is copied.
            self.optimizer = torch.optim.Adam(self.local_net.parameters(), lr=lr)
            self.copy = 5000  # Copy the local model weights into the target network every 5000 steps
            self.step = 0
        else:
            self.dqn = DQNSolver(state_space, action_space).to(self.device)
            if self.pretrained:
                self.dqn.load_state_dict(torch.load(path_level+"dq.pt", map_location=torch.device(self.device)))
            self.optimizer = torch.optim.Adam(self.dqn.parameters(), lr=lr)
        # Create memory (preallocated tensors acting as a ring buffer).
        self.max_memory_size = max_memory_size
        if self.pretrained:
            # Restore replay memory plus its write cursor and fill count.
            self.STATE_MEM = torch.load(path_level+"STATE_MEM.pt")
            self.ACTION_MEM = torch.load(path_level+"ACTION_MEM.pt")
            self.REWARD_MEM = torch.load(path_level+"REWARD_MEM.pt")
            self.STATE2_MEM = torch.load(path_level+"STATE2_MEM.pt")
            self.DONE_MEM = torch.load(path_level+"DONE_MEM.pt")
            with open(path_level+"ending_position.pkl", 'rb') as f:
                self.ending_position = pickle.load(f)
            with open(path_level+"num_in_queue.pkl", 'rb') as f:
                self.num_in_queue = pickle.load(f)
        else:
            self.STATE_MEM = torch.zeros(max_memory_size, *self.state_space)
            self.ACTION_MEM = torch.zeros(max_memory_size, 1)
            self.REWARD_MEM = torch.zeros(max_memory_size, 1)
            self.STATE2_MEM = torch.zeros(max_memory_size, *self.state_space)
            self.DONE_MEM = torch.zeros(max_memory_size, 1)
            self.ending_position = 0  # Next write index in the ring buffer.
            self.num_in_queue = 0     # Number of valid entries stored so far.
        self.memory_sample_size = batch_size
        # Learning parameters
        self.gamma = gamma
        self.l1 = nn.SmoothL1Loss().to(self.device)  # Also known as Huber loss
        self.exploration_max = exploration_max
        self.exploration_rate = exploration_max
        self.exploration_min = exploration_min
        self.exploration_decay = exploration_decay

    def remember(self, state, action, reward, state2, done):
        """Store one (s, a, r, s', done) transition, overwriting FIFO-style."""
        self.STATE_MEM[self.ending_position] = state.float()
        self.ACTION_MEM[self.ending_position] = action.float()
        self.REWARD_MEM[self.ending_position] = reward.float()
        self.STATE2_MEM[self.ending_position] = state2.float()
        self.DONE_MEM[self.ending_position] = done.float()
        self.ending_position = (self.ending_position + 1) % self.max_memory_size  # FIFO tensor
        self.num_in_queue = min(self.num_in_queue + 1, self.max_memory_size)

    def recall(self):
        """Sample a training batch (with replacement) from replay memory."""
        # Randomly sample 'batch size' experiences
        idx = random.choices(range(self.num_in_queue), k=self.memory_sample_size)
        STATE = self.STATE_MEM[idx]
        ACTION = self.ACTION_MEM[idx]
        REWARD = self.REWARD_MEM[idx]
        STATE2 = self.STATE2_MEM[idx]
        DONE = self.DONE_MEM[idx]
        return STATE, ACTION, REWARD, STATE2, DONE

    def act(self, state):
        """Pick an action epsilon-greedily; returns a 1x1 tensor on CPU."""
        # Epsilon-greedy action
        if self.double_dq:
            self.step += 1  # Step counter drives the periodic target-net copy.
        if random.random() < self.exploration_rate:
            return torch.tensor([[random.randrange(self.action_space)]])
        if self.double_dq:
            # Local net is used for the policy
            return torch.argmax(self.local_net(state.to(self.device))).unsqueeze(0).unsqueeze(0).cpu()
        else:
            return torch.argmax(self.dqn(state.to(self.device))).unsqueeze(0).unsqueeze(0).cpu()

    def copy_model(self):
        # Copy local net weights into target net
        self.target_net.load_state_dict(self.local_net.state_dict())

    def experience_replay(self):
        """One optimization step on a replayed batch; decays exploration."""
        if self.double_dq and self.step % self.copy == 0:
            self.copy_model()
        if self.memory_sample_size > self.num_in_queue:
            return  # Not enough experience collected yet.
        STATE, ACTION, REWARD, STATE2, DONE = self.recall()
        STATE = STATE.to(self.device)
        ACTION = ACTION.to(self.device)
        REWARD = REWARD.to(self.device)
        STATE2 = STATE2.to(self.device)
        DONE = DONE.to(self.device)
        self.optimizer.zero_grad()
        if self.double_dq:
            # Double Q-Learning target is Q*(S, A) <- r + γ max_a Q_target(S', a)
            # (1 - DONE) zeroes the bootstrap term for terminal transitions.
            target = REWARD + torch.mul((self.gamma *
                                         self.target_net(STATE2).max(1).values.unsqueeze(1)),
                                        1 - DONE)
            # Local net approximation of Q-value
            current = self.local_net(STATE).gather(1, ACTION.long())
        else:
            # Q-Learning target is Q*(S, A) <- r + γ max_a Q(S', a)
            target = REWARD + torch.mul((self.gamma *
                                         self.dqn(STATE2).max(1).values.unsqueeze(1)),
                                        1 - DONE)
            current = self.dqn(STATE).gather(1, ACTION.long())
        loss = self.l1(current, target)
        loss.backward()  # Compute gradients
        self.optimizer.step()  # Backpropagate error
        self.exploration_rate *= self.exploration_decay
        # Makes sure that exploration rate is always at least 'exploration min'
        self.exploration_rate = max(self.exploration_rate, self.exploration_min)
|
import os
import shutil
import django
from django.core.files.images import ImageFile
from django.core.exceptions import ObjectDoesNotExist
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "metgs.settings")
django.setup()
import db_data
from mainapp.models import *
def fill_top_menu():
    """Wipe and repopulate TopMenu rows from db_data.top_menu()."""
    TopMenu.objects.all().delete()
    for entry in db_data.top_menu():
        try:
            # pop() raises KeyError when the entry carries no 'image' path.
            picture = ImageFile(open(entry.pop('image'), 'rb'))
            record = TopMenu(**entry)
            record.image = picture
        except KeyError:
            record = TopMenu(**entry)
        record.save()
    print('Top menu created.')
def fill_organization():
    """Wipe and recreate the single Organization row from db_data.organization()."""
    Organization.objects.all().delete()
    payload = db_data.organization()
    logo_file = ImageFile(open(payload.pop('logo'), 'rb'))
    record = Organization(**payload)
    record.logo = logo_file
    record.save()
    print('Organization created.')
def fill_categories():
    """Wipe and repopulate Category rows from db_data.category().

    Items may optionally carry an 'image' path; items without one are
    created without an image.
    """
    Category.objects.all().delete()
    data = db_data.category()
    for item in data:
        try:
            # pop() raises KeyError when the item has no 'image' entry.
            image = ImageFile(open(item.pop('image'), 'rb'))
            cat = Category(**item)
            cat.image = image
        except KeyError:
            cat = Category(**item)
        finally:
            # NOTE(review): if open()/ImageFile raises anything other than
            # KeyError, `cat` is unbound here and this line raises NameError,
            # masking the original exception.
            cat.save()
    print('Categories created.')
def fill_good():
    """Wipe and repopulate Good rows from db_data.good().

    Each item may reference a category by 'category_name' and an optional
    'image' path; both are tolerated when missing.
    """
    Good.objects.all().delete()
    data = db_data.good()
    for item in data:
        try:
            category = Category.objects.get(name=item.pop('category_name'))
        except (ObjectDoesNotExist, KeyError):
            # Unknown or missing category: create the good uncategorized.
            category = None
        try:
            image = ImageFile(open(item.pop('image'), 'rb'))
            good = Good(**item)
            good.category = category
            good.image = image
        except KeyError:
            good = Good(**item)
        finally:
            # NOTE(review): as in fill_categories, a non-KeyError failure in
            # open()/ImageFile leaves `good` unbound and raises NameError here.
            good.save()
    print('Goods created.')
def fill_album():
    """Wipe and repopulate PhotoAlbum rows from db_data.album()."""
    PhotoAlbum.objects.all().delete()
    for record in db_data.album():
        PhotoAlbum(**record).save()
    print('Albums created')
def fill_photo():
    """Wipe and repopulate PhotoImage rows from db_data.photo()."""
    PhotoImage.objects.all().delete()
    for entry in db_data.photo():
        # Each photo references its parent album by name.
        parent_album = PhotoAlbum.objects.get(name=entry["album"])
        picture = ImageFile(open(entry["image"], 'rb'))
        PhotoImage(album=parent_album, image=picture).save()
    print('Photos created.')
def fill_news():
    """Wipe and repopulate News rows from db_data.news()."""
    News.objects.all().delete()
    for record in db_data.news():
        News(**record).save()
    print('News created.')
def fill_contact():
    """Wipe and repopulate Contact rows from db_data.contact()."""
    Contact.objects.all().delete()
    for record in db_data.contact():
        Contact(**record).save()
    print('Contacts created.')
if __name__ == '__main__':
    # Start from a clean media directory so stale uploaded files don't linger.
    if os.path.exists('media'):
        shutil.rmtree('media')
    # Populate in dependency order: albums before photos, categories before goods.
    fill_top_menu()
    fill_organization()
    fill_categories()
    fill_good()
    fill_album()
    fill_photo()
    fill_news()
    fill_contact()
    pass
|
import cProfile, pstats, io
def clingen(infile):
    """Parse a tab-separated ClinGen export into a dict of parallel lists.

    :param infile: path to a TSV file
    :return: {column0: [[col1...], [col3...], [col4...], [col5...]]} where
        each gene key maps to four parallel lists, one entry per input row.

    BUGFIX: the original stripped column 5 only for the FIRST row of each key
    (``line[5].strip()``) but appended the raw, newline-terminated value for
    subsequent rows; the strip is now applied consistently. The file is also
    managed with a context manager instead of manual close().
    """
    records = {}
    with open(infile) as handle:
        for raw in handle:
            fields = raw.split("\t")
            key = fields[0]
            if key not in records:
                records[key] = [[fields[1]], [fields[3]], [fields[4]], [fields[5].strip()]]
            else:
                records[key][0].append(fields[1])
                records[key][1].append(fields[3])
                records[key][2].append(fields[4])
                records[key][3].append(fields[5].strip())
    return records
def get_dd2p(infile):
    """Parse a comma-separated DDG2P-style file into a dict keyed on column 0.

    Each value is four parallel lists: column 4, column 3 (quotes stripped),
    column 2 (quotes stripped), and a spreadsheet HYPERLINK formula for the
    first PubMed id in column 9 (or 'na' when none is given).

    NOTE(review): fields are split naively on ',' — quoted fields containing
    commas will be mis-parsed; confirm whether the csv module should be used.
    NOTE(review): a header row, if present, is parsed like data — confirm
    whether it should be skipped.
    """
    dd2p = {}
    with open(infile, 'r', encoding="utf-8") as f:
        # BUG FIX: the original called readline() once and therefore parsed
        # only the first row of the file; iterate over every row instead.
        for raw in f:
            line = raw.split(",")
            key = line[0]
            if key not in dd2p:
                dd2p[key] = [[], [], [], []]
            dd2p[key][0].append(line[4])
            dd2p[key][1].append(line[3].strip('"'))
            dd2p[key][2].append(line[2].strip('"'))
            if len(line[9]) > 5:
                pmids = line[9].split(";")[:2]
                if pmids:
                    dd2p[key][3].append(
                        '=HYPERLINK("https://www.ncbi.nlm.nih.gov/pubmed/'
                        + pmids[0] + '","SUBSTITUTE")')
                else:
                    dd2p[key][3].append('na')
            else:
                dd2p[key][3].append('na')
    return dd2p
def read_panel_list(infile):
    """Read one identifier per line into a set, stripping surrounding whitespace.

    Uses a context manager so the file is closed even on error; blank lines
    become the empty string, exactly as in the original loop.
    """
    with open(infile) as f:
        return {line.strip() for line in f}
|
# Demonstrates %-style string formatting (Learn Python the Hard Way, ex. 6).
x = "There are %d types of people." % 10
binary = "binary"
do_not = "don't"
y = "Those who know %s and those who %s." % (binary, do_not)

# print() with a single argument behaves the same on Python 2 and 3.
print(x)
print(y)

print("I said: %r." % x)
print("I also said: '%s'." % y)

hilarious = False
# BUG FIX: the original string had no conversion specifier, so the
# `% hilarious` below raised "TypeError: not all arguments converted".
joke_evaluation = "Isn't that joke so funny?! %r"

print(joke_evaluation % hilarious)

w = "This is the left side of..."
e = "a string with a right side."

print(w + e)
|
f = open('input.txt', 'r')
line = f.read().split(' ')
x = [int(i) for i in line]
f.close()
i = 0
total = 0
while len(x) > 0:
children = x[i]
if children == 0:
metas = x[i+1]
meta_start = i+2
meta_end = meta_start + metas
total += sum(x[meta_start:meta_end])
del x[i:meta_end]
i -= 2
try:
x[i] -= 1
except IndexError as ex: # this happens once we are done
print(total)
break
else:
i += 2
|
import numpy as np
class store(object):
    """Toy store holding a product list, a location string and an owner name.

    Mutator methods return self so calls can be chained or printed directly.
    """

    def __init__(self, products, location, owner):
        """Store the given attributes and immediately print them."""
        self.products = products
        self.location = location
        self.owner = owner
        self.display_info()

    def add_product(self):
        """Append the hard-coded product 'book'; returns self for chaining."""
        self.products.append("book")
        return self

    def remove_product(self):
        """Remove the hard-coded product 'brush'; raises ValueError if absent."""
        self.products.remove('brush')
        return self

    def inventory(self):
        """Snapshot the product list; returns self for chaining.

        BUG FIX: the original assigned self.inventory = self.products,
        shadowing this method so a second call raised TypeError.
        """
        self.current_inventory = self.products
        return self

    def display_info(self):
        """Print all product details (fixed the 'prodcut' typo)."""
        print("store Info")
        print("product" + str(self.products))
        print("location" + str(self.location))
        print("owner" + str(self.owner))

    def __str__(self):
        return "Products ( {} ) Location ( {} ) Owner ( {} )".format(' '.join(self.products), self.location, self.owner)
# Demo: exercise the store class. Converted the Python-2-only `print x`
# statements to print() calls, which behave identically for one argument.
store1 = store(["apple", "brush", "store"], "47 zanker", "havisha")
print(store1)
print(store1.add_product())
print(store1.remove_product())
print(store1.inventory())
|
def fibonacci(n):
    """Return [0, 1] followed by the next n Fibonacci numbers.

    n=0 gives just [0, 1], matching the original script's output.
    """
    seq = [0, 1]
    for _ in range(n):
        seq.append(seq[-1] + seq[-2])
    return seq


if __name__ == '__main__':
    # BUG FIX: the original prompted via input() at import time; the prompt
    # and the printed sequence are unchanged when run as a script.
    n = int(input("enter a number: "))
    for value in fibonacci(n):
        print(value)
|
from pandac.PandaModules import * #basic Panda modules
from direct.showbase.DirectObject import DirectObject #event handling
from direct.particles.ParticleEffect import ParticleEffect #particle effects
from direct.actor.Actor import Actor
from Files.HUD import *
import math
class Movement(object):
    """Value object bundling the tuning constants for one movement mode.

    speed  -- multiplier applied to the player's base speed
    bobSpd -- amount added to the head-bob timer each update (fed to sin())
    bobAmt -- head-bob amplitude multiplier
    """
    def __init__(self, speed, bobSpd, bobAmt):
        self.speed = speed
        self.bobSpd = bobSpd
        self.bobAmt = bobAmt
class Player(object):
    """First-person player: movement with head-bob, sprint energy, placeable
    wall/light abilities, collision setup, sound effects and HUD updates.

    Relies on Panda3D globals installed elsewhere (base, render, loader) and
    on a parent object exposing die(level, finishedLevel).
    """
    #Initializes player
    def __init__(self, parent):
        """Create all player state, load models/sounds and hook game events."""
        self.parent = parent
        self.level = 0
        self.newLevel = False
        #Movement data
        self.speed = 40
        self.playerScale = 0.05
        self.lightDist = 0.1
        self.movement = {}
        self.movement['stand'] = Movement(0, 0.03, 1.3)
        self.movement['caution'] = Movement(1, 0.051, 2.1)
        self.movement['walk'] = Movement(2, 0.079, 2.7)
        self.movement['sprint'] = Movement(6, 0.16, 3.6)
        # Unit direction vectors in the player's local coordinate space.
        self.forward = Vec3(0,1,0)
        self.back = Vec3(0,-1,0)
        self.left = Vec3(-1,0,0)
        self.right = Vec3(1,0,0)
        self.bobTimer = 0
        self.recharging = True
        self.energyLeft = 100
        self.maxEnergy = 100
        #Ability data
        self.itemNode = NodePath('item')
        self.itemLoaded = False
        self.itemMax = 6
        self.itemDist = self.itemMax
        self.sideBuffer = 0
        self.wallModel = loader.loadModel('Models/Wall')
        self.walls = []
        self.lightModel = loader.loadModel('Models/light')
        self.lights = []
        self.lightZ = 2
        # Item-placement rays; created later in initCollisions().
        self.cRay1 = None
        self.cRay2 = None
        self.cRay3 = None
        self.hud = HUD(0,0)
        self.timer = 0
        self.initKeyMap()
        self.initControls()
        self.initPlayer()
        self.initSounds()
        base.enableParticles()
        base.accept('enemy-into-player', self.die)
        base.accept('playerEnv-into-exit', self.nextLevel)

    #Initializes keyMap
    def initKeyMap(self):
        """Reset all movement key states and ability toggles to off."""
        self.keyMap = {}
        self.keyMap['forward'] = 0
        self.keyMap['left'] = 0
        self.keyMap['right'] = 0
        self.keyMap['back'] = 0
        self.keyMap['sprint'] = 0
        self.keyMap['caution'] = 0
        self.abilities = {}
        self.abilities['wall'] = 0
        self.abilities['light'] = 0

    #Set key controls
    def initControls(self):
        """Register keyboard and mouse event handlers for movement/abilities."""
        #Movement
        base.accept('w', self.setKey, ['forward', 1])
        base.accept('a', self.setKey, ['left', 1])
        base.accept('d', self.setKey, ['right', 1])
        base.accept('s', self.setKey, ['back', 1])
        base.accept('w-up', self.setKey, ['forward', 0])
        base.accept('a-up', self.setKey, ['left', 0])
        base.accept('d-up', self.setKey, ['right', 0])
        base.accept('s-up', self.setKey, ['back', 0])
        #Abilities
        base.accept('shift', self.setKey, ['sprint', 1])
        base.accept('shift-up', self.setKey, ['sprint', 0])
        # NOTE(review): several modifier events below ('shift-w', 'control-w',
        # 'control-a', ...) are accepted twice with different handlers; a
        # later accept() for the same event may replace the earlier one —
        # confirm both key flags are actually set when the combo is pressed.
        base.accept('shift-w', self.setKey, ['sprint', 1])
        base.accept('shift-w', self.setKey, ['forward', 1])
        base.accept('shift-a', self.setKey, ['left', 1])
        base.accept('shift-d', self.setKey, ['right', 1])
        base.accept('shift-s', self.setKey, ['back', 1])
        base.accept('control', self.setKey, ['caution', 1])
        base.accept('control-up', self.setKey, ['caution', 0])
        base.accept('control-w', self.setKey, ['caution', 1])
        base.accept('control-w', self.setKey, ['forward', 1])
        base.accept('control-a', self.setKey, ['caution', 1])
        base.accept('control-a', self.setKey, ['left', 1])
        base.accept('control-d', self.setKey, ['caution', 1])
        base.accept('control-d', self.setKey, ['right', 1])
        base.accept('control-s', self.setKey, ['caution', 1])
        base.accept('control-s', self.setKey, ['back', 1])
        base.accept('1', self.toggleKey, ['wall'])
        base.accept('2', self.toggleKey, ['light'])
        base.accept('f', self.cancelKey)
        base.accept('mouse1', self.click)

    #Sets key values
    def setKey(self, key, value):
        """Record a key state; sprint is refused when energy is exhausted."""
        if key == 'sprint' and self.energyLeft <= 0:
            self.keyMap['sprint'] = 0
            return
        self.keyMap[key] = value

    def sprint(self):
        """Force sprint+forward on (not bound to any event above)."""
        self.keyMap['sprint'] = 1
        self.keyMap['forward'] = 1

    #Toggles ability
    def toggleKey(self, key):
        """Toggle ability `key`, turning any other ability off.

        Assumes spawn() has run so wallsLeft/lightsLeft exist.
        """
        #Turns off other abilities if a diff ability is toggled
        for ability in self.abilities.keys():
            if ability == key:
                self.abilities[ability] += 1
            else:
                self.abilities[ability] = 0
        #Loads/removes item
        if self.abilities[key] == 1:
            if ((key == 'wall' and self.wallsLeft > 0) or
                (key == 'light' and self.lightsLeft > 0)):
                self.loadItem(key)
        else:
            self.unloadItem(key)

    #Loads passed item
    def loadItem(self, item):
        """Attach the preview model for `item` ('wall' or 'light') to the
        player and enable the placement collision rays."""
        #Clears and resets itemNode to toggled ability
        self.itemNode.detachNode()
        if item == 'wall':
            self.itemNode = self.wallModel
            self.itemNode.setColor(Vec4(1,1,1,0))
            self.itemNode.setScale(15)
        elif item == 'light':
            self.itemNode = self.lightModel
            self.itemNode.setColor(Vec4(1,1,1,1))
            self.itemNode.setScale(1.6)
        self.itemNode.reparentTo(self.playerNode)
        self.itemNode.setCollideMask(BitMask32.allOff())
        self.itemLoaded = True
        #Attach collisionRays to prevent items from going into env
        self.cRay1.reparentTo(base.camera)
        self.cRay2.reparentTo(base.camera)
        self.cRay3.reparentTo(base.camera)
        base.itemTrav.addCollider(self.cRay1, base.queue)
        base.itemTrav.addCollider(self.cRay2, base.queue)
        base.itemTrav.addCollider(self.cRay3, base.queue)

    #Removes item when toggled off
    def unloadItem(self, item):
        """Hide the preview model and detach the placement rays."""
        self.itemLoaded = False
        self.abilities[item] = 0
        self.itemNode.detachNode()
        self.cRay1.detachNode()
        self.cRay2.detachNode()
        self.cRay3.detachNode()

    #Cancels ability
    def cancelKey(self):
        """Turn every ability off and remove the preview model and rays."""
        for ability in self.abilities.keys():
            self.abilities[ability] = 0
        self.itemLoaded = False
        self.itemNode.detachNode()
        self.cRay1.detachNode()
        self.cRay2.detachNode()
        self.cRay3.detachNode()

    #Place item when clicked, then clear loaded item
    def click(self):
        """Mouse handler: place the previewed item, then clear the toggle."""
        if self.itemLoaded:
            self.placeItem()
            self.cancelKey()

    def placeItem(self):
        """Dispatch to the active ability's placement and spend one charge."""
        if self.abilities['wall'] == 1:
            self.placeWall()
            self.wallsLeft -= 1
        elif self.abilities['light'] == 1:
            self.placeLight()
            self.lightsLeft -= 1

    #Places wall
    def placeWall(self):
        """Instantiate a wall at the preview position (sunk 10 units so
        update() can raise it gradually) and play its sound."""
        item = render.attachNewNode('item-wall')
        item.setPos(self.itemNode.getPos(render))
        item.setZ(item.getZ() - 10)
        item.setHpr(self.itemNode.getHpr(render))
        wall = loader.loadModel('Models/Wall')
        wall.reparentTo(item)
        wall.setScale(self.playerScale*15)
        #self.wallActor = Actor('Models/WallActor2', {'wallAnim':'Models/WallAnim2'})
        self.wallActor = loader.loadModel('Models/WallActor.egg')
        self.wallActor.reparentTo(item)
        self.wallActor.setScale(self.playerScale*15)
        #self.wallActor.setPlayRate(1.2, 'wallAnim')
        #self.wallActor.loop('wallAnim')
        self.walls.append(item)
        self.wallSfx.play()

    #Places light item and creates a point light
    def placeLight(self):
        """Instantiate a glowing light model plus a PointLight at the preview
        position, tag it with the current timer for rotation, play sound."""
        item = render.attachNewNode('item-light')
        item.setPos(self.itemNode.getPos(render))
        item.setHpr(self.itemNode.getHpr(render))
        light = loader.loadModel('Models/light')
        #Adds emission material to placed light
        mat = Material()
        mat.setEmission(VBase4(0.2,0.2,0.45,1))
        light.setMaterial(mat)
        light.reparentTo(item)
        light.setScale(self.playerScale*1.6)
        #Attach point light to light ability item
        iLightNode = NodePath('ilight')
        iLightNode.reparentTo(item)
        iLightNode.setZ(iLightNode.getZ() + 0.5)
        iLight = PointLight('item-light')
        iLightNP = iLightNode.attachNewNode(iLight)
        iLightNP.node().setColor(Vec4(0.2, 0.25, 0.3, 1.0))
        iLightNP.node().setAttenuation(Vec3(0, 0.001, 0.000009))
        iLightNP.setZ(iLightNP.getZ() + 0.6)
        render.setLight(iLightNP)
        #Sets placement time for rotating
        item.setTag('startTime', '%f' % self.timer)
        self.lights.append(item)
        self.magicSfx.play()

    #Loads player node, camera, and light
    def initPlayer(self):
        """Create the player node, attach the camera, hand actor and lights."""
        self.playerNode = NodePath('player-node')
        #setPos depends on spawn position in level
        self.playerNode.setScale(self.playerScale)
        self.playerNode.reparentTo(render)
        self.playerNode.setZ(2)
        #Loads camera
        lens = base.cam.node().getLens()
        lens.setFov(90)
        base.cam.node().setLens(lens)
        base.camera.reparentTo(self.playerNode)
        #Loads hand
        self.hand = Actor('Models/handActor', {'handAnim':'Models/handAnim'})
        self.hand.reparentTo(base.camera)
        self.hand.setScale(0.8)
        self.hand.setPos(7,9,-9)
        self.hand.setH(90)
        self.hand.setPlayRate(1.2, 'handAnim')
        self.hand.loop('handAnim')
        self.initHandLight()

    def initHandLight(self):
        """Light the hand model separately, then attach the player's own
        point light (pLightNode) to the camera."""
        # illuminate the hand properly
        self.hand.setLightOff()
        handLight = PointLight("handLight")
        hLightNode = NodePath('handLightNode')
        hLightNode.reparentTo(base.camera)
        hLightNode.setPos(Vec3(1.33, 2.4, 0))
        hLightNP = hLightNode.attachNewNode(handLight)
        hLightNP.node().setColor((0.002, 0.002, 0.002, 1.0))
        hLightNP.node().setAttenuation(Vec3(0, 0.0005, 0.000005))
        self.hand.setLight(hLightNP)
        #Loads artifact point light
        """
        mat = Material()
        mat.setEmission(VBase4(0.2,0.2,0.45,1))
        self.test = loader.loadModel('Models/light')
        self.test.setMaterial(mat)
        self.test.reparentTo(base.camera)
        self.test.setScale(0.03)
        self.test.setPos(Vec3(1.4,1.6,-0.5))
        """
        self.pLightNode = NodePath('light-node')
        self.pLightNode.reparentTo(base.camera)
        self.pLightNode.setPos(Vec3(1.33,2.4,0))
        pLight = PointLight('player-light')
        pLightNP = self.pLightNode.attachNewNode(pLight)
        pLightNP.node().setColor(Vec4(0.1, 0.15, 0.2, 1.0))
        # pLightNP.node().setColor(Vec4(0.001, 0.0015, 0.002, 1.0))
        pLightNP.node().setAttenuation(Vec3(0, 0.0005, 0.000005))
        render.setLight(pLightNP)

    #Spawn plays at given pos with walls and lights
    def spawn(self, pos, walls, lights):
        """Reset the player at `pos` with `walls`/`lights` ability charges."""
        self.newLevel = False
        self.spawnPos = pos
        self.maxWalls = walls
        self.maxLights = lights
        heightPos = LPoint3f(pos[0], pos[1], 3)
        self.playerNode.setPos(render, heightPos)
        self.wallsLeft = walls
        self.walls = []
        self.lightsLeft = lights
        self.lights = []
        self.energyLeft = 100
        self.bobTimer = 0

    def die(self, cEntry = True):
        """Collision callback: tell the parent the player lost this level."""
        self.parent.die(self.level, False)

    def nextLevel(self, cEntry):
        """Collision callback: tell the parent the player reached the exit."""
        self.newLevel = True
        self.parent.die(self.level, True)

    #Initialize collisions
    def initCollisions(self):
        """Create collision solids: enemy contact, enemy sight, environment
        pusher, active-environment trigger, and the three placement rays."""
        envMask = BitMask32(0x1)
        sightMask = BitMask32(0x2)
        deathMask = BitMask32(0x4)
        clearSightMask = BitMask32(0x8)
        # NOTE(review): 0x16 sets bits 1, 2 and 4 and therefore overlaps
        # sightMask and deathMask; 0x10 may have been intended — confirm.
        activeMask = BitMask32(0x16)
        #Collide with enemies
        cSphere = CollisionSphere( 0, 0, 2, 4 )
        cNode = CollisionNode('player')
        cNode.addSolid(cSphere)
        cNode.setCollideMask(BitMask32.allOff())
        cNode.setIntoCollideMask(deathMask)
        cNodePath = self.playerNode.attachNewNode(cNode)
        #cNodePath.show()
        base.cTrav.addCollider(cNodePath, base.queue)
        #collide with enemy sight
        cSphere = CollisionSphere( 0, 0, 2, 4 )
        cNode = CollisionNode('playerSight')
        cNode.addSolid(cSphere)
        cNode.setCollideMask(BitMask32.allOff())
        cNode.setFromCollideMask(sightMask)
        cNode.setIntoCollideMask(clearSightMask)
        cNodePath = self.playerNode.attachNewNode(cNode)
        #cNodePath.show()
        base.cTrav.addCollider(cNodePath, base.cHandler)
        #Collide with env
        cSphere = CollisionSphere(0,0,2,4)
        cNode = CollisionNode('pusherNode')
        cNode.addSolid(cSphere)
        cNode.setCollideMask(BitMask32.allOff())
        cNode.setFromCollideMask(envMask)
        cNodePath = self.playerNode.attachNewNode(cNode)
        #cNodePath.show()
        base.cTrav.addCollider(cNodePath, base.pusher)
        base.pusher.addCollider(cNodePath, self.playerNode, base.drive.node())
        #Collide with active env
        cSphere = CollisionSphere( 0, 0, 2, 4.1)
        cNode = CollisionNode('playerEnv')
        cNode.addSolid(cSphere)
        cNode.setCollideMask(BitMask32.allOff())
        cNode.setFromCollideMask(activeMask)
        cNodePath = self.playerNode.attachNewNode(cNode)
        #cNodePath.show()
        base.cTrav.addCollider(cNodePath, base.cHandler)
        #Item placement collision rays
        cNode = CollisionNode('rayRight')
        cRay = CollisionRay(0,0,0,0.4,1,0)
        cNode.addSolid(cRay)
        cNode.setCollideMask(BitMask32.allOff())
        cNode.setFromCollideMask(envMask)
        self.cRay1 = base.camera.attachNewNode(cNode)
        cNode = CollisionNode('rayLeft')
        cRay = CollisionRay(0,0,0,-0.4,1,0)
        cNode.addSolid(cRay)
        cNode.setCollideMask(BitMask32.allOff())
        cNode.setFromCollideMask(envMask)
        self.cRay2 = base.camera.attachNewNode(cNode)
        cNode = CollisionNode('rayMid')
        cRay = CollisionRay(0,0,0,0,1,0)
        cNode.addSolid(cRay)
        cNode.setCollideMask(BitMask32.allOff())
        cNode.setFromCollideMask(envMask)
        self.cRay3 = base.camera.attachNewNode(cNode)

    ########################################################
    def initSounds(self):
        """Load all sound effects; looping walk/run sounds start stopped."""
        self.walkSfx = base.loadSfx('sounds/footstep.ogg')
        self.walkSfx.setLoopCount(0)
        self.runSfx = base.loadSfx('sounds/run.ogg')
        self.runSfx.setLoopCount(0)
        self.movementSfx = None
        self.wallSfx = base.loadSfx('sounds/wall.ogg')
        self.magicSfx = base.loadSfx('sounds/magic.ogg')
        self.magicSfx.setVolume(.5)
        self.doorOpenSfx = base.loadSfx('sounds/door_open.ogg')
        self.doorCloseSfx = base.loadSfx('sounds/door_close.ogg')
        self.fireSfx = base.loadSfx('sounds/fire.ogg')

    #Updates player
    def update(self, dt):
        """Per-frame update: camera, movement, lights, rising walls, HUD.

        NOTE(review): self.timer advances by a fixed 0.05 per call rather
        than by dt, so light bobbing is frame-rate dependent — confirm.
        """
        self.moveCam()
        self.movePlayer(dt)
        self.moveLight()
        if self.itemLoaded:
            self.itemRay()
        # Placed walls rise out of the ground until they reach Z = 0.
        for wall in self.walls:
            if wall.getZ() < 0:
                wall.setZ(wall.getZ() + .1)
        self.hud.updateHUD(self.wallsLeft, self.lightsLeft, self.energyLeft)
        self.timer += 0.05

    def itemRay(self):
        """Cast the placement rays and clamp the preview distance so items
        cannot be pushed through the environment."""
        base.itemTrav.traverse(render)
        if base.queue.getNumEntries() == 0:
            return
        base.queue.sortEntries()
        playerPos = base.camera.getPos(render)
        if base.queue.getNumEntries() > 0:
            first = base.queue.getEntry(0)
            cPos = first.getSurfacePoint(render)
            rayName = first.getFromNodePath().getName()
            # Horizontal distance to the nearest hit, capped at itemMax.
            dist = math.sqrt((playerPos[0]-cPos[0])**2 + (playerPos[1]-cPos[1])**2)
            self.itemDist = min(dist, self.itemMax)
            """
            if rayName == 'rayLeft':
                self.sideBuffer = 0.5
            elif rayName == 'rayRight':
                self.sideBuffer = -0.5
            else:
                self.sideBuffer = 0
            """

    #Moves camera
    def moveCam(self):
        """Mouse-look: recenter the pointer each frame and apply the delta to
        the player's heading and the camera's pitch (clamped to +/-90)."""
        mouse = base.win.getPointer(0)
        x = mouse.getX()
        y = mouse.getY()
        if base.win.movePointer(0, base.win.getXSize()/2, base.win.getYSize()/2):
            self.playerNode.setH(self.playerNode.getH() - (x - base.win.getXSize()/2)*.1)
            #Move camera based on if ability is toggled
            if self.itemLoaded:
                self.moveItem(y)
            else:
                cam_p = base.camera.getP() - (y - base.win.getYSize()/2)*.1
                if cam_p >= -90 and cam_p <= 90:
                    base.camera.setP(cam_p)
                #Moves pitch of player light based on camera pitch
                rad = deg2Rad(base.camera.getP())
                self.pLightNode.setPos(0,self.lightDist*math.cos(rad)/self.playerScale,
                                       self.lightDist*math.sin(rad)/self.playerScale)

    #Moves item and camera if ability is toggled
    def moveItem(self, y):
        """Keep the preview item in front of the player at itemDist and snap
        its heading to the nearest 45-degree world orientation."""
        base.camera.setP(0)
        itemDist = max(self.itemDist-0.5,1)
        if self.abilities['light'] == 1:
            pos = Vec3(self.sideBuffer/self.playerScale,itemDist/self.playerScale,
                       (-1*self.playerNode.getZ()+self.lightZ)/self.playerScale)
        else:
            pos = Vec3(self.sideBuffer/self.playerScale,itemDist/self.playerScale,
                       -1*self.playerNode.getZ()/self.playerScale)
        self.itemNode.setFluidPos(pos)
        heading = self.playerNode.getH()
        heading = (int(heading) % 180)
        if (heading >= 60 and heading < 120):
            self.itemNode.setH(render, 90)
        elif (heading >= 30 and heading < 60):
            self.itemNode.setH(render, 45)
        elif (heading >= 120 and heading < 150):
            self.itemNode.setH(render, 135)
        else:
            self.itemNode.setH(render, 0)

    #Move player based on key movements
    def movePlayer(self, dt):
        """Pick the movement mode from the key map, move the player, manage
        sprint energy drain/recharge, and switch the footstep sounds."""
        self.recharging = True
        #Not moving
        if (self.keyMap['forward'] + self.keyMap['back'] +
            self.keyMap['left'] + self.keyMap['right']) == 0:
            move = self.movement['stand']
        #Moving
        elif self.keyMap['sprint'] == 1 and self.keyMap['forward'] == 1:
            if self.energyLeft <= 0:
                self.energyLeft = 0
                self.keyMap['sprint'] = 0
                move = self.movement['walk']
            else:
                self.energyLeft -= 0.3
                self.recharging = False
                move = self.movement['sprint']
        elif self.keyMap['caution'] == 1:
            move = self.movement['caution']
        else:
            move = self.movement['walk']
        if self.keyMap['forward'] == 1:
            self.playerNode.setFluidPos(self.playerNode, self.forward * dt * move.speed * self.speed)
        elif self.keyMap['back'] == 1:
            self.playerNode.setFluidPos(self.playerNode, self.back * dt * move.speed * self.speed)
        if self.keyMap['left'] == 1:
            self.playerNode.setFluidPos(self.playerNode, self.left * dt * move.speed * self.speed)
        elif self.keyMap['right'] == 1:
            self.playerNode.setFluidPos(self.playerNode, self.right * dt * move.speed * self.speed)
        # Keep the looping movement sound in sync with the current mode.
        if move == self.movement['sprint'] and self.movementSfx != self.runSfx:
            if self.movementSfx != None:
                self.movementSfx.stop()
            self.movementSfx = self.runSfx
            self.movementSfx.play()
        elif move == self.movement['walk'] and self.movementSfx != self.walkSfx:
            if self.movementSfx != None:
                self.movementSfx.stop()
            self.movementSfx = self.walkSfx
            self.movementSfx.play()
        elif move != self.movement['sprint'] and move != self.movement['walk']:
            if self.movementSfx != None:
                self.movementSfx.stop()
                self.movementSfx = None
        self.headBob(move)
        if self.recharging and self.energyLeft < 100:
            self.energyLeft += 0.035

    def headBob(self, movement):
        """Bob the camera vertically with a sine wave scaled by the mode."""
        waveslice = math.sin(self.bobTimer)
        if waveslice != 0:
            change = waveslice * movement.bobAmt
            base.camera.setZ(change)
        else:
            base.camera.setZ(0)
        self.bobTimer = (self.bobTimer + movement.bobSpd) % (math.pi*2)

    def moveLight(self):
        """Bob and spin every placed light around its tagged start time."""
        waveslice = math.sin(self.timer)
        for light in self.lights:
            change = waveslice * 0.1
            light.setZ( self.lightZ + change )
            light.setH((float(light.getTag('startTime')) + self.timer) * 8 )
        #self.test.setZ( waveslice * 0.1 - 0.5)
        #self.test.setH( self.timer * 4 )

    def clearItems(self):
        """Remove every placed wall and light from the scene graph."""
        for light in self.lights:
            light.removeNode()
        for wall in self.walls:
            wall.removeNode()
|
from flask import Flask, render_template, request
# Flask application object; the route decorators below register on it.
app = Flask(__name__)
@app.route('/hello/')
@app.route('/hello/<name>')
def hello(name=None):
    """Render hello.html; name is None for the bare /hello/ URL."""
    return render_template('hello.html', name=name)
@app.route('/test', methods=['GET', 'POST'])
def test():
    """Echo the request path and HTTP method as plain text."""
    return 'path={} method={}'.format(request.path, request.method)
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Handle the login form.

    GET renders the empty form; a POST with the hard-coded test/test
    credentials returns a greeting, otherwise the form is re-rendered
    with an error message.
    """
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        if username == 'test' and password == 'test':
            return 'login ' + username
        return render_template('login.html', error='Invalid username/password')
    return render_template('login.html', error=None)
# Run the built-in Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
@bot.command()
async def mute(ctx, member: discord.Member):
    """Mute a member by assigning a 'Muted' role.

    Reuses an existing 'Muted' role when one exists; otherwise creates it
    and denies send_messages in every text channel of the guild.
    """
    guild = ctx.guild
    for role in guild.roles:
        if role.name == 'Muted':
            await member.add_roles(role)
            await ctx.send('{} has been muted'.format(member.mention))
            return
    # BUG FIX: the class is discord.PermissionOverwrite (no trailing "s"),
    # and the overwrite must be applied per channel — the original called
    # guild.set_permissions and ignored the loop variable.
    overwrite = discord.PermissionOverwrite(send_messages=False)
    newRole = await guild.create_role(name='Muted')
    for channel in guild.text_channels:
        await channel.set_permissions(newRole, overwrite=overwrite)
    await member.add_roles(newRole)
    await ctx.send('{} has been muted'.format(member.mention))
|
#-------------------------------------------------------------------------------
# Name: FirstGameMain
# Purpose: This is the main file of the game, it handles the game state
#
# Author: Marko Nerandzic
#
# Created: 25/12/2012
# Copyright: (c) Marko Nerandzic 2012
# Licence: This work is licensed under the Creative Commons Attribution-
# NonCommercial-NoDerivs 3.0 Unported License. To view a copy of
# this license, visit http://creativecommons.org/licenses/by-nd/3.0/.
#-------------------------------------------------------------------------------
import FirstPlayer, FirstPowerUp, FirstPlayerConstants, FirstInputs, FirstEnemy, FirstPoint, FirstButton, pygame, random
from pygame.locals import *
from FirstPlayerConstants import *
def main():
#Initialization of the game variables
endGame = False
state = MAINMENU
numOfTicks = 0
last2Types = [0, 0]
#Initization of PyGame
pygame.init()
#Display setup
DISPLAYSURF = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
pygame.display.set_caption("Lucky Dodger")
#Initialization of clock used for delay to achieve 30 frames per second
FPSCLOCK = pygame.time.Clock()
#Initialization of text in the game
fontObj = pygame.font.Font('freesansbold.ttf', 12)
scoreSurface = fontObj.render('Score: ', True, BLACK, WHITE)
scoreNumSurface = fontObj.render('0', True, BLACK, WHITE)
textScoreNumRect = scoreNumSurface.get_rect()
textScoreRect = scoreSurface.get_rect()
textScoreNumRect.right = SCREEN_WIDTH
textScoreNumRect.top = 0
textScoreRect.right = textScoreNumRect.left
textScoreRect.top = 0
titleFontObject = pygame.font.Font('C:\\windows\\Fonts\\cambriai.ttf', 72)
titleFirstLineSurface = titleFontObject.render('Lucky', True, BLACK, WHITE)
titleFirstLineRect = titleFirstLineSurface.get_rect()
titleFirstLineRect.topleft = (SCREEN_WIDTH/2 - titleFirstLineRect.width/2 + 10, 5)
titleSecondLineSurface = titleFontObject.render('Dodger', True, BLACK, WHITE)
titleSecondLineRect = titleSecondLineSurface.get_rect()
titleSecondLineRect.topleft = (SCREEN_WIDTH/2 - titleSecondLineRect.width/2 + 10,90)
#Initialization of controls/instructions image
controlsImage = pygame.image.load('Controls.PNG')
#Initialization of return ot menu button in controls screen
controlsGoToMenuButton = FirstButton.Button((int(SCREEN_WIDTH/4), 355 - 25, int(SCREEN_WIDTH/2), 50), BLUE, "Go to Menu")
#Initialization of game over text images
gameOverFont = pygame.font.Font('freesansbold.ttf', 64)
gameOverSurface = gameOverFont.render('GAME OVER', True, BLACK, WHITE)
gameOverRect = gameOverSurface.get_rect()
gameOverRect.center = (SCREEN_WIDTH/2, (SCREEN_HEIGHT/2) - 50)
gameOverScoreFont = pygame.font.Font('freesansbold.ttf', 32)
gameOverResetButton = FirstButton.Button((int(SCREEN_WIDTH/4), 290 - 25, int(SCREEN_WIDTH/2), 50), GREEN, "Restart Game")
gameOverGoToMenuButton = FirstButton.Button((int(SCREEN_WIDTH/4), 355 - 25, int(SCREEN_WIDTH/2), 50), BLUE, "Go to Menu")
#Initialization of the player object the user controls
player = FirstPlayer.Player(((SCREEN_WIDTH-PLAYER_WIDTH)/2, (SCREEN_HEIGHT - PLAYER_HEIGHT)/ 2, PLAYER_WIDTH, PLAYER_HEIGHT), PLAYER_SPEED)
#Definition of an array to hold all of the enemies in the game, whose number increases
enemies = []
#Initialization of the point object
points = FirstPoint.Point(newRectNotAtObject(player.getRect(), POINT_WIDTH, POINT_HEIGHT))
#Initialization of the power up object that shrinks the enemies or increases the player's speed
powerUp = FirstPowerUp.PowerUp((0, 0, 0, 0))
#Initialization of the object that will handle user inputs
inputs = FirstInputs.Inputs()
#Initialization of main menu buttons
mainMenuButtons = [FirstButton.Button((int(SCREEN_WIDTH/4), SCREEN_HEIGHT/2,int(SCREEN_WIDTH/2), 50), RED, "Start Game"), FirstButton.Button((int(SCREEN_WIDTH/4), SCREEN_HEIGHT/2 + 70,int(SCREEN_WIDTH/2), 50), RED, "Controls/Instructions")]
#Initialization of the font that all buttons use for their messages
buttonFontObject = pygame.font.Font('C:\\windows\\Fonts\\nrkis.ttf', 24)
while not endGame:
#Resets the display
DISPLAYSURF.fill(WHITE)
#Updates the state of the inputs based on the user's actions since last called
inputs.update()
if state == MAINMENU:
mouseEvents = inputs.getMouseEvents()
#Checks if the user clicked on any of the buttons
counter = 0
buttonClicked = False
while counter < len(mainMenuButtons) and not buttonClicked:
counter2 = 0
while counter2 < len(mouseEvents) and not buttonClicked:
mouseX, mouseY = mouseEvents[counter2].pos
if mouseX >= mainMenuButtons[counter].getArea().left and mouseX <= mainMenuButtons[counter].getArea().right and mouseY >= mainMenuButtons[counter].getArea().top and mouseY <= mainMenuButtons[counter].getArea().bottom:
buttonClicked = True
counter2 += 1
counter += 1
#If they clicked on any of the buttons, deals accordingly
if buttonClicked:
if counter - 1 == MAINMENUSTARTGAME:
state = GAMEINIT
if counter - 1 == MAINMENUCONTROLS:
state = CONTROLS
#Checks if the user wants to quit the game
if inputs.getQuit():
endGame = True
if state == CONTROLS:
mouseEvents = inputs.getMouseEvents()
#Checks if the user wants to quit the game
if inputs.getQuit():
endGame = True
#Checks if the user clicked on the button
counter = 0
buttonClicked = False
while counter < len(mouseEvents) and not buttonClicked:
mouseX, mouseY = mouseEvents[counter].pos
if mouseX >= controlsGoToMenuButton.getArea().left and mouseX <= controlsGoToMenuButton.getArea().right and mouseY >= controlsGoToMenuButton.getArea().top and mouseY <= controlsGoToMenuButton.getArea().bottom:
buttonClicked = True
counter += 1
#If they did, returns them to the menu
if buttonClicked:
state = MAINMENU
#Renders the button text
controlsGoToMenuButtonTextSurface = buttonFontObject.render(controlsGoToMenuButton.getMessage(), True, BLACK, controlsGoToMenuButton.getColour())
controlsGoToMenuButtonTextRect = controlsGoToMenuButtonTextSurface.get_rect()
controlsGoToMenuButtonTextRect.center = controlsGoToMenuButton.getArea().center
#If the state is set to initialize the game, it initializes/resets all necessary parts of the game
if state == GAMEINIT:
player.reset((SCREEN_WIDTH-PLAYER_WIDTH)/2, (SCREEN_HEIGHT - PLAYER_HEIGHT)/ 2, PLAYER_SPEED)
tempRect = newRectNotAtObject(player.getRect(), POINT_WIDTH, POINT_HEIGHT)
points.setRect(tempRect)
enemies = []
powerUp.setActive(False)
inputs.resetInputs()
scoreSurface = fontObj.render('Score: ', True, BLACK, WHITE)
scoreNumSurface = fontObj.render(str(player.getScore()), True, BLACK, WHITE)
textScoreNumRect = scoreNumSurface.get_rect()
textScoreRect = scoreSurface.get_rect()
textScoreNumRect.right = SCREEN_WIDTH
textScoreNumRect.top = 0
textScoreRect.right = textScoreNumRect.left
textScoreRect.top = 0
numOfTicks = 0
state = PLAYGAME
if state == PLAYGAME:
#Interprets the inputs from the player and updates accordingly
if inputs.getQuit():
endGame = True
elif inputs.getUpKeyPressed() and not player.getMovingVertically():
player.moveUp(DISTANCEPERMOVE)
elif inputs.getDownKeyPressed() and not player.getMovingVertically():
player.moveDown(DISTANCEPERMOVE)
elif inputs.getLeftKeyPressed() and not player.getMovingHorizontally():
player.moveLeft(DISTANCEPERMOVE)
elif inputs.getRightKeyPressed() and not player.getMovingHorizontally():
player.moveRight(DISTANCEPERMOVE)
#This loop checks the player position against the position of all enemies to check for collisions
#If a collision is detected, the state is set to the gameOver state
counter = 0
while counter < len(enemies):
if player.getRect().colliderect(enemies[counter].getRect()) == 1:
state = GAMEOVER
counter += 1
#Checks if there is a collision between the player and the point
#If there is a collision, the point moves somewhere else, a point is added to the player's score and a new enemy is created
if player.getRect().colliderect(points.getRect()) == 1:
player.addPoints(1)
tempRect = newRectNotAtObject(player.getRect(), POINT_WIDTH, POINT_HEIGHT)
points.setRect(tempRect)
tempRect = newRectNotAtObject(player.getRect(), ENEMY_WIDTH, ENEMY_HEIGHT)
enemies.append(FirstEnemy.Enemy(tempRect, random.randint(1,2)))
#Checks if there is a collision between the player and the powerup object
#If there is a collision and the power up is shown on the screen, the player or enemies get adjusted based on the type of powerup
if player.getRect().colliderect(powerUp.getRect()) == 1 and powerUp.getActive():
powerUp.setActive(False)
if powerUp.getType() == SPEED_UP:
player.setPowerUp(powerUp.getType(), POWERUP_DURATION)
elif powerUp.getType() == SHRINK_ENEMIES:
counter = 0
while counter < len(enemies):
enemies[counter].setPowerUp(powerUp.getType(), POWERUP_DURATION)
counter += 1
#If the powerup is not shown on the screen and it is time for it to be active
#It generates a new location and type for the powerup and sets it to be active
if numOfTicks % POWERUP_RESPAWN_TICKS == 0 and not powerUp.getActive():
powerUp.setActive(True)
powerUp.setRect(newRectNotAtObject(player.getRect(), POWERUP_WIDTH, POWERUP_HEIGHT))
randomNum = random.randint(1, 2) #Because all of the powerup types are numbers between 1 to 2
while randomNum == last2Types[0] and randomNum == last2Types[1]: #If randomNum is equal to the last 2 numbers generated, pick a new one
randomNum = random.randint(1, 2)
powerUp.setType(randomNum)
last2Types[0] = last2Types[1]
last2Types[1] = randomNum
#Recalulates the score and text object required and position of the text objects and renders them
scoreNumSurface = fontObj.render(str(player.getScore()), True, BLACK, WHITE)
textScoreNumRect = scoreNumSurface.get_rect()
textScoreRect = scoreSurface.get_rect()
textScoreNumRect.right = SCREEN_WIDTH
textScoreNumRect.top = 0
textScoreRect.right = textScoreNumRect.left
textScoreRect.top = 0
if state == GAMEOVER:
#Waits until the user presses the quit button
if inputs.getQuit():
endGame = True
#Checks if the user clicked any of the buttons
counter = 0
buttonClicked = False
while counter < len(inputs.getMouseEvents()) and not buttonClicked:
mouseX, mouseY = inputs.getMouseEvents()[counter].pos
if mouseX >= gameOverResetButton.getArea().left and mouseX <= gameOverResetButton.getArea().right and mouseY >= gameOverResetButton.getArea().top and mouseX <= gameOverResetButton.getArea().bottom:
buttonClicked = True
buttonSelected = GAMEOVERRESTART
if mouseX >= gameOverGoToMenuButton.getArea().left and mouseX <= gameOverGoToMenuButton.getArea().right and mouseY >= gameOverGoToMenuButton.getArea().top and mouseX <= gameOverGoToMenuButton.getArea().bottom:
buttonClicked = True
buttonSelected = GAMEOVERGOTOMENU
counter += 1
#If they did, deals accordingly
if buttonClicked:
if buttonSelected == GAMEOVERRESTART:
state = GAMEINIT
if buttonSelected == GAMEOVERGOTOMENU:
state = MAINMENU
#Renders and calculates the position of the game over text
scoreNumSurface = gameOverScoreFont.render(str(player.getScore()), True, BLACK, WHITE)
scoreSurface = gameOverScoreFont.render('Score: ', True, BLACK, WHITE)
textScoreNumRect = scoreNumSurface.get_rect()
textScoreRect = scoreSurface.get_rect()
textScoreNumRect.right = (textScoreRect.width + textScoreNumRect.width + SCREEN_WIDTH)/2
textScoreNumRect.top = SCREEN_HEIGHT/ 2
textScoreRect.right = textScoreNumRect.left
textScoreRect.top = SCREEN_HEIGHT/ 2
#Renders and positions the button text
gameOverResetButtonTextSurface = buttonFontObject.render(gameOverResetButton.getMessage(), True, BLACK, gameOverResetButton.getColour())
gameOverResetButtonTextRect = gameOverResetButtonTextSurface.get_rect()
gameOverResetButtonTextRect.center = gameOverResetButton.getArea().center
gameOverGoToMenuButtonTextSurface = buttonFontObject.render(gameOverGoToMenuButton.getMessage(), True, BLACK, gameOverGoToMenuButton.getColour())
gameOverGoToMenuButtonTextRect = gameOverGoToMenuButtonTextSurface.get_rect()
gameOverGoToMenuButtonTextRect.center = gameOverGoToMenuButton.getArea().center
if state == MAINMENU:
#Draws the button and renders and draws the text inside of the button for all of the main menu buttons
counter = 0
while counter < len(mainMenuButtons):
pygame.draw.rect(DISPLAYSURF, mainMenuButtons[counter].getColour(), mainMenuButtons[counter].getArea())
textSurface = buttonFontObject.render(mainMenuButtons[counter].getMessage(), True, BLACK, mainMenuButtons[counter].getColour())
textRect = textSurface.get_rect()
textRect.center = mainMenuButtons[counter].getArea().center
DISPLAYSURF.blit(textSurface, textRect)
counter += 1
#Draws the title onto the screen
DISPLAYSURF.blit(titleFirstLineSurface, titleFirstLineRect)
DISPLAYSURF.blit(titleSecondLineSurface, titleSecondLineRect)
if state == CONTROLS:
#Draws the button and draws the text inside the button
pygame.draw.rect(DISPLAYSURF, controlsGoToMenuButton.getColour(), controlsGoToMenuButton.getArea())
DISPLAYSURF.blit(controlsGoToMenuButtonTextSurface, controlsGoToMenuButtonTextRect)
#Draws the controls/instructions image onto the screen
DISPLAYSURF.blit(controlsImage, (20,20))
if state == PLAYGAME:
#If the powerup is active, it draws it to the screen
if powerUp.getActive():
pygame.draw.rect(DISPLAYSURF, powerUpColours[powerUp.getType()], powerUp.getRect())
#Draws the point object to the screen
pygame.draw.rect(DISPLAYSURF, BROWN, points.getRect())
#Draws all of the enemies to the screen
counter = 0
while counter < len(enemies):
pygame.draw.rect(DISPLAYSURF, PURPLE, enemies[counter].update())
counter += 1
#Draws the player to the screen
pygame.draw.rect(DISPLAYSURF, RED, player.update())
#Draws the text objects to the screen
DISPLAYSURF.blit(scoreSurface, textScoreRect)
DISPLAYSURF.blit(scoreNumSurface, textScoreNumRect)
if state == GAMEOVER:
#Draws the text objects to the screen
DISPLAYSURF.blit(gameOverSurface, gameOverRect)
DISPLAYSURF.blit(scoreSurface, textScoreRect)
DISPLAYSURF.blit(scoreNumSurface, textScoreNumRect)
pygame.draw.rect(DISPLAYSURF, gameOverResetButton.getColour(), gameOverResetButton.getArea())
DISPLAYSURF.blit(gameOverResetButtonTextSurface, gameOverResetButtonTextRect)
pygame.draw.rect(DISPLAYSURF, gameOverGoToMenuButton.getColour(), gameOverGoToMenuButton.getArea())
DISPLAYSURF.blit(gameOverGoToMenuButtonTextSurface, gameOverGoToMenuButtonTextRect)
#Updates the screen
pygame.display.update()
#Delays the loop to have create 30 frames per second
FPSCLOCK.tick(FPS)
#Increments the number of times looped since the start of the game
numOfTicks += 1
#Exits pygame
pygame.quit()
pass
#This function makes sure that the object that is being created or moved is not at the same position as the rectangle to avoid
def newRectNotAtObject(avoidRect, createWidth, createHeight):
    """Return a random on-screen pygame.Rect of the given size that does not
    overlap avoidRect and is at least MIN_X_DISTANCE / MIN_Y_DISTANCE away.

    Keeps sampling random positions until a suitable candidate is found.
    """
    while True:
        candidate = pygame.Rect((
            random.randint(0, (SCREEN_WIDTH - createWidth)),
            random.randint(0, (SCREEN_HEIGHT - createHeight)),
            createWidth, createHeight))
        if avoidRect.colliderect(candidate) == 1:
            continue
        # NOTE(review): centerx + width/2 is the rect's *right edge*, not its
        # centre; for equal-sized rects the difference works out the same —
        # confirm this was intended. Expression preserved from the original.
        dx = abs((candidate.centerx + candidate.width/2) - (avoidRect.centerx + avoidRect.width/2))
        dy = abs((candidate.centery + candidate.height/2) - (avoidRect.centery + avoidRect.height/2))
        if dx > MIN_X_DISTANCE and dy > MIN_Y_DISTANCE:
            return candidate
# Script entry point: start the game only when run directly, not on import.
if __name__ == '__main__':
    main()
|
# Default AppConfig path for this Django app.
# NOTE(review): default_app_config is deprecated since Django 3.2 (app configs
# are auto-discovered) — harmless, but confirm the project's Django version.
default_app_config = 'teams.apps.TeamsAppConfig'
|
# coding=UTF-8
# 敏感操作Token验证
import random
from hashlib import sha256
# 验证集合
class Verify():
    """Token generators for sensitive operations (registration / e-mail activation).

    Fix: the original drew tokens from the non-cryptographic ``random`` module,
    and the registration token hashed ``randint(0, 100)`` — only 101 possible
    tokens, trivially brute-forced. Both tokens now use ``secrets``.
    """
    def __init__(self):
        super().__init__()

    def GetSubToken(self):  # Generate a registration token
        """Return a 64-char SHA-256 hex digest of 32 cryptographically random bytes."""
        import secrets  # local import keeps the module's import block unchanged
        return sha256(secrets.token_bytes(32)).hexdigest()

    def EmailCheckToken(self):  # Generate an e-mail activation token
        """Return a 6-character code; each position is a digit, a lowercase
        letter or an uppercase letter, chosen unpredictably."""
        import secrets
        bit = 6  # number of characters in the code
        O_Token = ''
        for _ in range(bit):
            num = secrets.choice('0123456789')
            lower = secrets.choice('abcdefghijklmnopqrstuvwxyz')  # a~z
            upper = secrets.choice('ABCDEFGHIJKLMNOPQRSTUVWXYZ')  # A~Z
            O_Token += secrets.choice([num, lower, upper])
        return O_Token
|
from xml.dom.minidom import parse
import xml.dom.minidom
# --- Movies compendium ------------------------------------------------------
# Walk the movie catalogue and print each movie's fields.
DOMTree = xml.dom.minidom.parse("compendiums/movies.xml")
collection = DOMTree.documentElement
if collection.hasAttribute("shelf"):
    print("Root element : %s" % collection.getAttribute("shelf"))
movies = collection.getElementsByTagName("movie")
for movie in movies:
    print("*****Movie*****")
    if movie.hasAttribute("title"):
        print("Title: %s" % movie.getAttribute("title"))
    # renamed from `type` / `format`: do not shadow the built-ins
    movie_type = movie.getElementsByTagName('type')[0]
    print("Type: %s" % movie_type.childNodes[0].data)
    movie_format = movie.getElementsByTagName('format')[0]
    print("Format: %s" % movie_format.childNodes[0].data)
    rating = movie.getElementsByTagName('rating')[0]
    print("Rating: %s" % rating.childNodes[0].data)
    description = movie.getElementsByTagName('description')[0]
    print("Description: %s" % description.childNodes[0].data)
print("\n"*3)

# --- Archmage bestiary ------------------------------------------------------
DOMTree = xml.dom.minidom.parse("compendiums/Archmage")
collection = DOMTree.documentElement
if collection.hasAttribute("version"):
    print("Version is : %s" % collection.getAttribute("version"))
monsters = collection.getElementsByTagName("monster")
for monster in monsters:
    print("*****Monster*****")
    if monster.hasAttribute("name"):
        print("Name: %s" % monster.getAttribute("name"))
    monster_type = monster.getElementsByTagName('type')[0]
    print("Type: %s" % monster_type.childNodes[0].data)
    hp = monster.getElementsByTagName('hp')[0]
    print("HP: %s" % hp.childNodes[0].data)
    ac = monster.getElementsByTagName('ac')[0]
    print("AC: %s" % ac.childNodes[0].data)
    alignment = monster.getElementsByTagName('alignment')[0]
    print("Alignment: %s" % alignment.childNodes[0].data)
    # only the first <action> is shown, as in the original
    action = monster.getElementsByTagName('action')[0]
    print("Action: %s" % action.childNodes[0].data)

# --- Items compendium -------------------------------------------------------
mydoc = xml.dom.minidom.parse('compendiums/items.xml')
items = mydoc.getElementsByTagName('item')
# one specific item attribute
print('Item #2 attribute:')
print(items[1].attributes['name'].value)
# all item attributes
print('\nAll attributes:')
for elem in items:
    print(elem.attributes['name'].value)
# one specific item's data
print('\nItem #2 data:')
print(items[1].firstChild.data)
print(items[1].childNodes[0].data)
# all items data
print('\nAll item data:')
for elem in items:
    print(elem.firstChild.data)
print("\n"*2)

# --- Same walk over the Archmage file ---------------------------------------
compendium = xml.dom.minidom.parse("compendiums/Archmage")
monsters = compendium.getElementsByTagName('monster')
print(f"Monster 1 version: {monsters[0].attributes['version'].value}")
print("All version attributes")
for elem in monsters:
    print(elem.attributes['version'].value)
print(monsters[0].firstChild.data)
# first six child nodes, exactly as the original printed them one by one
for child_index in range(6):
    print(monsters[0].childNodes[child_index].data)
|
class Node:
    """A (value, pos) pair — a prefix sum and the index it ends at.

    Ordered by value first, with pos breaking ties.

    Bug fix: the original defined only ``__cmp__``, which Python 3 ignores,
    leaving instances unorderable; rich comparisons are provided instead.
    """

    def __init__(self, _value, _pos):
        self.value = _value  # prefix-sum value
        self.pos = _pos      # end index of the prefix (-1 for the empty prefix)

    def _key(self):
        # Comparison key: value first, then position.
        return (self.value, self.pos)

    def __lt__(self, other):
        return self._key() < other._key()

    def __eq__(self, other):
        return self._key() == other._key()

    def __cmp__(self, other):  # kept for any legacy Python 2 callers
        if self.value == other.value:
            return self.pos - other.pos
        return self.value - other.value
class Solution:
    """
    @param nums: A list of integers
    @return: A list of integers includes the index of the first number
             and the index of the last number
    """

    def subarraySumClosest(self, nums):
        """Return [start, end] of the contiguous subarray whose sum is closest to 0.

        Prefix-sum technique: two prefix sums close in value bracket a
        subarray whose sum is their difference, so a stable sort of the
        prefix sums followed by a scan of adjacent pairs finds the answer
        in O(n log n).

        Fixes vs. the original: debug ``print`` calls removed, the builtin
        name ``sum`` is no longer shadowed, the arbitrary 1e12 sentinel is
        replaced by infinity, and the helper ``Node`` class is no longer
        needed — plain (value, pos) tuples carry the same information.
        """
        # (prefix_sum, end_index); index -1 represents the empty prefix.
        prefix = [(0, -1)]
        running = 0
        for i in range(len(nums)):
            running += nums[i]
            prefix.append((running, i))
        prefix.sort(key=lambda p: p[0])  # stable, same order as the original sorted()
        results = [0, 0]
        best = float('inf')
        for i in range(len(prefix) - 1):
            diff = prefix[i + 1][0] - prefix[i][0]
            start = min(prefix[i + 1][1], prefix[i][1]) + 1
            # Prefer a smaller difference; on ties, the earlier start index.
            if diff < best or (diff == best and start < results[0]):
                best = diff
                results[0] = start
                results[1] = max(prefix[i + 1][1], prefix[i][1])
        return results
# Quick manual check of Solution.subarraySumClosest; the expected answer for
# this input is [3, 5] (nums[3..5] = -3 + 5 - 2 = 0).
data = [-3, 1, 1, -3, 5,-2]
print("data is ",data)
mySol= Solution()
res = mySol.subarraySumClosest(data)
print("res is ",res)
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
# NOTE(review): rospy versions differ on whether a Publisher may be created
# before init_node() — confirm on the target ROS distro; kept at module level
# as in the original.
pub = rospy.Publisher('/mobile_base/commands/velocity', Twist, queue_size=10)

def setup():
    """Initialise the node, subscribe to teleop commands and spin forever.

    Bug fixes: init_node() must be called *before* registering a Subscriber
    (rospy raises otherwise), and without rospy.spin() the script returned
    immediately and no callback was ever delivered.
    """
    rospy.init_node('constant_command', anonymous=True)
    rospy.Subscriber('kobuki_command', Twist, send)
    rospy.spin()

def send(data):
    """Forward the received Twist (linear.x / angular.z) to the mobile base."""
    command = Twist()
    command.linear.x = data.linear.x
    command.angular.z = data.angular.z
    pub.publish(command)

if __name__ == '__main__':
    try:
        setup()
    except rospy.ROSInterruptException:
        # Normal shutdown path (Ctrl-C / node kill).
        pass
|
# Generated by Django 2.2.12 on 2020-06-02 10:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional per-day outbound message
    limit to both the Domain and Mailbox models (null means unlimited)."""

    dependencies = [
        ('admin', '0015_rename_view_permissions'),
    ]

    operations = [
        migrations.AddField(
            model_name='domain',
            name='message_limit',
            field=models.PositiveIntegerField(blank=True, help_text='Number of messages this domain can send per day', null=True, verbose_name='Message sending limit'),
        ),
        migrations.AddField(
            model_name='mailbox',
            name='message_limit',
            field=models.PositiveIntegerField(blank=True, help_text='Number of messages this mailbox can send per day', null=True, verbose_name='Message sending limit'),
        ),
    ]
|
'''
Created on Dec 2, 2015
@author: ams889
This module contains the functions used for the assignment.
'''
import pandas as pd
from userDefinedErrorHandling import *
import sys
import matplotlib.pyplot as plt
def dataLoad():
    '''
    Load the NYC restaurant-inspection CSV directly from the open-data URL.

    Prints a wait message because the download typically takes ~30 seconds.
    Exits the program (status 1) if the data cannot be retrieved.

    Returns:
        pandas.DataFrame with the raw inspection records.
    '''
    print("Please wait, data loading (may take up to a minute)...")
    try:
        # read_csv already returns a DataFrame; the old pd.DataFrame(...)
        # wrapper only made a redundant copy.
        df = pd.read_csv('https://data.cityofnewyork.us/api/views/xx67-kt59/rows.csv?accessType=DOWNLOAD')
        print("Data loaded!")
        return df
    except IOError:
        print('Data could not be accessed, program closing')
        sys.exit(1)
def dataClean(df):
    '''
    Keep only rows with a letter grade (A/B/C), a recognised NYC borough,
    and a non-missing grade date; returns the filtered DataFrame.
    '''
    # Letter grades only: drops pending/missing/other grade codes.
    df = df[df.GRADE.isin(['A', 'B', 'C'])]
    # Recognised boroughs only.
    valid_boros = ["MANHATTAN", "BROOKLYN", "QUEENS", "BRONX", "STATEN ISLAND"]
    df = df[df.BORO.isin(valid_boros)]
    # A grade without a date cannot be placed on the timeline.
    return df.dropna(subset=["GRADE DATE"])
def test_grades(grade_list):
'''
This function returns 1, 0 or -1 if the grade improves, remains the same,
or decreases (respectively). I chose to use only the first and last letter as
initially my code iterated through all changes (A to C = -2, A to B = -1 and similarly
C to A = +2, etc.) and then returned the net, but this is the same as taking the
first and the last and since we treat any decrease as '-1' we don't care what
the overall difference is, just that there was a decrease (and vise versa for an increase)
'''
if len(grade_list)==0:
raise grade_listFormatError('Grade list must contain at least one value')
if grade_list[-1] > grade_list[0]:
return -1
elif grade_list[-1] < grade_list[0]:
return 1
else:
return 0
def gradeOverTimePlot(data, boro):
    '''
    Plot monthly counts of A/B/C grades for one borough — or for the whole
    dataset when boro == "ALL" — and save the figure as a PDF.

    Fixes vs. the original: DataFrame.sort() was removed in pandas 0.20
    (sort_values is used instead), and the chained-indexing assignments
    (df[col][i] = ...) that trigger SettingWithCopyWarning and may silently
    fail are replaced with vectorized string operations.
    '''
    if boro == "ALL":
        boro = "NYC"
        dfGrades = data
    else:
        dfGrades = data[data["BORO"] == boro]
    dfGrades = dfGrades.reset_index()
    # Build YEARMON (e.g. 201503) from the MM/DD/YYYY "GRADE DATE" strings.
    dfGrades['YEARMON'] = (dfGrades["GRADE DATE"].str[6:10]
                           + dfGrades["GRADE DATE"].str[0:2]).astype(int)
    df1 = dfGrades[["GRADE", "YEARMON"]]
    df1 = df1.sort_values('YEARMON', ascending=True)
    # Rows: month, columns: grade letter, values: restaurant counts.
    output = df1.groupby(["YEARMON", "GRADE"]).GRADE.count().unstack("GRADE")
    output = output.fillna(0)
    fig, ax = plt.subplots()
    fig.canvas.draw()
    labelData = df1["YEARMON"]  # used to label the x-ticks
    labelData = labelData.drop_duplicates()
    labelData = labelData.reset_index()
    # Human-readable "MM-YYYY" labels.
    yearmon_str = labelData["YEARMON"].astype(str)
    labelData['MonthYear'] = yearmon_str.str[4:6] + "-" + yearmon_str.str[0:4]
    labels = [labelData['MonthYear'][0], labelData['MonthYear'][10],
              labelData['MonthYear'][20], labelData['MonthYear'][30],
              labelData['MonthYear'][40], labelData['MonthYear'][len(labelData) - 1]]
    # NOTE(review): labels are set without fixing tick positions via
    # ax.set_xticks — behaviour preserved from the original; confirm intent.
    ax.set_xticklabels(labels)
    plt.xlim(0, len(labelData) - 1)  # keep the scale reasonable for each borough
    plt.title('Grade Improvement in ' + boro + " over time.")
    plt.xlabel('Date')
    plt.ylabel('Number of Restaurants')
    plt.plot(output["A"], label="A")
    plt.plot(output["B"], label="B")
    plt.plot(output["C"], label="C")
    plt.legend(loc="upper left")
    plt.savefig('grade_improvement_' + str(boro) + '.pdf')
    plt.close()
|
import re
def riddle1():
    """Riddle 1 — solution not yet implemented."""
    raise NotImplementedError()
def riddle2():
    """Riddle 2 — solution not yet implemented."""
    raise NotImplementedError()
def riddle3():
    """Riddle 3 — solution not yet implemented."""
    raise NotImplementedError()
def riddle4():
    """Riddle 4 — solution not yet implemented."""
    raise NotImplementedError()
def riddle5():
    """Riddle 5 — solution not yet implemented."""
    raise NotImplementedError()
def riddle6():
    """Riddle 6 — solution not yet implemented."""
    raise NotImplementedError()
def riddle7():
    """Riddle 7 — solution not yet implemented."""
    raise NotImplementedError()
def riddle8():
    """Riddle 8 — solution not yet implemented."""
    raise NotImplementedError()
def riddle9():
    """Riddle 9 — solution not yet implemented."""
    raise NotImplementedError()
def riddle10():
    """Riddle 10 — solution not yet implemented."""
    raise NotImplementedError()
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
"""KafkaProducer class
Produce messages and send them in a Kafka topic.
"""
import asyncio
from logging import (getLogger, Logger)
from typing import Union, List, Dict, Awaitable
from aiokafka.errors import KafkaError, KafkaTimeoutError
from aiokafka.producer import AIOKafkaProducer
from aiokafka.producer.message_accumulator import BatchBuilder
from aiokafka.producer.producer import TransactionContext
from tonga.models.records.base import BaseRecord
from tonga.models.store.store_record import StoreRecord
from tonga.models.structs.positioning import (BasePositioning, KafkaPositioning)
from tonga.services.coordinator.client.kafka_client import KafkaClient
from tonga.services.coordinator.partitioner.base import BasePartitioner
from tonga.services.errors import BadSerializer
from tonga.services.producer.base import BaseProducer
from tonga.services.producer.errors import AioKafkaProducerBadParams
from tonga.services.producer.errors import FailToSendBatch
from tonga.services.producer.errors import FailToSendEvent
from tonga.services.producer.errors import KafkaProducerAlreadyStartedError
from tonga.services.producer.errors import KafkaProducerError
from tonga.services.producer.errors import KafkaProducerNotStartedError
from tonga.services.producer.errors import KafkaProducerTimeoutError
from tonga.services.producer.errors import KeyErrorSendEvent
from tonga.services.producer.errors import ProducerConnectionError
from tonga.services.producer.errors import TypeErrorSendEvent
from tonga.services.producer.errors import UnknownEventBase
from tonga.services.producer.errors import ValueErrorSendEvent
from tonga.services.serializer.base import BaseSerializer
from tonga.services.serializer.kafka_key import KafkaKeySerializer
__all__ = [
'KafkaProducer',
]
class KafkaProducer(BaseProducer):
    """
    KafkaProducer Class, this class makes a bridge between AIOKafkaProducer and tonga

    Attributes:
        logger (Logger): Python logger
        serializer (BaseSerializer): Serializer encode & decode event
        _bootstrap_servers (Union[str, List[str]): ‘host[:port]’ string (or list of ‘host[:port]’ strings) that
                            the consumer should contact to bootstrap initial cluster metadata
        _client_id (str): A name for this client. This string is passed in each request to servers and can be used
                          to identify specific server-side log entries that correspond to this client
        _acks (Union[int, str]): The number of acknowledgments the producer requires the leader to have
                                 received before considering a request complete. Possible value (0 / 1 / all)
        _running (bool): Is running flag
        _transactional_id (str): Id for make transactional process
        _kafka_producer (AIOKafkaProducer): Underlying aiokafka producer instance
        _loop (AbstractEventLoop): Asyncio loop
    """
    logger: Logger
    serializer: BaseSerializer
    _client: KafkaClient
    _bootstrap_servers: Union[str, List[str]]
    _client_id: str
    _acks: Union[int, str]
    _running: bool
    _transactional_id: str
    _kafka_producer: AIOKafkaProducer
    _loop: asyncio.AbstractEventLoop

    def __init__(self, client: KafkaClient, serializer: BaseSerializer, loop: asyncio.AbstractEventLoop,
                 partitioner: BasePartitioner, client_id: str = None, acks: Union[int, str] = 1,
                 transactional_id: str = None) -> None:
        """
        KafkaProducer constructor

        Args:
            client (KafkaClient): Initialization class (contains, client_id / bootstraps_server)
            serializer (BaseSerializer): Serializer encode & decode event
            acks (Union[int, str]): The number of acknowledgments the producer requires the leader to have
                                    received before considering a request complete. Possible value (0 / 1 / all)
            client_id (str): Client name (if is none, KafkaConsumer use KafkaClient client_id)
            transactional_id: Id for make transactional process

        Raises:
            AioKafkaProducerBadParams: raised when producer was call with bad params
            KafkaProducerError: raised when some generic error was raised form Aiokafka

        Returns:
            None
        """
        super().__init__()
        self.logger = getLogger('tonga')
        self._client = client

        # Create client_id (instance suffix keeps ids unique across instances)
        if client_id is None:
            self._client_id = self._client.client_id + '-' + str(self._client.cur_instance)
        else:
            self._client_id = client_id

        self._bootstrap_servers = self._client.bootstrap_servers
        self._acks = acks
        if isinstance(serializer, BaseSerializer):
            self.serializer = serializer
        else:
            raise BadSerializer
        self._transactional_id = transactional_id
        self._running = False
        self._loop = loop
        try:
            self._kafka_producer = AIOKafkaProducer(loop=self._loop, bootstrap_servers=self._bootstrap_servers,
                                                    client_id=self._client_id, acks=self._acks,
                                                    value_serializer=self.serializer.encode,
                                                    transactional_id=self._transactional_id,
                                                    key_serializer=KafkaKeySerializer.encode,
                                                    partitioner=partitioner)
        except ValueError as err:
            self.logger.exception('%s', err.__str__())
            raise AioKafkaProducerBadParams
        except KafkaError as err:
            self.logger.exception('%s', err.__str__())
            raise KafkaProducerError
        self.logger.debug('Create new producer %s', self._client_id)

    async def start_producer(self) -> None:
        """
        Start producer

        Raises:
            KafkaProducerAlreadyStartedError: raised when producer was already started
            ProducerConnectionError: raised when producer can't connect to broker
            KafkaError: raised when catch KafkaError

        Returns:
            None
        """
        if self._running:
            raise KafkaProducerAlreadyStartedError
        # Two connection attempts with a 1 s pause; the for/else raises
        # ProducerConnectionError only when every attempt failed.
        for retry in range(2):
            try:
                await self._kafka_producer.start()
                self._running = True
                self.logger.debug('Start producer : %s', self._client_id)
            except KafkaTimeoutError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                await asyncio.sleep(1)
            except ConnectionError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                await asyncio.sleep(1)
            except KafkaError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise err
            else:
                break
        else:
            raise ProducerConnectionError

    async def stop_producer(self) -> None:
        """
        Stop producer

        Raises:
            KafkaProducerNotStartedError: raised when producer was not started
            KafkaProducerTimeoutError: raised when producer timeout on broker
            KafkaError: raised when catch KafkaError

        Returns:
            None
        """
        if not self._running:
            raise KafkaProducerNotStartedError
        try:
            await self._kafka_producer.stop()
            self._running = False
            self.logger.debug('Stop producer : %s', self._client_id)
        except KafkaTimeoutError as err:
            self.logger.exception('%s', err.__str__())
            raise KafkaProducerTimeoutError
        except KafkaError as err:
            self.logger.exception('%s', err.__str__())
            raise err

    def is_running(self) -> bool:
        """
        Get is running

        Returns:
            bool: running
        """
        return self._running

    # Transaction sugar function
    def init_transaction(self) -> TransactionContext:
        """
        Inits transaction

        Returns:
            TransactionContext: Aiokafka TransactionContext
        """
        return self._kafka_producer.transaction()

    async def end_transaction(self, committed_offsets: Dict[str, BasePositioning], group_id: str) -> None:
        """
        Ends transaction

        Args:
            committed_offsets (Dict[str, BasePositioning]): Committed offsets during transaction
            group_id (str): Group_id to commit

        Returns:
            None
        """
        # Translate tonga positioning objects into aiokafka's
        # {TopicPartition: offset} mapping before committing.
        kafka_committed_offsets = dict()
        for key, positioning in committed_offsets.items():
            kafka_committed_offsets[positioning.to_topics_partition()] = positioning.get_current_offset()
        await self._kafka_producer.send_offsets_to_transaction(kafka_committed_offsets, group_id)

    async def send_and_wait(self, msg: Union[BaseRecord, StoreRecord], topic: str) -> BasePositioning:
        """
        Send a message and await an acknowledgments

        Args:
            msg (BaseRecord): Event to send in Kafka, inherit form BaseRecord
            topic (str): Topic name to send massage

        Raises:
            KeyErrorSendEvent: raised when KeyError was raised
            ValueErrorSendEvent: raised when ValueError was raised
            TypeErrorSendEvent: raised when TypeError was raised
            KafkaError: raised when catch KafkaError
            FailToSendEvent: raised when producer fail to send event

        Returns:
            BasePositioning: topic / partition / offset of the acknowledged record
        """
        if not self._running:
            await self.start_producer()
        # Up to four attempts; only timeouts are retried, other errors are
        # translated and re-raised immediately.
        for retry in range(4):
            try:
                if isinstance(msg, BaseRecord):
                    self.logger.debug('Send record %s', msg.to_dict())
                    record_metadata = await self._kafka_producer.send_and_wait(topic=topic, value=msg,
                                                                              key=msg.partition_key)
                elif isinstance(msg, StoreRecord):
                    self.logger.debug('Send store record %s', msg.to_dict())
                    record_metadata = await self._kafka_producer.send_and_wait(topic=topic, value=msg,
                                                                              key=msg.key)
                else:
                    self.logger.error('Fail to send msg %s', msg.event_name())
                    raise UnknownEventBase
            except KafkaTimeoutError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                await asyncio.sleep(1)
            except KeyError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise KeyErrorSendEvent
            except ValueError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise ValueErrorSendEvent
            except TypeError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise TypeErrorSendEvent
            except KafkaError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise err
            else:
                return KafkaPositioning(record_metadata.topic, record_metadata.partition, record_metadata.offset)
        else:
            raise FailToSendEvent

    async def send(self, msg: Union[BaseRecord, StoreRecord], topic: str) -> Awaitable:
        """
        Send a message and await an acknowledgments

        Args:
            msg (BaseRecord): Event to send in Kafka, inherit form BaseRecord
            topic (str): Topic name to send massage

        Raises:
            KeyErrorSendEvent: raised when KeyError was raised
            ValueErrorSendEvent: raised when ValueError was raised
            TypeErrorSendEvent: raised when TypeError was raised
            KafkaError: raised when catch KafkaError
            FailToSendEvent: raised when producer fail to send event

        Returns:
            Awaitable: future resolving to record metadata once the broker acknowledges
        """
        if not self._running:
            await self.start_producer()
        for retry in range(4):
            try:
                if isinstance(msg, BaseRecord):
                    self.logger.debug('Send record %s', msg.to_dict())
                    record_promise = self._kafka_producer.send(topic=topic, value=msg, key=msg.partition_key)
                elif isinstance(msg, StoreRecord):
                    self.logger.debug('Send store record %s', msg.to_dict())
                    record_promise = self._kafka_producer.send(topic=topic, value=msg, key=msg.key)
                else:
                    raise UnknownEventBase
            except KafkaTimeoutError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                await asyncio.sleep(1)
            except KeyError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise KeyErrorSendEvent
            except ValueError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise ValueErrorSendEvent
            except TypeError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise TypeErrorSendEvent
            except KafkaError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise err
            else:
                return record_promise
        else:
            raise FailToSendEvent

    async def create_batch(self) -> BatchBuilder:
        """
        Creates an empty batch

        Returns:
            BatchBuilder: Empty batch
        """
        if not self._running:
            await self.start_producer()
        self.logger.debug('Create batch')
        return self._kafka_producer.create_batch()

    async def send_batch(self, batch: BatchBuilder, topic: str, partition: int = 0) -> None:
        """
        Sends batch

        Args:
            batch (BatchBuilder): BatchBuilder
            topic (str): Topic name
            partition (int): Partition number

        Raises:
            KeyErrorSendEvent: raised when KeyError was raised
            ValueErrorSendEvent: raised when ValueError was raised
            TypeErrorSendEvent: raised when TypeError was raised
            KafkaError: raised when catch KafkaError
            FailToSendBatch: raised when producer fail to send batch

        Returns:
            None
        """
        if not self._running:
            await self.start_producer()
        for retry in range(4):
            try:
                self.logger.debug('Send batch')
                await self._kafka_producer.send_batch(batch=batch, topic=topic, partition=partition)
            except KafkaTimeoutError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                await asyncio.sleep(1)
            except KeyError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise KeyErrorSendEvent
            except ValueError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise ValueErrorSendEvent
            except TypeError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise TypeErrorSendEvent
            except KafkaError as err:
                self.logger.exception('retry: %s, err: %s', retry, err.__str__())
                raise err
            else:
                break
        else:
            raise FailToSendBatch

    async def partitions_by_topic(self, topic: str) -> List[int]:
        """
        Get partitions by topic name

        Args:
            topic (str): topic name

        Returns:
            List[int]: list of partitions
        """
        if not self._running:
            await self.start_producer()
        try:
            self.logger.debug('Get partitions by topic')
            partitions = await self._kafka_producer.partitions_for(topic)
        except KafkaTimeoutError as err:
            self.logger.exception('%s', err.__str__())
            raise KafkaProducerTimeoutError
        except KafkaError as err:
            self.logger.exception('%s', err.__str__())
            raise err
        return partitions

    def get_producer(self) -> AIOKafkaProducer:
        """
        Get kafka producer

        Returns:
            AIOKafkaProducer: AioKafkaProducer instance
        """
        return self._kafka_producer
|
from betterhandler import *
import cgi
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.ext.webapp import template
from models import *
import os
import wsgiref.handlers
class MainPage(BetterHandler):
    """Front page: shows the ten most recent lyrics."""

    def get(self):
        """Render index.html with the latest lyrics, newest first."""
        lyrics = db.GqlQuery("SELECT * FROM Lyric ORDER BY date DESC LIMIT 10")
        for_template = {'lyrics': lyrics}
        page = template.render(self.template_path('index.html'),
                               self.template_values(for_template))
        self.response.out.write(page)
class NewLyric(BetterHandler):
    """Create a new lyric from the submission form."""

    def get(self):
        """Show the empty new-lyric form."""
        page = template.render(self.template_path('new_lyric.html'), self.template_values())
        self.response.out.write(page)

    def post(self):
        """Store a Lyric built from the HTML-escaped form fields, then
        redirect to its permalink page."""
        lyric = Lyric()
        lyric.user = users.get_current_user()
        lyric.body = unicode(cgi.escape(self.request.get('body')))
        lyric.song = unicode(cgi.escape(self.request.get('song')))
        lyric.artist = unicode(cgi.escape(self.request.get('artist')))
        lyric.album = unicode(cgi.escape(self.request.get('album')))
        lyric.ASIN = unicode(cgi.escape(self.request.get('ASIN')))
        lyric.put()
        # Second put() copies the datastore-assigned id onto the entity itself.
        lyric.key_id = lyric.key().id()
        lyric.put()
        # @todo set some kind of flash variable here to show update message on next page
        self.redirect("/lyric?id=%s" % lyric.key_id)
class ALyric(BetterHandler):
    """Display a single lyric by its datastore id (?id=...)."""

    def get(self):
        """Render the requested lyric, or bounce to the front page.

        Bug fix: redirect() does not abort the handler — the original kept
        executing and rendered the template with lyric == None.
        """
        key_id = int(cgi.escape(self.request.get('id')))
        lyric = Lyric.get_by_id(key_id)
        if lyric == None:
            self.redirect("/")
            return
        for_template = {
            'lyric': lyric,
        }
        self.response.out.write(template.render(self.template_path('single_lyric.html'), self.template_values(for_template)))
class EditLyric(BetterHandler):
    """Edit an existing lyric (owner only)."""

    def get(self):
        """Show the edit form when the current user owns the lyric;
        otherwise redirect home."""
        key_id = int(cgi.escape(self.request.get('id')))
        lyric = Lyric.get_by_id(key_id)
        if (lyric == None) or (cmp(lyric.user, users.get_current_user()) != 0):
            self.redirect("/")
        else:
            for_template = {
                'lyric': lyric,
            }
            self.response.out.write(template.render(self.template_path('edit_lyric.html'), self.template_values(for_template)))

    def post(self):
        """Apply the submitted changes and render the updated lyric.

        Security fix: the original POST handler performed no existence or
        ownership check, so any user could overwrite any lyric (and a bad id
        crashed with AttributeError on None). The guard now mirrors get().
        """
        key_id = int(cgi.escape(self.request.get('id')))
        lyric = Lyric.get_by_id(key_id)
        if (lyric == None) or (cmp(lyric.user, users.get_current_user()) != 0):
            self.redirect("/")
            return
        lyric.user = users.get_current_user()
        lyric.body = unicode(cgi.escape(self.request.get('body')))
        lyric.song = unicode(cgi.escape(self.request.get('song')))
        lyric.artist = unicode(cgi.escape(self.request.get('artist')))
        lyric.album = unicode(cgi.escape(self.request.get('album')))
        lyric.ASIN = unicode(cgi.escape(self.request.get('ASIN')))
        lyric.put()
        for_template = {
            'lyric': lyric,
        }
        # @todo set some kind of flash variable here to show update message on next page
        self.response.out.write(template.render(self.template_path('single_lyric.html'), self.template_values(for_template)))
class DeleteLyric(BetterHandler):
    """Delete a lyric (owner only)."""

    def get(self):
        """Delete the lyric if it exists and belongs to the current user.

        Bug fix: the original fell through after redirect() and called
        lyric.delete() anyway — deleting other users' lyrics, or raising
        AttributeError when the id did not exist.
        """
        key_id = int(cgi.escape(self.request.get('id')))
        lyric = Lyric.get_by_id(key_id)
        if (lyric == None) or (cmp(lyric.user, users.get_current_user()) != 0):
            self.redirect("/")
            return
        lyric.delete()
        # @todo redirect to a referrer in a safe way
        # @todo set some kind of flash variable here to show update message on next page
        self.redirect("/")
class Artist(BetterHandler):
    """List all lyrics for one artist (?name=...)."""

    def get(self):
        """Render the artist page; lyrics is None when the artist has none."""
        artist = cgi.escape(self.request.get('name'))
        lyrics = db.GqlQuery("SELECT * FROM Lyric WHERE artist = :1 ORDER BY date DESC", artist)
        if lyrics.count() < 1:
            lyrics = None
        for_template = {'artist': artist, 'lyrics': lyrics}
        page = template.render(self.template_path('artist.html'),
                               self.template_values(for_template))
        self.response.out.write(page)
def main():
    """Build the WSGI application with the URL routing table and run it
    under the CGI handler."""
    application = webapp.WSGIApplication([
        ('/', MainPage),
        ('/lyric', ALyric),
        ('/lyric/new', NewLyric),
        ('/lyric/edit', EditLyric),
        ('/lyric/delete', DeleteLyric),
        ('/artist', Artist),
    ],
    debug=True)
    wsgiref.handlers.CGIHandler().run(application)

if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
import os, sys
import pickle
import numpy as np
import hdf5_to_dict as io
# Merge several pickled analysis-average dicts (argv[1:-1]) into one, then
# optionally attach the diagnostics log (argv[-1]) and write eht_out.p.
avgs = []
for fname in sys.argv[1:-1]:
    print("Loading {}".format(fname))
    avgs.append(pickle.load(open(fname, "rb")))
    avgs[-1]['fname'] = fname

#for avg in avgs:
#  print("Name: {}, contents: {}".format(avg['fname'], avg.keys()))

# Use the dict with the most keys as the template for shapes and metadata.
num_keys = [len(avg.keys()) for avg in avgs]
avg_max_keys = num_keys.index(max(num_keys))

# TODO organize this damn dict. HDF5?
# Keys copied verbatim from the template rather than summed across files.
direct_list = ['fname', 'a', 'gam', 'gam_e', 'gam_p', 'r', 'th', 'th_eh', 'th_bz', 'phi', 'avg_start', 'avg_end', 'avg_w', 't']
keys_to_sum = [key for key in avgs[avg_max_keys].keys() if key not in direct_list]

uni = {}
for key in keys_to_sum:
    uni[key] = np.zeros_like(avgs[avg_max_keys][key])
    for avg in avgs:
        if key in avg:
            # Keep track of averages w/weights, otherwise just sum since everything's time-dependent
            if (key[-2:] == '_r' or key[-3:] == '_th' or key[-4:] == '_hth' or key[-4:] == '_phi' or
                key[-4:] == '_rth' or key[-6:] == '_thphi' or key[-5:] == '_rphi' or key[-4:] == '_pdf'):
                uni[key] += avg[key]*avg['avg_w']
            elif key[-1:] == 't':
                # Time series: truncate to the shorter of the two arrays.
                if uni[key].shape[0] < avg[key].shape[0]:
                    uni[key] += avg[key][:uni[key].shape[0]]
                else:
                    uni[key][:avg[key].shape[0]] += avg[key]
            else:
                # Fallback: sum element-wise over the overlapping portion.
                if uni[key].size < avg[key].size:
                    uni[key] += avg[key][:uni[key].size]
                else:
                    uni[key][:avg[key].size] += avg[key]

for key in direct_list:
    if key in avgs[avg_max_keys].keys():
        uni[key] = avgs[avg_max_keys][key]

# Add compat/completeness stuff
uni['mdot'] = uni['Mdot']
uni['phi_b'] = uni['Phi_b']/np.sqrt(uni['Mdot'])

# Add the log versions of variables, for completeness/better ffts
if os.path.exists(sys.argv[-1]):
    uni['diags'] = io.load_log(sys.argv[-1])

with open("eht_out.p", "wb") as outf:
    print("Writing eht_out.p")
    pickle.dump(uni, outf)
|
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.shortcuts import render, redirect
from registration.forms import RegistrationForm
def registration(request):
    """Handle the sign-up form: create the account, log the user in and
    redirect to the completion page; otherwise re-render the form."""
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data['username']
            # NOTE(review): calling clean_password() directly is unusual for a
            # Django form — the common pattern is form.cleaned_data['password'];
            # confirm RegistrationForm really defines this method.
            password = form.clean_password()
            User.objects.create_user(username=username, password=password)
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('/registration/register-complete/')
    else:
        form = RegistrationForm()
    return render(request, 'registration/registration.html', {'form': form})
def register_complete(request):
    """Render the static 'registration complete' confirmation page."""
    template_name = 'registration/register_complete.html'
    return render(request, template_name)
|
#!/usr/bin/env python
# encoding: utf-8
"""
sendmail
Created by Anne Pajon on 2015-03-26.

Sends a plain-text test notification from the genomics helpdesk address
through the CRUK mail relay.
"""
# email modules
import smtplib
# FIX: email.MIMEText / email.MIMEMultipart are the legacy Python 2 module
# locations and were removed in Python 3; the lowercase paths below work on
# Python 2.6+ and Python 3.
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# email addresses
ANNE = 'anne.pajon@cruk.cam.ac.uk'
ANNEGMAIL = 'pajanne@gmail.com'
HELPDESK = 'genomics-helpdesk@cruk.cam.ac.uk'
msg = MIMEMultipart()
send_from = HELPDESK
send_to = [ANNE, ANNEGMAIL]
msg['Subject'] = 'Testing Automated Notification'
msg['From'] = HELPDESK
# The 'To' header wants a single comma-separated string; sendmail() below
# still takes the list of individual recipients.
msg['To'] = ','.join(send_to)
msg.attach(MIMEText("""
Testing Automated Notification...
--
Cambridge Institute Genomics Core (CIGC)
genomics-helpdesk@cruk.cam.ac.uk
"""))
#mail = smtplib.SMTP('smtp.cruk.cam.ac.uk')
mail = smtplib.SMTP('mailrelay.cruk.cam.ac.uk')
mail.set_debuglevel(True)
mail.sendmail(send_from, send_to, msg.as_string())
mail.quit()
|
import os
import datetime
import cv2
import numpy as np
import postgresql
from PIL import ImageGrab
import error_log
import db_query
import introduction
IMAGES_FOLDER = "images"
def search_cards(screen_area, deck, list_length, db):
    """Recognize the cards visible in the latest screenshot of *screen_area*.

    Template-matches every card image in *deck* against the most recent
    stored screenshot and concatenates the aliases of matched cards until
    the expected string length is reached.

    :param screen_area: identifier of the screen region to inspect
    :param deck: iterable of records with 'image_path' and 'alias' keys
    :param list_length: expected length of the resulting hand string
                        (4 for a hole-card pair, more for a board)
    :param db: open database connection
    :return: concatenated alias string; falls back to the placeholder
             '7h2d' when board recognition (list_length > 2) came up short
    """
    hand = ''
    try:
        for item in db_query.get_last_screen(screen_area, db):
            path = item['image_path']
            img_rgb = cv2.imread(path, 0)
            for value in deck:
                if cv_data_template(value['image_path'], img_rgb) > 0:
                    hand += value['alias']
                    # Stop as soon as we have the expected number of chars.
                    if len(hand) == list_length:
                        return hand
    except Exception as e:
        error_log.error_log('searchCards', str(e))
        print(e)
    # Recognition incomplete: substitute a known-weak placeholder hand so
    # downstream logic can still proceed.
    if len(hand) < 4 and list_length > 2:
        print(hand)
        hand = '7h2d'
    return hand
def check_is_folder_exist():
    """Ensure today's screenshot folder tree exists.

    Layout: <IMAGES_FOLDER>/<YYYY-MM-DD>/<screen_area>/ with one sub-folder
    per screen area known to the database.
    """
    folder_name = os.path.join(IMAGES_FOLDER, str(datetime.datetime.now().date()))
    # exist_ok=True avoids the check-then-create race of the previous
    # os.path.exists() + makedirs() sequence.
    os.makedirs(folder_name, exist_ok=True)
    db = postgresql.open(db_query.connection_string())
    data = db.query("select screen_area from screen_coordinates "
                    "union select screen_area from opponent_screen_coordinates")
    for value in data:
        # os.path.join is portable, unlike manual '/' concatenation.
        os.makedirs(os.path.join(folder_name, str(value['screen_area'])), exist_ok=True)
def made_screenshot(x_coordinate, y_coordinate, width, height):
    """Grab and return a screenshot of the given bounding box."""
    return ImageGrab.grab(bbox=(x_coordinate, y_coordinate, width, height))
def imaging(x_coordinate, y_coordinate, width, height, image_path, screen_area, db):
    """Capture the screen region, save it as PNG and record its path in the DB."""
    screenshot = made_screenshot(x_coordinate, y_coordinate, width, height)
    screenshot.save(image_path, "PNG")
    db_query.insert_image_path_into_db(image_path, screen_area, db)
def search_element(screen_area, elements, folder, db):
    """Return True if any of *elements* is template-matched in the latest
    screenshot of *screen_area*, False otherwise.

    :param elements: iterable of template base names (without '.png')
    :param folder: directory prefix holding the template images
    """
    # The screenshot does not depend on the loop variable: load it once
    # instead of re-querying the DB and re-reading the image per element.
    path = db_query.get_last_screen(screen_area, db)[0]['image_path']
    img_rgb = cv2.imread(path, 0)
    for item in elements:
        template_path = folder + item + '.png'
        if cv_data_template(template_path, img_rgb):
            return True
    return False
def search_last_opponent_action(screen_area, db):
    """Detect the opponent's last action button in the 'limp area'.

    Returns the matching action record (with at least 'image_path') when a
    button template matches the latest screenshot of the area.

    NOTE(review): the fall-through returns the string 'push', a different
    type than the matched record -- callers must handle both.
    """
    element_area = introduction.save_element(screen_area, 'limp_area', db)
    path = db_query.get_last_screen(element_area, db)
    path = path[0]['image_path']
    img_rgb = cv2.imread(path, 0)
    for item in db_query.get_actions_buttons(db):
        if cv_data_template(item['image_path'], img_rgb) > 0:
            return item
    return 'push'
def check_is_cbet_available(screen_area, db):
    """Return True when the 'check' action button is visible in the limp
    area of *screen_area*, False otherwise.
    """
    element_area = introduction.save_element(screen_area, 'limp_area', db)
    path = db_query.get_last_screen(element_area, db)
    path = path[0]['image_path']
    img_rgb = cv2.imread(path, 0)
    template_path = 'action_buttons/check.png'
    if cv_data_template(template_path, img_rgb) > 0:
        return True
    # FIX: explicit False instead of the implicit None fall-through keeps
    # the return type boolean (still falsy, so callers are unaffected).
    return False
def convert_hand(hand):
    """Turn a 4-char hand string such as 'AhKd' into the SQL-quoted pair
    "'Ah','Kd'" used by the card-lookup queries."""
    return "'{}{}','{}{}'".format(hand[0], hand[1], hand[2], hand[3])
def check_current_hand(screen_area, hand, db):
    """Return True when all four characters of *hand* are still recognized
    on screen, i.e. the detected hand has the expected length."""
    deck = db_query.get_current_cards(convert_hand(hand), db)
    return len(search_cards(screen_area, deck, 4, db)) == 4
def cv_data_template(image_path, img_rgb):
    """Count the locations where the template at *image_path* matches
    *img_rgb* with normalized correlation >= 0.98."""
    template = cv2.imread(str(image_path), 0)
    scores = cv2.matchTemplate(img_rgb, template, cv2.TM_CCOEFF_NORMED)
    matches = np.where(scores >= 0.98)
    return len(matches[0])
|
from sqlalchemy import Column, String, Integer, Float
from bd import Base
class Song(Base):
    """SQLAlchemy model for a song row, keyed by song_id."""
    __tablename__ = 'song'
    song_id = Column(String, primary_key=True)
    title = Column(String)
    artist_id = Column(String)
    year = Column(Integer)
    duration = Column(Float)
    def __init__(self, song_id, title, artist_id, year, duration):
        # FIX: was `_init_` (single underscores), so Python never called it
        # as the constructor; it also stored artist_id into a non-existent
        # `last_name` attribute instead of `artist_id`.
        self.song_id = song_id
        self.title = title
        self.artist_id = artist_id
        self.year = year
        self.duration = duration
    def __repr__(self):
        return "<Song(song_id='{}', title='{}', artist_id='{}', year={}, duration={})>"\
            .format(self.song_id, self.title, self.artist_id, self.year, self.duration)
    def __eq__(self, otro):
        # Equality (and the hash below) use only the primary key.
        return self.song_id == otro.song_id
    def __hash__(self):
        return hash(self.song_id)
|
from datetime import datetime
from itertools import groupby
from collections import Counter
#Opening log file
def open_read_file():
    """Read ./input/log.txt and return its lines (trailing newlines kept)."""
    log_path = r"./input/log.txt"
    with open(log_path) as handle:
        return handle.readlines()
# Load the whole log once at import time; each row is whitespace-split so
# row[0] is the timestamp field.
log_file = open_read_file()
row_taker = [row.split() for row in log_file]
def print_only_time():
    """Print the timestamp (first field) of every log row."""
    for var in row_taker:
        print(var[0])
#print_only_time()
# NOTE(review): rebinding `list` shadows the builtin for the rest of the
# module -- consider renaming.
list = row_taker
"""
for k,g in groupby(list, key=lambda item: item[0][0:8]):
    print(k)
    print('\n'.join(x[0] for x in g) + '\n')
"""
class TimeSelect(object):
    # NOTE(review): `solve` takes the row itself (no `self`) because it is
    # used below, while the class body executes, as the groupby key; it
    # parses the row's first field as H:M:S.microseconds and returns the
    # (hour, minute, second) tuple on the first non-empty row.
    def solve(item):
        for line in list:
            if not line: continue
            else:
                dt = datetime.strptime(item[0], '%H:%M:%S.%f')
                return dt.hour, dt.minute, dt.second
    # This loop runs once at class-creation time; `outLine` is rebound every
    # iteration, so the resulting class attribute holds only the last
    # group's joined timestamps.
    for k, g in groupby(list, key=solve):
        outLine = ('\n'.join(x[0][0:8] for x in g) + '\n')
timeSelectedObjects = TimeSelect()
"""
def string_to_datetime(log):
parts = log.split('.')
var_date = datetime.strptime(parts[0], "%H:%M:%S.%f")
return var_date.replace(microsecond=int(parts[1]))
def parsing_keywords(log):
important = []
keep_phrases = ['ComponentA']
parsing_keywords = (line for line in log_file if any(phrase in line for phrase in keep_phrases))
for line in log:
for phrase in keep_phrases:
if phrase in line:
important.append(line)
break
return important
print parsing_keywords(log_file)
def time_cutter(var_date):
time = datetime.strptime(var_date, '%H:%M:%S')
start_timestamp = datetime.strptime(var_date, '%H:%M:%S')
end_timestamp = datetime.strptime(var_date, '%H:%M:%S')
if time >= start_timestamp and time <= end_timestamp: print 'it worked'
"""
|
from CallBackOperator import CallBackOperator
from SignalGenerationPackage.DynamicPointsDensitySignal.DynamicPointsDensityUIParameters import DynamicPointsDensityUIParameters
class PointsDensityCallBackOperator(CallBackOperator):
    """Wires the points-density UI controls (line edit + slider) to the
    model's PointsDensity attribute, keeping both widgets in sync."""
    def __init__(self, model):
        super().__init__(model)
    # overridden
    def ConnectCallBack(self, window):
        """Bind the window's points-density widgets and synchronize them
        using the base-class helper."""
        self.window = window
        self.setup_callback_and_synchronize_slider(
            validator_min=DynamicPointsDensityUIParameters.PointsDensitySliderMin,
            validator_max=DynamicPointsDensityUIParameters.PointsDensitySliderMax,
            validator_accuracy=DynamicPointsDensityUIParameters.PointsDensityLineEditAccuracy,
            line_edit=window.PointsDensitylineEdit,
            slider_min=DynamicPointsDensityUIParameters.PointsDensitySliderMin,
            slider_max=DynamicPointsDensityUIParameters.PointsDensitySliderMax,
            slider=window.PointsDensityhorizontalSlider,
            update_slider_func=self.update_points_density_slider,
            update_line_edit_func=self.update_points_density_line_edit
        )
    def update_points_density_slider(self):
        """Push the line-edit value into the slider."""
        self.update_slider(
            line_edit=self.window.PointsDensitylineEdit,
            slider=self.window.PointsDensityhorizontalSlider,
            calc_constant=DynamicPointsDensityUIParameters.PointsDensityCalcConstant
        )
    def update_points_density_line_edit(self):
        """Push the slider value into the line edit and the model."""
        self.update_line_edit(
            line_edit=self.window.PointsDensitylineEdit,
            slider=self.window.PointsDensityhorizontalSlider,
            calc_constant=DynamicPointsDensityUIParameters.PointsDensityCalcConstant,
            update_model_func=self.update_vertical_offset
        )
    # NOTE(review): name looks copy-pasted from a vertical-offset operator;
    # it actually updates PointsDensity. Kept for interface stability.
    def update_vertical_offset(self, val):
        self.model.PointsDensity = val
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 16:09
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Creates the Election and ElectionDivisions models and wires up the
    FK/M2M relations to organisations and election type/subtype.

    Auto-generated by Django; do not hand-edit the operations.
    """
    dependencies = [
        ("organisations", "0007_auto_20161021_1609"),
        ("elections", "0004_electedrole"),
    ]
    operations = [
        migrations.CreateModel(
            name="Election",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("election_id", models.CharField(blank=True, max_length=100)),
                ("poll_open_date", models.DateField()),
            ],
        ),
        migrations.CreateModel(
            name="ElectionDivisions",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("seats_contested", models.IntegerField()),
                ("seats_total", models.IntegerField()),
                (
                    "division",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="organisations.OrganisationDivision",
                    ),
                ),
                (
                    "election",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="elections.Election",
                    ),
                ),
            ],
        ),
        # M2M from Election to divisions via the explicit through model above.
        migrations.AddField(
            model_name="election",
            name="divisions",
            field=models.ManyToManyField(
                through="elections.ElectionDivisions",
                to="organisations.OrganisationDivision",
            ),
        ),
        migrations.AddField(
            model_name="election",
            name="election_subtype",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="elections.ElectionSubType",
            ),
        ),
        migrations.AddField(
            model_name="election",
            name="election_type",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                to="elections.ElectionType",
            ),
        ),
        migrations.AddField(
            model_name="election",
            name="organisation",
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                to="organisations.Organisation",
            ),
        ),
    ]
|
"""
Basic model of a room with a radiator.
All specific enthalpies within room are lumped as one value.
All sources of heat loss are lumped as one value.
Assumed that room is only heated by the radiator.
heat_in => heat_stored => heat out.
Gives basic equation
stored_heat = start_heat + (in_heat + out_heat) [Q_room = Q_start + Q_rad + Q_loss]
"""
# Simulation setup. SET THESE VALUES.
N_ITERATIONS = 20 * 60 # on for 20 minutes (one iteration per second)
START_TEMP = 15.0 # [C]
OUTSIDE_TEMP = 0.0 # [C]
RADIATOR_TEMP = 60.0 # [C]
ROOM_SIZE = (3.0, 5.0, 2.3) # w, l, h. Assumes room is cuboid. [m]
# The R(SI) value of the room (resistance to heat transfer).
# Equivalent of 1/U-value. Using R value as easier to combine.
# From U-value of 0.8, given by DHD as typical for poorly insulated house.
R_WALLS = 1.25 # [K m^2/W]
# R_WALLS = 10 # [K m^2/W]
# Physical constants.
DENSITY_AIR = 1.205 # Density of air at 20 C, 1 Atm. [Kg/m^3]
Cp_AIR = 1005.0 # The constant pressure specific heat capacity of air at 20 C, 1 Atm [J/(kg K)]
# Surface area enclosing room: two of each face of the cuboid. [m^2]
SURFACE_AREA = (ROOM_SIZE[0] * ROOM_SIZE[1] +
                ROOM_SIZE[0] * ROOM_SIZE[2] +
                ROOM_SIZE[1] * ROOM_SIZE[2]) * 2.0
VOLUME = ROOM_SIZE[0] * ROOM_SIZE[1] * ROOM_SIZE[2] # Volume of room. [m^3]
C_AIR = DENSITY_AIR * VOLUME * Cp_AIR # Heat capacity of air in the room. [J/K]
# C_AIR = DENSITY_AIR * VOLUME * Cp_AIR + 40000 # Naively adding thermal capacitance to represent extra objects fails.
WALL_CONDUCTANCE = SURFACE_AREA * (1.0 / R_WALLS) # Thermal conductance of room walls. Equivalent to U-value [W/K]
print("Surface Area: {0:.2f} m^2\nVolume: {1:.2f} m^3".format(SURFACE_AREA, VOLUME))
print("Thermal Conductance: {0:.2f} W/K\nHeat Capacity: {1:.2f} kJ/K".format(WALL_CONDUCTANCE, C_AIR / 1000.0))
def heat_in_radiator(room_temp, radiator_temp):
    """Heat delivered to the room by the radiator over one iteration.

    The radiator temperature is assumed independent of the heat drawn.
    :param room_temp: [C]
    :param radiator_temp: [C]
    :return: [J]
    """
    # 25 W/K gives ~1 kW with the room at 20 C and radiator at 60 C
    # [1000 / (60 - 20)].
    radiator_conductance = 25
    temp_difference = radiator_temp - room_temp
    return temp_difference * radiator_conductance
def heat_loss_walls(room_temp, outside_temp):
    """Heat lost through walls, floor and ceiling over one iteration
    (negative when the room is warmer than outside).

    Assume outside is the same temp for all walls.
    :param room_temp: [C]
    :param outside_temp: [C]
    :return: [J]
    """
    temp_difference = outside_temp - room_temp
    return temp_difference * WALL_CONDUCTANCE
def calc_temp(room_temp, *args):
    """Return the new room temperature after applying the given heat flows.

    Assume the entire room heats up instantly and evenly.
    :param room_temp: current room temperature [C]
    :param args: heat flows into the room (+ve source, -ve sink) [J]
    :return: new room temperature [C]
    """
    total_heat = sum(args)
    return room_temp + (1.0 / C_AIR) * total_heat
# Explicit time-stepping: one iteration represents one second, so the W-level
# conductances above directly yield J per step.
room_temps = [START_TEMP]
heat_in = [0.0]
heat_out = [0.0]
for i in range(N_ITERATIONS):
    heat_in.append(heat_in_radiator(room_temps[-1], RADIATOR_TEMP))
    heat_out.append(heat_loss_walls(room_temps[-1], OUTSIDE_TEMP))
    room_temps.append(
        calc_temp(room_temps[-1],
                  heat_in[-1],
                  heat_out[-1])
    )
# Print final temp and total energy use.
print("Final Temp: {0:.2f} C\nEnergy Use: {1:.2f} kJ\nEnergy Loss: {2:.2f} kJ".format(room_temps[-1],
                                                                                     sum(heat_in) / 1000.0,
                                                                                     sum(heat_out) / 1000.0))
# Print per iteration values.
# output = []
# for i in range(len(temperature)):
#     print([i, temperature[i], heat_in[i]])
|
import unittest
from katas.kyu_8.remove_first_and_last_char_part_two import array
class ArrayTestCase(unittest.TestCase):
    """Tests for the kata's array(): judging from the cases below it splits
    on commas, drops the first and last elements and joins the rest with
    spaces, returning None when no middle elements remain."""
    def test_equals(self):
        self.assertEqual(array('1,2,3'), '2')
    def test_equals_2(self):
        self.assertEqual(array('1,2,3,4'), '2 3')
    def test_none(self):
        # Empty input: nothing to keep.
        self.assertIsNone(array(''))
    def test_none_2(self):
        self.assertIsNone(array('1'))
    def test_none_3(self):
        # Two elements leave an empty middle.
        self.assertIsNone(array('1, 3'))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 30 16:27:36 2019
@author: kj22643
"""
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 12:29:10 2019
@author: kj22643
"""
# NOTE(review): '%reset' is IPython magic -- this file only runs inside
# IPython/Spyder cell-by-cell; as a plain Python script it is a SyntaxError.
%reset
import numpy as np
import pandas as pd
import os
import scanpy as sc
import seaborn as sns
from plotnine import *
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import plot, show, draw, figure, cm
# NOTE(review): misleading alias -- this binds the matplotlib package (not
# pyplot) to the name plt; it is re-imported correctly further down.
import matplotlib as plt
import random
os.chdir('/Users/kj22643/Documents/Documents/231_Classifier_Project/code')
from func_file import find_meanvec
from func_file import find_eigvals
from func_file import find_eigvecs
from func_file import project_cells
path = '/Users/kj22643/Documents/Documents/231_Classifier_Project/data'
#path = '/stor/scratch/Brock/231_10X_data/'
os.chdir(path)
sc.settings.figdir = 'KJ_plots'
sc.set_figure_params(dpi_save=300)
sc.settings.verbosity = 3
#%% Load in pre and post data
adata = sc.read('daylin_anndata.h5ad')
adata.obs.head()
#BgL1K
#30hr
#Rel-1
#Rel-2
#%% Assign survivor category in adata.obs
# Lineages seen in either relapse sample (and with a known lineage barcode)
# are called resistant; everything else is sensitive.
longTreatLins = adata.obs.loc[(adata.obs['sample'].isin(['Rel-1','Rel-2']))&(adata.obs.lineage!='nan'),'lineage'].unique().tolist()
adata.obs.loc[adata.obs.lineage.isin(longTreatLins)==False,'survivor'] = 'sens'
adata.obs.loc[adata.obs.lineage.isin(longTreatLins)==True,'survivor'] = 'res'
sc.pl.umap(adata,color=['survivor'],wspace=0.3,
           save='alltps_res_sens.png')
# %%try to rename the samples by time point
# NOTE(review): assumes the unique() order of samples matches
# (BgL1K, 30hr, Rel-1, Rel-2) -- confirm, unique() order is occurrence order.
samps= adata.obs['sample'].unique()
timepoint = np.array(['t=0hr', 't=30hr', 't=1344hr'])
adata.obs.loc[adata.obs['sample']==samps[0], 'timepoint']='t=0hr'
adata.obs.loc[adata.obs['sample']==samps[1], 'timepoint']='t=30hr'
adata.obs.loc[adata.obs['sample']==samps[2], 'timepoint']='t=1344hr'
adata.obs.loc[adata.obs['sample']==samps[3], 'timepoint']='t=1344hr'
print(adata.obs['timepoint'].unique())
sc.pl.umap(adata,color = ['timepoint'], palette=['#2c9e2f','#046df7', '#d604f7', '#c91212'], wspace=0.3,
           save = 'TPs_umap.png')
#%% Separately make dataframes for the pre-treatment, intermediate, and post treatment samples
# t=0 hr (pre-treatment), 3182 pre treatment cells
adata_pre = adata[adata.obs['timepoint']=='t=0hr', :]
# dfpre: first column is the 'survivor' label, remaining columns are genes.
dfpre = pd.concat([adata_pre.obs['survivor'],
                   pd.DataFrame(adata_pre.raw.X,index=adata_pre.obs.index,
                                columns=adata_pre.var_names),],axis=1)
# t = 30 hr (intermediate timepoint) 5169 int treatment cells
adata_int = adata[adata.obs['timepoint']=='t=30hr', :]
dfint = pd.DataFrame(adata_int.raw.X, index=adata_int.obs.index, columns = adata_int.var_names)
# t=1344 hr (~roughly 8 weeks), 10332 post treatment cells
# NOTE(review): dfint/dfpost carry only the gene matrix -- no 'sample'
# column; see the Aziz filter near the end which assumes one exists.
adata_post = adata[adata.obs['timepoint']=='t=1344hr', :]
dfpost = pd.DataFrame(adata_post.raw.X, index=adata_post.obs.index, columns = adata_post.var_names)
#%% Try making a UMAP of the first sample only
sc.pl.umap(adata_pre,color=['survivor'],wspace=0.3,
           save='pre_treat_res_sens.png')
#%% Play with scanpys PCA
sc.tl.pca(adata_pre, n_comps=50, zero_center=True, svd_solver='auto', random_state=0, return_info=False, use_highly_variable=None, dtype='float32', copy=False, chunked=False, chunk_size=None)
#%% Combine the survivor labels with the 50 PC coordinates per cell.
classvecser= adata_pre.obs['survivor']
classvec = pd.DataFrame(classvecser)
PCs=adata_pre.obsm['X_pca']
PCdf = pd.DataFrame(PCs)
classvec.reset_index(drop=True, inplace=True)
PCdf.reset_index(drop=True, inplace=True)
PC_df=pd.concat([classvec['survivor'],PCdf], axis =1)
#%% Pairwise PC scatter plots colored by survivor status.
# NOTE(review): all six scatterplots draw onto the same axes of one figure.
sns.set_style('white')
from matplotlib.pyplot import plot, show, draw, figure, cm
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(6,6))
ax=sns.scatterplot(PC_df[0], PC_df[1], hue= PC_df['survivor'])
ax.set(xlabel ='PC1', ylabel ='PC2')
ax1=sns.scatterplot(PC_df[1], PC_df[2], hue= PC_df['survivor'])
ax1.set(xlabel ='PC2', ylabel ='PC3')
ax2=sns.scatterplot(PC_df[2], PC_df[3], hue= PC_df['survivor'])
ax2.set(xlabel ='PC3', ylabel ='PC4')
ax3=sns.scatterplot(PC_df[0], PC_df[2], hue= PC_df['survivor'])
ax3.set(xlabel ='PC1', ylabel ='PC3')
ax4=sns.scatterplot(PC_df[0], PC_df[3], hue= PC_df['survivor'])
ax4.set(xlabel ='PC1', ylabel ='PC4')
ax5=sns.scatterplot(PC_df[1], PC_df[3], hue= PC_df['survivor'])
ax5.set(xlabel ='PC2', ylabel ='PC4')
#%% ATTEMPT AT MAKING a 3D scatter plot with PCs 1, 2, & 3
fig = plt.figure(figsize=(15,15))
ax=fig.add_subplot(111,projection='3d')
PC_dfsens = PC_df[PC_df['survivor']=='sens']
PC_dfres = PC_df[PC_df['survivor']=='res']
# NOTE(review): sensitive cells use columns (0, 2, 3) while resistant cells
# use (0, 1, 2) -- the axes disagree with each other and with the PC1-3
# labels below; confirm which trio was intended.
Xs= np.asarray(PC_dfsens[0])
Ys=np.asarray(PC_dfsens[2])
Zs=np.asarray(PC_dfsens[3])
Xr= np.asarray(PC_dfres[0])
Yr=np.asarray(PC_dfres[1])
Zr=np.asarray(PC_dfres[2])
ax.scatter(Xr, Yr, Zr, c='b', marker='^', alpha = 1)
ax.scatter(Xs, Ys, Zs, c='r', marker='o', alpha = 0.3)
ax.set_xlabel('PC1')
ax.set_ylabel('PC2')
ax.set_zlabel('PC3')
ax.azim = 100
ax.elev = -50
# NEXT NEED TO FIND OUT HOW TO OUT ARROWS ON THIS
#%% PCA Overview
sc.pl.pca_overview(adata_pre)
#%% Gene loadings for each PC.
loadings=adata_pre.varm['PCs']
#%%
print(dfpre) # 22192 columns corresponding to 22191 genes
#%% Make series that label the pre-treatment cells as res/sens and label the
# label the post treatment cells by their sample
labelsdfpre = dfpre['survivor']
print(labelsdfpre)
#%% Make matrices (data frames) of just the cell-gene matrix for the pre treatment and
# post treatment samples
genematpre = dfpre.loc[:, dfpre.columns !='survivor']
print(genematpre)
# Now genematpre and genemat post are your ncells rows x ngenes columns gene
# expression matrices.
#%% Now try to emulate your matlab code...
# Start with just your pre-treatment time point
# In Matlab we have an x by k where x would be all the genes and k are the indivdual
# cells (so each column is a cell and each row is a gene)
# let's see if we can make that in python and call it Adf
nint = dfint.shape[0]
npost =dfpost.shape[0]
npre = genematpre.shape[0] # this gets the number of rows in the df (number of cells)
# Set your k for your k-fold cross validation to divide up testing and training data sets
kCV=4
# NOTE(review): comment says 1/10th but the formula allocates (k-1)/k of the
# cells (+1) to training, i.e. 3/4 for kCV=4.
ntrain = round(((kCV-1)/kCV)*npre)+1 # start by setting the number of training cells to 1/10th
ntest = npre-ntrain
#%% Make your full data frames (include both the testing and training data set from the pre-treatment time point)
# Call these Adf -- transposed so rows are genes and columns are cells.
AdfT= genematpre
Adf = AdfT.T
print(Adf)
AintT=dfint
Aint= AintT.T
ApoT= dfpost
Apost= ApoT.T
#%% Make susbets of the full data frame for training and testing
# Going to perform k-fold CV with k=4
# Enumerating the (transposed) frame walks its columns, i.e. cells.
indexes = [i for i, _ in enumerate(Adf)]
ordered_ind= random.sample(indexes, ntest*kCV)
train_folds = {}
test_folds = {}
labstest = {}
labstrain = {}
Vfolds = {}
lamfolds = {}
mfolds = {}
#%% Make dictionaries that hold training and testing data set labels.
# Each fold takes a disjoint slice of the shuffled cell indices as its test
# set; everything else is training.
for i in range(kCV):
    # test out grabbing a subset of columns from a dataframe
    itest=ordered_ind[i*ntest:ntest*(i+1)]
    itrain =[j for j in indexes if j not in itest]
    Atest = Adf.iloc[:,itest]
    Atrain = Adf.iloc[:,itrain]
    labstest[i] = labelsdfpre.iloc[itest]
    labstrain[i] = labelsdfpre.iloc[itrain]
    # Save the testing and training data frames into a data frame that holds all of them.
    train_folds[i] = Atrain
    test_folds[i] = Atest
    # Find the mean vector, eigenvalues, and eigenvectors
    m = find_meanvec(A=Atrain)
    lams = find_eigvals(A=Atrain, mu=m )
    V = find_eigvecs(A=Atrain, mu = m)
    # Save the training set mean vector, eigenvalues, and eigenvectors
    Vfolds[i]=V
    lamfolds[i]=lams
    mfolds[i]=m
    # find the coordinates of the training and testing cells by projecting onto eigenvectors (V)
#%% Make a dictionary which contains the training matrix, labels, eigenvectors,
# eigen values, and mean vectors -- this one uses ALL pre-treatment cells.
full_dict = {'trainmat':{}, 'trainlabel':{}, 'eigenvectors':{}, 'eigvals':{}, 'meanvec':{}}
full_dict['trainmat']= Adf
full_dict['trainlabel']=labelsdfpre
mpre = find_meanvec(A=Adf)
lamspre = find_eigvals(A=Adf, mu = mpre)
Vall=find_eigvecs(A=Adf, mu = mpre)
full_dict['eigenvectors']=Vall
full_dict['eigvals']= lamspre
full_dict['meanvec']=mpre
#%% Merge the dictionaries of testing and training matrices, labels, eigenvectors, eigenvalues, and mean vectorfo
folds_dict = {'trainmats': {},'testmats': {}, 'trainlabels':{}, 'testlabels':{},
              'eigenvectors':{}, 'eigvals':{}, 'meanvecs':{}}
folds_dict['trainmats']= train_folds
folds_dict['testmats']=test_folds
print(folds_dict)
#%% Start with one example-- last Atrain and Atest
neigs=100
trainmat = project_cells(Atrain, V, neigs)
testmat = project_cells(Atest, V,neigs)
# find distance between columns of testmat and columns of training mat
#distmat = ordered_dist(trainmat, testmat)
# For each testing cell we have two columns- one is the distance to the
# nearest to furthest training cell, and the second is the index of the training cell matrix
# (trainmat) that that corresponds to.
# We can then use this distance and the indices to come up with the class estimates using a few different distance etrics
# Start with Euclidean distance and nearest neighbor classification
#%% Now you have the Eigenspace, defined by Vnorm, which contains normalized eigenvectors of
# the length of the number of genes, in order of importance.
# Now we need to do the projection of the individual cells (in A) onto the eigenvector space
#Mcol = reshape(M, [x,1]);
#b = (Mcol-m)'*V; %(1 by x) *(x*k) Gives a vector of length k
# Project onto eigenspace
#recon = V*b'; % (x by k) * (k *1) Gives x by 1
# Declare the number of eigenvectors we want to use from the Vnorm matrix
neigs = 100
#eventually we will loop through the number of eigenvectors and assess the accuracy as afunction of neigs
trainmat = np.zeros((neigs, ntrain))
trainvec= np.zeros((neigs,1))
# First make your Omat which contains the coordinates (the columns of each training image
# and has the corresponding assignment (sens or res) ))
# NOTE(review): `Xmat` and `Vnorm` are never defined in this file -- this
# cell relies on interactive session state (NameError when run fresh).
for i in range(ntrain):
    Mcol = Xmat[:,i] # start right after the last training column in Amat
    McolT= Mcol.T # to get the first testing cell as a row
    # 1xngene x ngene x ntrain = 1 x ntrain
    b=np.matmul(McolT,Vnorm)
    trainvec= b[:neigs]
    trainmat[:,i]= trainvec
#%% Should generate a neigs x ntrain matrix where each column is the coordinates
# of the cell in eigenspace
print(trainmat)
print(labelsdfpre[:ntrain])
trainlabels = labelsdfpre[:ntrain]
testlabels = labelsdfpre[ntrain:]
#%% Project the testing cells into eigenspace
# NOTE(review): `ncells` is also undefined here; `m` and `Atest` leak from
# the k-fold loop above.  DataFrame.as_matrix() was removed in pandas 1.0;
# use .to_numpy() if this is ever re-run on a current stack.
ntest = ncells-ntrain
Xte = Atest.sub(m, axis=0) # we subtract from each column
Xtest = Xte.as_matrix()
testmat = np.zeros((neigs, ntest))
testvec = np.zeros((neigs,1))
for i in range(ntest):
    Mcol = Xtest[:,i]
    McolT = Mcol.T
    b = np.matmul(McolT, Vnorm)
    testvec = b[:neigs]
    testmat[:,i] = testvec
#%% Compare the coordinates of the testing set with the coordinates of the training set cells
# Now you have a test mat which has class labels corresponding to each column
# For each column of your test mat, you want to find the k training mat columns it is closest to
# make a matrix that stores the ordered indices with the top being the lowest and the bottom the highest
# Euclidean distance
ordered_inds = np.zeros((ntrain, ntest))
dist = np.zeros((ntrain,ntest))
#for i in range(ntest):
for i in range(ntest):
    testvec = testmat[:,i]
    #for j in range(ntrain):
    for j in range(ntrain):
        trainvec = trainmat[:,j]
        # testvec of length neigs and train vec of length neigs
        # find Euclidean distance between the two vectors
        dist_j = np.linalg.norm(testvec-trainvec)
        # fill in your distance vector
        dist[j, i]=dist_j
#%% Now you have a distance matrix where each column is a testing cell
# for each column, we want to output the indices of the training vector distance
# in order from least to greatest
lab_est = [None]*ntest
for i in range(ntest):
    distcol = dist[:,i]
    ind = np.argsort(distcol)
    ordered_inds[:,i] = ind
#%% Use the ordered ind to make sense/res matrix
# Using k=1 nearest neighbors classifier. Need to figure out how to extend this
# Row 0 of ordered_inds holds each test cell's single nearest neighbour.
for i in range(ntest):
    index = ordered_inds[0,i]
    lab_est[i] = trainlabels[int(index)]
    print(trainlabels[int(index)])
#%% Make a data frame with your actual and predicted classes
df = pd.DataFrame({'actual class':testlabels,'predicted class':lab_est})
cnf_matrix = pd.crosstab(df['predicted class'],df['actual class'])
hm = sns.heatmap(cnf_matrix,annot=True,fmt='d',robust=True,
                 linewidths=0.1,linecolor='black')
#%% Calculate some metrics of accuracy from the confusion matrix
# NOTE(review): assumes crosstab row/column 0 is the positive ('res'?) class
# -- crosstab orders labels alphabetically, confirm which class is which.
TPR=cnf_matrix.iloc[0,0]/ (sum(cnf_matrix.iloc[:,0]))
print(TPR)
TNR = cnf_matrix.iloc[1,1]/(sum(cnf_matrix.iloc[:,1]))
print(TNR)
PPV = cnf_matrix.iloc[0,0]/sum(cnf_matrix.iloc[0,:])
print(PPV)
NPV = cnf_matrix.iloc[1,1]/sum(cnf_matrix.iloc[1,:])
print(NPV)
Acc = (cnf_matrix.iloc[0,0]+ cnf_matrix.iloc[1,1,])/ntest
print(Acc)
prevalance = sum(cnf_matrix.iloc[:,0])/ntest
print(prevalance)
#%% Vary the number of eigenvectors and then compare the accuracy metrics
neigvec = [10, 50, 75, 100, 150, 200, 250,]
TPRi=np.zeros((len(neigvec),1))
TNRi=np.zeros((len(neigvec),1))
PPVi=np.zeros((len(neigvec),1))
NPVi=np.zeros((len(neigvec),1))
Acci=np.zeros((len(neigvec),1))
#%% Repeat the project/classify/score pipeline once per eigenvector count.
# NOTE(review): like the cells above, this depends on Xmat/Vnorm from
# session state and reuses ordered_inds sized for the earlier ntest.
for k in range(len(neigvec)):
    neigi=neigvec[k] # set the number of eigenvectors
    trainmat = np.zeros((neigi, ntrain))
    trainvec= np.zeros((neigi,1))
    # First make your Omat which contains the coordinates (the columns of each training image
    # and has the corresponding assignment (sens or res) ))
    for i in range(ntrain):
        Mcol = Xmat[:,i] # start right after the last training column in Amat
        McolT= Mcol.T # to get the first testing cell as a row
        # 1xngene x ngene x ntrain = 1 x ntrain
        b=np.matmul(McolT,Vnorm)
        trainvec= b[:neigi]
        trainmat[:,i]= trainvec
    # We want to save all of these reduced coordinate spaces
    #eigenmatrices.append(trainmat)
    #Project the testing cells into eigenspace
    testmat = np.zeros((neigi, ntest))
    testvec = np.zeros((neigi,1))
    for i in range(ntest):
        Mcol = Xtest[:,i]
        McolT = Mcol.T
        b = np.matmul(McolT, Vnorm)
        testvec = b[:neigi]
        testmat[:,i] = testvec
    # Compare the coordinates of the testing set with the coordinates of the training set cells
    dist = np.zeros((ntrain,ntest))
    #for i in range(ntest):
    for i in range(ntest):
        testvec = testmat[:,i]
        #for j in range(ntrain):
        for j in range(ntrain):
            trainvec = trainmat[:,j]
            # testvec of length neigs and train vec of length neigs
            # find Euclidean distance between the two vectors
            dist_j = np.linalg.norm(testvec-trainvec)
            # fill in your distance vector
            dist[j, i]=dist_j
    # Now you have a distance matrix where each column is a testing cell
    # for each column, we want to output the indices of the training vector distance
    # in order from least to greatest
    lab_est = [None]*ntest
    for i in range(ntest):
        distcol = dist[:,i]
        ind = np.argsort(distcol)
        ordered_inds[:,i] = ind
    # Use the ordered ind to make sense/res matrix
    # Using k=1 nearest neighbors classifier. Need to figure out how to extend this
    for i in range(ntest):
        index = ordered_inds[0,i]
        lab_est[i] = trainlabels[int(index)]
    dfi = pd.DataFrame({'actual class':testlabels,'predicted class':lab_est})
    cnf_matrixi = pd.crosstab(dfi['predicted class'],dfi['actual class'])
    hm = sns.heatmap(cnf_matrixi,annot=True,fmt='d',robust=True,
                     linewidths=0.1,linecolor='black')
    # Calculate some metrics of accuracy from the confusion matrix
    TPRi[k] = cnf_matrixi.iloc[0,0]/ (sum(cnf_matrixi.iloc[:,0]))
    TNRi[k] = cnf_matrixi.iloc[1,1]/(sum(cnf_matrixi.iloc[:,1]))
    PPVi[k] = cnf_matrixi.iloc[0,0]/sum(cnf_matrixi.iloc[0,:])
    NPVi[k] = cnf_matrixi.iloc[1,1]/sum(cnf_matrixi.iloc[1,:])
    Acci[k] = (cnf_matrixi.iloc[0,0]+ cnf_matrixi.iloc[1,1,])/ntest
#%%
print(TPRi)
print(cnf_matrixi)
print(k)
print(cnf_matrixi.iloc[0,0]/ (sum(cnf_matrixi.iloc[:,0])))
#%% Plot the accuracy metrics versus number of eigenvectors
import matplotlib.pyplot as plt
plt.plot(neigvec, TPRi, 'b-', label='TPR')
plt.plot(neigvec, TNRi, 'g-', label='TNR')
plt.plot(neigvec, PPVi, 'r-', label='PPV')
plt.plot(neigvec, NPVi, 'y-', label='NPV')
plt.plot(neigvec, Acci, 'k-', label='Accuracy')
plt.xlabel('Number of eigenvectors')
plt.ylabel('Metrics of Accuracy')
plt.legend()
# NOTE(review): dead assignment -- presumably meant plt.legend(loc='lower right').
loc= 'lower right'
#%% BIG GAP FOR Applying the classifier to the post treatment samples!
# Here we are doing it to just the 107 Aziz sample (one of the very late post treatment samples).
# We will combine this with the other sample, and we should also do the 30 hour time point.
#%% Project the post treatment cells into eigenspace
# NOTE(review): dfpost was built from raw.X only and has no 'sample' column
# (see the timepoint cell above), so this filter would raise KeyError;
# dfpost107 is also never used afterwards -- Apost (all post cells) is.
dfpost107=dfpost[dfpost['sample'].str.contains("Aziz")]
mpost = Apost.mean(axis =1)
print(npost)
#%%
neigs=100
X107 = Apost.sub(mpost, axis=0) # we subtract from each column
Xpost = X107.as_matrix()
postmat = np.zeros((neigs, npost))
postvec = np.zeros((neigs,1))
for i in range(npost):
    Mcol = Xpost[:,i]
    McolT = Mcol.T
    b = np.matmul(McolT, Vnorm)
    postvec = b[:neigs]
    postmat[:,i] = postvec
#%% Compare the coordinates of the testing set with the coordinates of the training set cells
# Now you have a test mat which has class labels corresponding to each column
# For each column of your test mat, you want to find the k training mat columns it is closest to
# make a matrix that stores the ordered indices with the top being the lowest and the bottom the highest
# Euclidean distance
ordered_inds = np.zeros((ntrain, npost))
dist = np.zeros((ntrain,npost))
#for i in range(ntest):
for i in range(npost):
    postvec = postmat[:,i]
    #for j in range(ntrain):
    for j in range(ntrain):
        trainvec = trainmat[:,j]
        # testvec of length neigs and train vec of length neigs
        # find Euclidean distance between the two vectors
        dist_j = np.linalg.norm(postvec-trainvec)
        # fill in your distance vector
        dist[j, i]=dist_j
#%% Now you have a distance matrix where each column is a testing cell
# for each column, we want to output the indices of the training vector distance
# in order from least to greatest
lab_est_post = [None]*npost
for i in range(npost):
    distcol = dist[:,i]
    ind = np.argsort(distcol)
    ordered_inds[:,i] = ind
#%% Use the ordered ind to make sense/res matrix
# Using k=1 nearest neighbors classifier. Need to figure out how to extend this
for i in range(npost):
    index = ordered_inds[0,i]
    lab_est_post[i] = trainlabels[int(index)]
#%% Now that you have lab_est post, quantify the proportion.
# phi_est: fraction of post-treatment cells classified as sensitive;
# phi_est0: pre-treatment baseline from the earlier confusion matrix.
ct_sens=0
for i in range(npost):
    if lab_est_post[i]=='sens':
        ct_sens+=1
phi_est=ct_sens/npost
print(phi_est)
phi_est0=1-prevalance
print(phi_est0)
|
# 학과: 문화콘텐츠학과 (Department: Cultural Contents)
# 학번: 2016010819 (Student ID)
# 작성자: 이아영 (Author: Lee Ah-young)
# 작성일: 2016년 10월 21일 (Written: 2016-10-21)
# 덧셈만으로 제곱 구하는 함수 만들기(재귀함수)
# (Compute the square using only addition -- recursive version)
def square(n):
    """Compute n**2 using only addition/subtraction, recursively.

    Uses the identity n^2 = (2n - 1) + (n - 1)^2, mirrored for negatives.
    """
    if n == 0:
        return 0
    if n > 0:
        return n + n - 1 + square(n - 1)
    return -n - n - 1 + square(n + 1)
'''
def square(n):
if n != 0:
if n > 0:
return n+n-1 + square(n-1)
elif n < 0:
rdturn -n-1 + square(n+1)
else:
return 0
'''
# 덧셈만으로 제곱 구하는 함수 만들기(while 반복문)
def square2(n):
    """Compute n**2 using only addition/subtraction, iteratively.

    Accumulates 2|n| - 1 while stepping n toward zero.
    """
    total = 0
    while n != 0:
        if n > 0:
            total += n + n - 1
            n -= 1
        else:
            total += -n - n - 1
            n += 1
    # n is 0 here, so this is just the accumulated square.
    return n + total
# 곱셈만으로 순열구하는 함수 만들기(재귀함수)
def permutation(n, k):
    """Return nPk (k-permutations of n) using only multiplication,
    recursively.  Returns 0 when n < k and 1 when k <= 0."""
    if k <= 0:
        return 1
    if n >= k:
        return n * permutation(n - 1, k - 1)
    return 0
# 곱셈만으로 순열구하는 함수 만들기(while 반복문)
def permutation2(n, k):
    """Iterative k-permutation count: 0 when n < k, 1 when k <= 0."""
    result = 1
    while k > 0:
        if n < k:
            return 0
        result *= n
        n, k = n - 1, k - 1
    return result
|
print(3 * 5)  # quick sanity check: prints 15
|
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver, Signal
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from jenkins.models import Job, Build, Artifact
# Signals
projectbuild_finished = Signal(providing_args=["projectbuild"])
@python_2_unicode_compatible
class Dependency(models.Model):
    """A named build dependency, optionally backed by a Jenkins job."""

    name = models.CharField(max_length=255, unique=True)
    job = models.ForeignKey(Job, null=True)
    description = models.TextField(null=True, blank=True)
    # One "key=value" pair per line; parsed by get_build_parameters().
    parameters = models.TextField(null=True, blank=True)

    class Meta:
        verbose_name_plural = "dependencies"

    def __str__(self):
        return self.name

    def get_current_build(self):
        """
        Return the most recent build

        Returns None when there is no associated job or no finished build.
        """
        if self.job is not None:
            finished_builds = self.job.build_set.filter(phase="FINISHED")
            if finished_builds.count() > 0:
                return finished_builds.order_by("-number")[0]

    def get_build_parameters(self):
        """
        Return the parameters property parsed into a dictionary of "build"
        parameters.
        If we have no parameters, we should get None back.
        """
        if not self.parameters:
            return
        build_parameters = {}
        keyvalues = self.parameters.split("\n")
        for keyvalue in keyvalues:
            # Bug fix: split on the first "=" only, so parameter values may
            # themselves contain "=" (the old split("=") raised ValueError).
            key, value = keyvalue.split("=", 1)
            build_parameters[key.strip()] = value.strip()
        return build_parameters
@python_2_unicode_compatible
class ProjectDependency(models.Model):
    """
    Represents the build of a dependency used by a project.
    e.g. Project X can use build 20 of dependency Y while
    Project Z is using build 23.
    So, this is the specific "tag" version of the
    dependency that's used by this project.
    We can have a UI that shows what the current version is
    and the current version, and allow promoting to a newer version.
    """
    dependency = models.ForeignKey(Dependency)
    project = models.ForeignKey("Project")
    # When True, handle_new_build() advances current_build automatically.
    auto_track = models.BooleanField(default=True)
    current_build = models.ForeignKey(Build, null=True, editable=False)
    class Meta:
        verbose_name_plural = "project dependencies"
    def __str__(self):
        return "{0} dependency for {1} {2}".format(
            self.dependency, self.project, self.auto_track)
@python_2_unicode_compatible
class Project(models.Model):
    """A named project composed of a set of tracked dependencies."""

    name = models.CharField(max_length=255, unique=True)
    description = models.TextField(null=True, blank=True)
    dependencies = models.ManyToManyField(
        Dependency, through=ProjectDependency)

    def get_current_artifacts(self):
        """
        Returns a QuerySet of Artifact objects representing the Artifacts
        associated with the project dependencies at their current dependency
        level.
        """
        current_builds = [
            dep.current_build
            for dep in ProjectDependency.objects.filter(project=self)
        ]
        return Artifact.objects.filter(build__in=current_builds)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class ProjectBuildDependency(models.Model):
    """
    Represents one of the dependencies of a particular Project Build.
    """
    projectbuild = models.ForeignKey("ProjectBuild")
    # The Jenkins build fulfilling this dependency; null until it completes.
    build = models.ForeignKey(Build, blank=True, null=True)
    dependency = models.ForeignKey(Dependency)
    class Meta:
        verbose_name_plural = "project build dependencies"
    def __str__(self):
        return "Build of {0} for {1}".format(
            self.dependency.name, self.projectbuild.build_id)
@python_2_unicode_compatible
class ProjectBuild(models.Model):
    """Represents a requested build of a Project."""
    project = models.ForeignKey(Project)
    requested_by = models.ForeignKey(User, null=True, blank=True)
    requested_at = models.DateTimeField(auto_now_add=True)
    ended_at = models.DateTimeField(null=True)
    status = models.CharField(max_length=10, default="UNKNOWN")
    phase = models.CharField(max_length=25, default="UNKNOWN")
    # Daily-unique identifier, e.g. "20170522.3"; see generate_projectbuild_id.
    build_id = models.CharField(max_length=20)
    build_dependencies = models.ManyToManyField(
        Build, through=ProjectBuildDependency)
    def __str__(self):
        return self.project.name
    def get_current_artifacts(self):
        """
        Returns a QuerySet of Artifact objects representing the Artifacts
        associated with the builds of the project dependencies for this
        project build.
        """
        return Artifact.objects.filter(build__build_id=self.build_id)
    def save(self, **kwargs):
        # Assign the daily-unique build id on first save only.
        if not self.pk:
            self.build_id = generate_projectbuild_id(self)
        super(ProjectBuild, self).save(**kwargs)
def generate_projectbuild_id(projectbuild):
    """
    Generates a daily-unique id for a given project.
    TODO: Should this drop the ".0" when there's no previous builds?
    """
    # This is a possible race condition
    now = timezone.now()
    filters = {
        "requested_at__gt": now.replace(hour=0, minute=0, second=0),
        "requested_at__lte": now.replace(hour=23, minute=59, second=59),
        "project": projectbuild.project,
    }
    previous_count = ProjectBuild.objects.filter(**filters).count()
    # "YYYYMMDD.<count of builds requested today>"
    return "{0}.{1}".format(now.strftime("%Y%m%d"), previous_count)
@receiver(post_save, sender=Build, dispatch_uid="new_build_handler")
def handle_new_build(sender, created, instance, **kwargs):
    # post_save hook: advance every auto-tracking ProjectDependency of the
    # build's job to point at this newly saved build.
    if instance.job.dependency_set.exists():
        for dependency in instance.job.dependency_set.all():
            for project_dependency in dependency.projectdependency_set.filter(
                auto_track=True):
                project_dependency.current_build = instance
                project_dependency.save()
@receiver(post_save, sender=Build, dispatch_uid="projectbuild_build_handler")
def handle_builds_for_projectbuild(sender, created, instance, **kwargs):
    # post_save hook: record a build against its ProjectBuildDependency and
    # roll the dependencies' statuses/phases up to the parent ProjectBuild.
    if instance.build_id:
        dependency = ProjectBuildDependency.objects.filter(
            dependency__job=instance.job,
            projectbuild__build_id=instance.build_id).first()
        # TODO: This event handler should be split...
        # This is a possible race-condition, if we have multiple dependencies
        # being processed at the same time, then we could miss the status of
        # one.
        #
        # Splitting it into a task would rule out using events tho'.
        if dependency:
            dependency.build = instance
            dependency.save()
            projectbuild = dependency.projectbuild
            build_statuses = ProjectBuildDependency.objects.filter(
                projectbuild=dependency.projectbuild).values(
                "build__status", "build__phase")
            statuses = set([x["build__status"] for x in build_statuses])
            phases = set([x["build__phase"] for x in build_statuses])
            updated = False
            # Only propagate when every dependency agrees on the value.
            if len(statuses) == 1:
                projectbuild.status = list(statuses)[0]
                updated = True
            if len(phases) == 1:
                projectbuild.phase = list(phases)[0]
                if projectbuild.phase == "FINISHED":
                    projectbuild.ended_at = timezone.now()
                    projectbuild.save()
                    projectbuild_finished.send(
                        sender=ProjectBuild, projectbuild=projectbuild)
            # NOTE(review): the phase branch does not set `updated`, so a
            # phase-only change is persisted only in the FINISHED case --
            # confirm whether that is intentional.
            if updated:
                projectbuild.save()
|
# Keyword arguments for constructing the boto3 DynamoDB client/resource.
boto_args = {'service_name': 'dynamodb'}
# Uncomment to target a local DynamoDB instance instead of AWS:
#boto_args['endpoint_url'] = 'http://localhost:8000'
|
#!/usr/bin/env python
import roslib
roslib.load_manifest("edge_tpu")
import sys
import rospy
import cv2
import time
from std_msgs.msg import String
from sensor_msgs.msg import Image
#from object_detection_msgs.msgs import RecognizedObject
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import PIL
import edgetpu.classification.engine
class tpu_classifier:
    """ROS node wrapper around an Edge TPU image classification engine."""

    def __init__(self, model, labels, threshold=0.5, device_path=None):
        self.bridge = CvBridge()
        rospy.loginfo("Loading model {}".format(model))
        self.image_sub = rospy.Subscriber("input", Image, self.callback)
        self.threshold = threshold
        self.engine = edgetpu.classification.engine.ClassificationEngine(model, device_path)
        self.load_labels(labels)
        # Bug fix: `engine` was an undefined name here, and rospy.loginfo does
        # not take print-style varargs -- format one message string instead.
        rospy.loginfo("Device path: {}".format(self.engine.device_path()))

    def load_labels(self, labels):
        """Parse a "<numeric id> <name>" per-line label file into self.labels."""
        with open(labels, 'r', encoding="utf-8") as f:
            pairs = (l.strip().split(maxsplit=1) for l in f.readlines())
            self.labels = dict((int(k), v) for k, v in pairs)

    def callback(self, data):
        """Classify one incoming sensor_msgs/Image and log the top result."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding="passthrough")
        except CvBridgeError as e:
            rospy.logerr(e)
            # Bug fix: without this return, cv_image was undefined below and
            # the callback raised NameError after a conversion failure.
            return
        results = self.engine.ClassifyWithImage(PIL.Image.fromarray(cv_image), top_k=1, threshold=self.threshold)
        if len(results) > 0:
            try:
                rospy.loginfo("%s %.2f\n%.2fms" % (
                    self.labels[results[0][0]], results[0][1], self.engine.get_inference_time()))
            except Exception:
                # Narrowed from a bare except; keep the best-effort logging.
                rospy.logerr("Error processing results")
                rospy.logerr(results)
def main(args):
    # Initialise the ROS node, read private params, build the classifier,
    # and spin until shutdown.
    rospy.init_node('classify', anonymous=True)
    model_path = rospy.get_param('~model_path')
    label_path = rospy.get_param('~label_path')
    threshold = rospy.get_param('~threshold', default=0.5)
    device_path = rospy.get_param('~device_path', default=None)
    classifier = tpu_classifier(model_path, label_path, threshold, device_path)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
if __name__ == "__main__":
    main(sys.argv)
|
import xml.etree.ElementTree as ET
# Sum the integer <count> values of every <comment> in a local XML file.
serviceurl = ('E:/KNU/course2semestr/codinginGIS/alonwork/S3/comments_283746.xml')
print ('Retrieving', serviceurl)
#TODO
#Find sum in count elements
counter = 0
tree = ET.parse(serviceurl)
root = tree.getroot()
# NOTE(review): findall('comments') only matches direct children of the root;
# if the document root itself is <comments>, this loop finds nothing --
# confirm against the actual XML layout.
for x in root.findall('comments'):
    for y in x.findall('comment'):
        count = y.find('count').text
        counter += int(count)
print(counter)
|
# -*- encoding: utf-8 -*-
"""
Topic:通用文章爬取
Todo:瀑布流分页不可用
Todo:Ajax加载不可用
"""
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from items import ArticleItem
import logging
logger = logging.getLogger(__name__)
class ArticleSpider(CrawlSpider):
    """Generic article crawler configured at runtime by a `rule` object."""

    def __init__(self, rule):
        self.rule = rule
        self.name = rule.name
        self.write_to = rule.write_to
        self.allowed_domains = rule.allow_domains.split(",")
        self.start_urls = rule.start_urls.split(",")
        rule_list = []
        # Translation of the note below: restrict_xpaths extracts links
        # automatically without @href; adding @href raises
        # "'str' object has no attribute 'iter'".
        """
        restrict_xpaths在获取链接的时候不需要@href,会自动获取
        写上会报错:'str' object has no attribute 'iter'
        """
        # Rule for following the "next page" link, when configured.
        if rule.next_page:
            rule_list.append(Rule(LinkExtractor(restrict_xpaths=rule.next_page)))
        # Rule for extracting article links.
        rule_list.append(Rule(LinkExtractor(
            allow=[rule.allow_url],
            restrict_xpaths=[rule.extract_from]),
            callback='parse_item'))
        self.rules = tuple(rule_list)
        super(ArticleSpider, self).__init__()
    def parse_item(self, response):
        """Extract title/content/url from an article page into an ArticleItem."""
        article = ArticleItem()
        article['title'] = response.xpath(self.rule.title_xpath).extract()
        article['content'] = response.xpath(self.rule.content_xpath).extract()
        # extract() returns a list of matches; keep only the first one.
        if isinstance(article['content'], list):
            article['content'] = article['content'][0]
        article['url'] = response.url
        logger.info('crawl link:' + response.url)
        return article
|
"""Contains all constants for the project.
This file contains all constants for the project. E.g. dictionary keys and
regional names.
Typical usage example:
print(REGIONS.get(i))
"""
# top level keys
KEY_NEWS = 'news'
KEY_REGIONAL = 'regional'
KEY_NEWSTORIESCOUNTLINK = 'newStoriesCountLink'
KEY_TYPE = 'type'
# first level keys
KEY_SOPHORAID = 'sophoraId'
KEY_EXTERNALID = 'externalId'
KEY_TITLE = 'title'
KEY_TEASERIMAGE = 'teaserImage'
KEY_CONTENT = 'content'
KEY_DATE = 'date'
KEY_TRACKING = 'tracking'
KEY_TAGS = 'tags'
KEY_UPDATECHECKURL = 'updateCheckUrl'
KEY_REGIONID = 'regionId'
KEY_IMAGES = 'images'
KEY_DETAILS = 'details'
KEY_DETAILSWEB = 'detailsweb'
KEY_SHAREURL = 'shareURL'
KEY_TOPLINE = 'topline'
KEY_FIRSTSENTENCE = 'firstSentence'
KEY_GEOTAGS = 'geotags'
KEY_CROP = 'crop'
KEY_RESSORT = 'ressort'
KEY_STREAMS = 'streams'
KEY_BREAKINGNEWS = 'breakingNews'
KEY_FIRSTFRAME = 'firstFrame'
# second level keys
KEY_COPYRIGHT = 'copyrigh'
KEY_ALTTEXT = 'alttext'
KEY_PREFERREDVARIANTS = 'preferredVariants'
KEY_VIDEOWEBL = 'videowebl'
KEY_PORTRAETGROSSPLUS8x9 = 'portraetgrossplus8x9'
KEY_VIDEOWEBM = 'videowebm'
KEY_VIDEOWEBS = 'videowebs'
KEY_PORTRAETGROSS8x9 = 'portraetgross8x9'
KEY_IMAGEURL = 'imageurl'
KEY_TEXT = 'text'
KEY_HEADLINE = 'headline'
KEY_VALUE = 'value'
KEY_TAG = 'tag'
# regions
REGIONS = {0: 'no region',
1: 'Baden-Württemberg',
2: 'Bayern',
3: 'Berlin',
4: 'Brandenburg',
5: 'Bremen',
6: 'Hamburg',
7: 'Hessen',
8: 'Mecklenburg-Vorpommern',
9: 'Niedersachsen',
10: 'Nordrhein-Westfalen',
11: 'Rheinland-Pfalz',
12: 'Saarland',
13: 'Sachsen',
14: 'Sachsen-Anhalt',
15: 'Schleswig-Holstein',
16: 'Thüringen'}
# general
SQLCONNECTIONSLEEP = 5
SQLCONNECTIONTRYS = 5
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-22 23:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two nullable battery-tracking integer fields to the `station`
    # model. Auto-generated by Django; avoid hand-editing applied migrations.
    dependencies = [
        ('drone', '0003_auto_20170523_0106'),
    ]
    operations = [
        migrations.AddField(
            model_name='station',
            name='anticipated_charged_battery',
            field=models.IntegerField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='station',
            name='num_charged_battery',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
|
#global links Queue to share across files
from queue import Queue
from collections import Counter
def init(initial_page):
    """Initialise the shared crawl state: BFS queue, seen-set, counters, cookie."""
    global queue
    global seen
    global file_count
    global cookie
    queue = Queue()
    seen = {initial_page}
    queue.put(initial_page)
    file_count = Counter()
    cookie = None
|
import ply.lex as lex
# Reserved words.
reserved = {
    'at' : 'AT',
    'to' : 'TO',
    'up': 'UP',
    'add' : 'ADD',
    'end': 'END',
    'get' : 'GET',
    'map': 'MAP',
    'run': 'RUN',
    'set': 'SET',
    'down': 'DOWN',
    'edit' : 'EDIT',
    'exit': 'EXIT',
    'left': 'LEFT',
    'move': 'MOVE',
    'undo': 'UNDO',
    'build': 'BUILD',
    'moves' : 'MOVES',
    'named': 'NAMED',
    'right': 'RIGHT',
    'start': 'START',
    'create' : 'CREATE',
    'remove' : 'REMOVE',
    'switch': 'SWITCH',
    'replace' : 'REPLACE',
    'obstacle' : 'OBSTABLE',  # NOTE(review): 'OBSTABLE' looks like a typo for 'OBSTACLE'; the grammar may rely on this exact token name -- confirm before renaming
    'solution' : 'SOLUTION',
    'traverse' : 'TRAVERSE'
}
# List of token names.
tokens = [ 'COMMENT', 'INT', 'ID', 'LP', 'RP', 'COMMA' ] + list(reserved.values())
# Rule for user comments.
# NOTE: PLY matches function rules in definition order, so t_COMMENT and
# t_INT take precedence over t_ID.
def t_COMMENT(t): # Comments shall be ignored...
    r'[#][^\n]*'
    pass
def t_INT(t):
    r'\d+' # INT corresponds to one or more consecutive occurrences an integer...
    t.value = int(t.value)
    return t
# Rule for reserved words
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Re-type identifiers that are reserved words to their keyword token.
    t.type = reserved.get(t.value, 'ID')
    return t
# Rule for left parenthesis.
t_LP = r'\('
# Rule for right parenthesis.
t_RP = r'\)'
# Rule for comma.
t_COMMA = r'\,'
# Error handling.
def t_error(t):
    # Report the bad character and resynchronise one character later.
    print("Oops! Try again!")
    t.lexer.skip(1)
# Ignore spaces.
t_ignore = r' '
# Build the lexer.
lexer = lex.lex()
|
#https://github.com/codebasics/py/blob/master/ML/2_linear_reg_multivariate/2_linear_regression_multivariate.ipynb
# Multivariate linear regression on housing data (sqft, rooms, years -> price).
import pandas as pd
import numpy as np
from sklearn import linear_model
import math
data = pd.read_csv('Prices.csv')
print(data)
#Data preprocessing
#Since few data are missing, find the median of the data and fill the missing data with the median
rooms_median = math.floor(data.rooms.median())
print(rooms_median)
#fill the NaN with the median values
data.rooms = data.rooms.fillna(rooms_median)
print(data)
#train the linear regression model
reg = linear_model.LinearRegression()
reg.fit(data[['sqft','rooms','years']],data.price)
#predict with the new set of values (1500 sqft, 3 rooms, 0 years)
price_predicted = reg.predict([[1500,3,0]])
print(price_predicted)
|
import unittest
from katas.kyu_8.triple_trouble import triple_trouble
class TripleTroubleTestCase(unittest.TestCase):
    """Unit tests for triple_trouble: interleave three equal-length strings."""
    def test_equals(self):
        self.assertEqual(triple_trouble('aaa', 'bbb', 'ccc'), 'abcabcabc')
    def test_equals_2(self):
        self.assertEqual(triple_trouble('aaaaaa', 'bbbbbb', 'cccccc'),
                         'abcabcabcabcabcabc')
    def test_equals_3(self):
        self.assertEqual(triple_trouble('burn', 'reds', 'rolls'),
                         'brrueordlnsl')
    def test_equals_4(self):
        self.assertEqual(triple_trouble('Bm', 'aa', 'tn'), 'Batman')
    def test_equals_5(self):
        self.assertEqual(triple_trouble('LLh', 'euo', 'xtr'), 'LexLuthor')
|
from functions import isPrime
def find_ns(a, b):
    """Count how many consecutive n = 0, 1, 2, ... make n^2 + a*n + b prime."""
    count = 0
    while isPrime(count * count + a * count + b):
        count += 1
    return count
def main():
    """Project Euler 27: find a, b maximising consecutive primes of n^2 + a*n + b.

    Prints the winning (a, b) pair and then the product a * b.
    """
    # Track the best pair incrementally instead of storing all ~4M results in
    # two parallel lists and recomputing max(n_lst) three times (the original
    # approach). First strict improvement wins, matching list.index(max(...)).
    best_count = -1
    best_pair = None
    for a in range(-1000, 1000):
        # NOTE(review): range(-1001, 1001) includes b = -1001 -- confirm the
        # intended bounds; preserved here to keep behavior identical.
        for b in range(-1001, 1001):
            count = find_ns(a, b)
            if count > best_count:
                best_count = count
                best_pair = (a, b)
    print(best_pair)
    print(best_pair[0] * best_pair[1])
if __name__=='__main__':
    main()
|
from flask import Flask,render_template,request,redirect,url_for,flash
from flask_sqlalchemy import SQLAlchemy
from config import Development,Production
from resources.employees import Employee
app = Flask(__name__)
app.config.from_object(Development)
# app.config.from_object(Production)
db = SQLAlchemy(app)
# NOTE(review): model imports are placed after `db` is created -- presumably
# the model modules import `db` from here; confirm before reordering.
from models.Employees import EmployeesModel
from models.Payrolls import PayrollsModel
@app.before_first_request
def create_tables():
    # Create any missing database tables before serving the first request.
    db.create_all()
@app.route('/')
def home():
    # Render the index page with every employee ("wafanyikazi" = employees).
    employees = EmployeesModel.fetch_all_records()
    return render_template('index.html',wafanyikazi = employees)
@app.route('/payrolls/<int:id>')
def payrolls(id):
    # Render the payroll page for one employee ("mfanyikazi" = employee).
    employee = EmployeesModel.fetch_by_id(id)
    return render_template('payroll.html',mfanyikazi = employee)
@app.route('/delete/<int:id>')
def deleteEmployee(id):
    # Delete employee `id` and return to the index page.
    EmployeesModel.delete_by_id(id)
    return redirect(url_for('home'))
@app.route('/generate/<int:uid>',methods=['POST'])
def generate_payroll(uid):
    # Create and persist a payroll record for employee `uid` from form data.
    month = request.form['month']
    year = request.form['year']
    overtime = request.form['overtime']
    # Store the pay period as month name + year, e.g. "June2021".
    month = month + str(year)
    employee = EmployeesModel.fetch_by_id(uid)
    basic = employee.basic_salary
    benefits = employee.benefits
    # NOTE(review): the Employee name is hard-coded to "bob"; it appears to be
    # used only for salary arithmetic, so the name may be irrelevant -- confirm.
    mfanyikazi = Employee("bob",basic,benefits)
    gross = mfanyikazi.grossSalary
    payee = mfanyikazi.payeTax
    nhif = mfanyikazi.nhif
    nssf = mfanyikazi.nssf
    personal_relief = mfanyikazi.personal_relief
    sacco_contribution = 0
    pension = 0
    # Overtime is added to the net salary only, not to gross/tax figures.
    net = mfanyikazi.netSalary + int(overtime)
    emp_id = uid
    pay = PayrollsModel(month=month,gross_salary=gross,payee = payee,nhif=nhif,
                        nssf=nssf,personal_relief=personal_relief,
                        sacco_contribution=sacco_contribution,
                        pension=pension,net_salary=net,
                        employee_id=emp_id)
    try:
        pay.insert_record()
        return redirect(url_for('payrolls',id = uid))
    except:
        # NOTE(review): bare except hides the failure cause -- consider
        # narrowing and logging; currently flashes and redirects regardless.
        flash("Error in saving to the database")
        return redirect(url_for('payrolls',id = uid))
@app.route('/editemployee/<int:pos>',methods=['POST'])
def editEmployee(pos):
    # Update employee `pos` from form data, rejecting an email or KRA pin
    # that already belongs to a different employee.
    name = request.form['name']
    email = request.form['email']
    kra_pin = request.form['kra']
    basic_salary = request.form['basic']
    benefits = request.form['benefits']
    current_user = EmployeesModel.fetch_by_id(pos)
    # use and to capture this error
    # Reads as: (kra exists AND is someone else's) OR (email exists AND is
    # someone else's) -- `and` binds tighter than `or` in Python.
    if EmployeesModel.check_kra(kra_pin) and kra_pin != current_user.kra_pin or EmployeesModel.check_email(email) and email != current_user.email:
        flash("Email/Kra already exists")
        return redirect(url_for('home'))
    EmployeesModel.update_by_id(id = pos,name=name,email = email,kra=kra_pin,
                                basic=basic_salary,benefits=benefits)
    return redirect(url_for('home'))
@app.route('/newemployee',methods=['POST'])
def createNewEmployee():
    # Create a new employee from form data, rejecting duplicate email/KRA pins.
    if request.method == "POST":
        name = request.form['name']
        email = request.form['email']
        kra_pin = request.form['kra']
        basic_salary = request.form['basic']
        benefits = request.form['benefits']
        if EmployeesModel.check_kra(kra_pin) or EmployeesModel.check_email(email):
            flash("Email/Kra already exists")
            return redirect(url_for('home'))
        emp = EmployeesModel(name=name,email=email,kra_pin=kra_pin,
                             basic_salary=basic_salary,benefits=benefits)
        emp.insert_record()
        return redirect(url_for('home'))
# if __name__ == '__main__':
# app.run()
|
from django.urls import path
from Product.views import Comment_Add
# Route for adding a comment to the product with primary key `id`.
urlpatterns = [
    path('Comment_Add/<int:id>/',Comment_Add, name='comment_add'),
]
|
#-*- coding: utf-8 -*-
from django.conf.urls import url
from . views import *
# Routes: "" -> subscription form, "assinar" -> subscribe action.
urlpatterns = [
    url(r'^$', formulario, name='formulario'),
    url(r'assinar$', assinar, name='assinar'),
]
|
#!/usr/bin/python3
#minimalist python pe library
import sys
import argparse
import struct
import PEDataDirHeader
class Decoder:
    """Decoder for the data-directory table of a PE file."""

    def __init__(self, _filename="", _fileperms="rb"):
        self.fileperms = _fileperms
        self.filename = _filename
        self.header = PEDataDirHeader.PEDataDirHeader()
        self.fields = self.header.header_fields
        self.fmt_dict = self.header.header_fmt_dict
        # struct format string for one directory entry, in field order.
        self.fmt = "".join([self.fmt_dict[name] for name in self.fields])
        self.fmt_len = struct.calcsize(self.fmt)
        self.len = 0
        self.original_file = _filename

    def decode(self, _start=0, _count=0):
        """Decode `_count` data-directory entries starting `_start` bytes in.

        Returns (entries, bytes_consumed); entries is a list of unpacked
        tuples. Returns ([], 0) when `_count` is 0. A struct.error propagates
        if the file is truncated mid-entry.
        """
        self.decoded_file = []
        self.len = 0
        self.count = _count
        with open(self.original_file, self.fileperms) as raw_pe:
            # Skip (but account for) the leading bytes before the table.
            extra = raw_pe.read(_start)
            self.len += len(extra)
            if (self.count == 0):
                return [], 0
            for directory in range(self.count):
                _bytes = raw_pe.read(self.fmt_len)
                self.len += len(_bytes)
                unpack = struct.unpack(self.fmt, _bytes)
                # Idiom fix: compare to None with `is not` (struct.unpack
                # never returns None, but the guard is kept for safety).
                if unpack is not None:
                    self.decoded_file.append(unpack)
        return self.decoded_file, self.len

    def decode_field(self, index):
        """Return the field name at `index` in the entry layout."""
        return self.fields[index]
|
import os
# Launch a new GNOME Terminal window running gvim (blocks until spawn returns).
os.system('gnome-terminal --command=gvim')
|
import logging.config
# dictConfig schema: one formatter, stdout handler for DEBUG+, stderr handler
# for ERROR+, both attached to the root logger.
CONFIG_PY = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "default": {
            "format": "%(asctime)s | %(filename)s | %(levelname)s | %(funcName)s | %(message)s"
        },
    },
    #handler level overrides the logger level
    "handlers": {
        "info": {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": "default",
            "stream": "ext://sys.stdout"
        },
        "error": {
            "class": "logging.StreamHandler",
            "level": "ERROR",
            "formatter": "default",
            "stream": "ext://sys.stderr"
        },
    },
    # root logger level is always set to the lower level => the root logger sends all stream.
    # then the stream will be filtered be the handler level
    "root": {
        "level": "DEBUG",
        "handlers": ["info", "error"]
    },
}
logging.config.dictConfig(CONFIG_PY)
|
# set
# Key properties of sets:
# 1. Deduplication by construction (duplicate values are dropped)
# 2. Elements can be added and removed (but not modified in place)
# 3. Convenient intersection, union and difference operations
# Defining a set
set1 = {"xiaoming", 12, 12, "man", (1, )}
# print(set1)
# print(len(set1)) # set length
# print(type(set1)) # type
# Intersection (elements common to both sets)
set2 = {1, 2, 3, 4, 5, 6}
set3 = {2, 4, 6, 8}
# print(set2.intersection(set3)) # print the intersection of set2 and set3
# print(set2 & set3) # the & operator also computes the intersection
# print(set2.isdisjoint(set3)) # False if set2 and set3 share elements, True otherwise
# print(set1.isdisjoint(set2)) # check whether set1 and set2 are disjoint: True
# Union (all elements of both sets)
# print(set2.union(set3)) # print the union of set2 and set3
# print(set2 | set3) # the | operator also computes the union
# Difference
# print(set2.difference(set3)) # elements in set2 that are not in set3
# print(set2 - set3) # the - operator also computes the difference
# print(set3.difference(set2)) # elements in set3 that are not in set2
# print(set3 - set2) # set3 - set2 also computes that difference
# Symmetric difference
print(set2.symmetric_difference(set3)) # elements in exactly one of set2 / set3
print(set2 ^ set3) # the ^ operator also computes the symmetric difference
# Subset / superset checks
print(set2.issubset(set3)) # is set2 a subset of set3?
print(set2.issuperset(set3)) # does set2 contain all of set3?
print("----------------------------------------")
set2 = {1, 2, 3, 4, 5, 6}
# Adding elements
set2.add(7) # add a single element, 7
print(set2)
set2.update([9, 10]) # add several elements at once (from a list)
set2.update((11, 12)) # add several elements at once (from a tuple)
print(set2)
# Removing elements
set2.remove(9) # remove element 9 (raises KeyError if missing)
set2.discard(10) # remove element 10 (no error if missing)
print(set2)
|
# encoding: utf-8
"""
TODO:
`var z = require("blah").z;` ... to support this kind of require, we just have to
make sure we don't replace the entire line of code, but just the `require(whatev)`
part, so we'll end up with:
var z = require("blah").z;
var z = exports.z;
"""
import os
import sys
import re
PACKAGEFOLDERS = ['extendables/core-packages', 'site-packages']
all_packages = {}
for folder in PACKAGEFOLDERS:
packages = os.listdir(folder)
for package in packages:
if '.' not in package:
all_packages[package] = folder + '/' + package
class CodeFragment(object):
    """A piece of ExtendScript source whose #include and require() directives
    can be expanded ("inlined") recursively."""

    def __init__(self, filename_or_string, basedir='', string=False):
        if string:
            # Literal source code passed directly.
            self.code = filename_or_string
            self.basedir = basedir
            self.source = None
        else:
            # Source read from a file; remember its directory for includes.
            if len(basedir):
                basedir += '/'
            self.source = basedir + filename_or_string
            with open(self.source) as script:
                self.code = script.read()
            self.basedir = basedir + "/".join(filename_or_string.split('/')[0:-1])

    def inline_include(self, match):
        """Replace an `#include "file"` directive with the file's inlined code."""
        filename = match.group('filename')
        return CodeFragment(filename, self.basedir).inline()

    # todo: recursion
    # todo: support for var x = require("module").y;
    def inline_require(self, match):
        """Rewrite `require("pkg")` into an #include that populates `pkg`."""
        package = match.group('package')
        location = package.replace(package, all_packages.get(package, package), 1)
        location = location + '/lib'
        if not '/' in package:
            location = location + '/__core__'
        replacement = """undefined;
    var exports = {{}};
    #include "{0}.jsx"
    var {1} = exports;
    exports = undefined;
    """.format(location, package)
        # (Removed an unreachable `return CodeFragment(...).inline()` that
        # followed this return in the original.)
        return replacement

    # needs work
    def inline_extract(self, match):
        """Rewrite `require("pkg")` into an #include that dumps the package's
        exports into the global scope."""
        package = match.group('package')
        location = package.replace(package, all_packages.get(package, package), 1)
        if not '/' in location:
            location = location + '/__core__'
        # Bug fix: the literal JS braces must be doubled ({{ ... }}) or
        # str.format raises "Single '{' encountered" at runtime.
        replacement = """var exports = {{}};
    #include "{0}.jsx";
    for (name in exports) {{
        $.global[name] = exports[name];
    }}
    exports = undefined;
    """.format(package)
        return replacement

    def inline(self):
        """Return self.code with includes and requires expanded."""
        code = self.code
        if self.source:
            # Keep $.fileName pointing at the original script after inlining.
            code = code.replace(
                '$.fileName',
                'new File($.fileName).parent + "/{source}"'.format(source=self.source)
            )
        code = re.sub(r'#include "?(?P<filename>[^";]+)"?;?', self.inline_include, code)
        code = re.sub(r'(?<!function\s)require\(["\'](?P<package>.+)["\']\);?', self.inline_require, code)
        # NOTE(review): inline_require has already consumed every require()
        # match, so this second pass normally finds nothing -- confirm intent.
        code = re.sub(r'(?<!function\s)require\(["\'](?P<package>.+)["\']\);?', self.inline_extract, code)
        return code
# NOTE(review): this entry point uses the Python 2 print statement; the whole
# script targets Python 2.
def inline_code():
    """ Give the filename to the script you wish to minify. """
    script = sys.argv[1]
    print CodeFragment(script).inline()
if __name__ == '__main__':
    inline_code()
|
from boto.s3.connection import S3Connection
# Set a one-year Cache-Control header on every key in an S3 bucket by
# copying each key onto itself with updated metadata.
#Connect
conn = S3Connection('access key','secret')
#Get your bucket
b = conn.get_bucket('bucket')
#Iterate through bucket keys, set metadata and update your key
for key in b.list():
    key.metadata.update({'Cache-Control': 'max-age=3154000'})
    # Copying a key onto itself is the S3 idiom for rewriting its metadata.
    key.copy(
        key.bucket.name,
        key.name,
        key.metadata,
        preserve_acl=True)
    print('Cached -> ', key.name)
|
import textwrap
def wrap(string, max_width):
    """Wrap `string` at `max_width` columns; every line ends with a newline."""
    wrapped_lines = textwrap.wrap(string, max_width)
    return ''.join(line + '\n' for line in wrapped_lines)
if __name__ == '__main__':
    # Read the text and the wrap width from stdin, then print the result.
    string, max_width = input(), int(input())
    result = wrap(string, max_width)
    print(result)
|
# test 1
# List slicing
names = ["Amy","Sam","Ziv","Leo","Rock"]
# Print elements with indices 1..2 (the end index 3 is excluded)
print(names[1:3])
# Omitting the start index slices from the beginning
print(names[:3])
# Omitting the end index slices through to the end of the list
print(names[2:])
# A negative start index counts from the end (last two elements here)
print(names[-2:])
# test 2
# Slices can be iterated just like full lists
for name in names[:3]:
    print(name)
# test 3
# Copying a list: take a slice containing every element
name3 = names[:]
names.append("ming")
name3.append("wa")
print(names)
print(name3)
name4 = names
name4.append("1")
names.append("2")
print(name4)
print(names)
# English summary of the note below: a slice copy creates an independent new
# list (changes to one don't affect the other), whereas plain assignment makes
# both names refer to the same underlying list.
'''
值得注意的是,使用切片复制的列表才是真正的创建了一个新的独立的列表,两者之间的修改互不干扰
而使用赋值的方式赋值的列表只是将两个变量都指向了同一个列表,在两者上的操作会反映在同一个列表上
'''
|
from __future__ import unicode_literals
from django.apps import AppConfig
class LiantangConfig(AppConfig):
    # Django application configuration for the `liantang` app.
    name = 'liantang'
|
def factorial(a):
    """Return a! (the product 1 * 2 * ... * a); returns 1 for a <= 0."""
    product = 1
    # Start at 2: multiplying by 1 is a no-op.
    for factor in range(2, a + 1):
        product *= factor
    return product
print(factorial(19))
|
#!/usr/bin/env python
#
# Po-Lin Chiu 2015.05.14 - Created.
#
from EMAN2 import *
from sparx import *
def write_ctfs2header(ctf_txtfile, in_image, out_image):
    """
    Write the CTF parameters into the image header (for stack).
    ctf_txtfile: 'sxcter.py' ctf output list
    in_image: input image data
    out_image: output image data with header written

    Returns the generated ctf object on a match, 0 when no entry in the
    ctf list matches the stack's embedded micrograph number.
    """
    import os
    import re
    ctfs = read_text_row(ctf_txtfile)
    f_name = os.path.basename(in_image)  # NOTE(review): unused
    for i in xrange(len(ctfs)):
        # Match the micrograph number embedded in the ctf entry's filename
        # against the number embedded in the particle-stack filename.
        im_name = ctfs[i][-1].split('/')[-1]
        im_number = (re.findall(r'\d+', im_name)[0])
        ptstk_number = (re.findall(r'\d+', in_image)[0])
        if (ptstk_number == im_number):
            ctf = ctfs[i]
            n_im = EMUtil.get_image_count(in_image)
            # Copy every particle, attaching the ctf attribute to each header.
            for j in xrange(n_im):
                a = EMData()
                a.read_image(in_image, j)
                a.set_attr("ctf", generate_ctf(ctf[:9]))
                a.write_image(out_image, -1)
            return generate_ctf(ctf[:9])
    return 0
def flip_phases_stack(in_image, out_image, e2ctf, oversampling=1):
    """
    e2ctf: EMAN2 object for CTF.

    Phase-flip every image in `in_image` by the sign of the CTF and append
    the results to `out_image`. With oversampling > 1 each image is padded
    before the FFT and cropped back afterwards.
    """
    n_im = EMUtil.get_image_count(in_image)
    for i in xrange(n_im):
        aData = EMData()
        aData.read_image(in_image, i)
        im_size = aData.get_xsize()
        osam_size = im_size * oversampling
        if oversampling > 1:
            # Pad symmetrically to the oversampled box size.
            aData.clip_inplace(Region(-(im_size*(oversampling-1)/2),
                                      -(im_size*(oversampling-1)/2),
                                      osam_size,
                                      osam_size))
        fft_im = aData.do_fft()
        # Multiply the FFT by the sign of the CTF (phase flipping).
        flipim = fft_im.copy()
        e2ctf.compute_2d_complex(flipim, Ctf.CtfType.CTF_SIGN)
        fft_im.mult(flipim)
        out = fft_im.do_ift()
        # Record the ctf and pixel size in the output header.
        out['ctf'] = e2ctf
        out['apix_x'] = e2ctf.apix
        out['apix_y'] = e2ctf.apix
        out['apix_z'] = e2ctf.apix
        # Crop back to the original box size.
        out.clip_inplace(Region(int(im_size*(oversampling-1)/2),
                                int(im_size*(oversampling-1)/2),
                                im_size,
                                im_size))
        out.write_image(out_image, -1)
def normalize_stack(in_image, out_image, contrast_inversion=False):
    """Normalize each image to zero mean / unit sigma, optionally inverting
    contrast, then remove any ramp (linear gradient)."""
    n_im = EMUtil.get_image_count(in_image)
    if contrast_inversion:
        invert = -1.0
    else:
        invert = 1.0
    for i in xrange(n_im):
        a = EMData()
        a.read_image(in_image, i)
        # st[0] = mean, st[1] = sigma (per Util.infomask over the whole image).
        st = Util.infomask(a, None, True)
        b = ramp((a-st[0]) / st[1] * invert)
        b.write_image(out_image, i)
def scale_stack(in_image, out_image, out_size=64):
    """Resample every image in the stack down/up to an `out_size` pixel box."""
    n_im = EMUtil.get_image_count(in_image)
    for i in xrange(n_im):
        a = EMData()
        a.read_image(in_image, i)
        im_size = a.get_xsize()
        # Scale factor from the current box size to the requested one.
        s = float(out_size) / im_size
        b = resample(a, s)
        b.write_image(out_image, i)
def clean_file(filename):
    """Delete `filename` if it exists as a regular file; otherwise do nothing."""
    import os
    file_present = os.path.isfile(filename)
    if file_present:
        os.remove(filename)
def main():
    # Batch pre-process particle stacks (Python 2 / EMAN2 script):
    # write CTF headers, phase-flip, normalize with contrast inversion,
    # and scale each stack down to a 64 px box.
    clean_file("temp1.hdf")
    clean_file("temp2.hdf")
    clean_file("temp3.hdf")
    import glob
    file_list = glob.glob("ptrc_qfviii_*.hdf")
    ctf_list = "../../ctf_estm/3001_041315/ctf_params_3001.txt"
    for stack in file_list:
        print stack, EMUtil.get_image_count(stack)
        out_stack = "pre%s" % stack
        clean_file(out_stack)
        ctf_obj = write_ctfs2header(ctf_list, stack, "temp1.hdf")
        flip_phases_stack("temp1.hdf", "temp2.hdf", ctf_obj, oversampling=1)
        normalize_stack("temp2.hdf", "temp3.hdf", contrast_inversion=True)
        scale_stack("temp3.hdf", out_stack, out_size=64)
        # Remove intermediates before processing the next stack.
        clean_file("temp1.hdf")
        clean_file("temp2.hdf")
        clean_file("temp3.hdf")
if __name__ == '__main__':
    main()
|
from django.shortcuts import render
from django.http import JsonResponse
def hello_world(request):
    # Trivial JSON greeting endpoint (health-check style).
    return JsonResponse({
        'message': 'Hello world!'
    })
|
from django.contrib import admin
from .models import Budget, Expense
# Expose Budget and Expense in the Django admin with default ModelAdmin options.
admin.site.register(Budget)
admin.site.register(Expense)
|
import json
from Utilities import *
from Animation import *
class NPC: # Non-player characters. People to interact with in-game that are run by the computer
    # NOTE(review): random/get/fill/floor are Processing.py builtins supplied
    # by the sketch environment -- confirm when running outside Processing.
    def __init__(self, name):
        self.name = name # For display purposes and for finding the right save file
        self.savefilename = str(self.name) + "SaveFile.txt" # Saves general things like location, trades, and possessions
        self.defaultfile = "npcDefaultSave.txt" # In case the save file gets lost
        try:
            self.readSaveFile(self.savefilename)
        except (IOError, OSError, ValueError):
            # Narrowed from a bare except: fall back to the default save when
            # this NPC's own file is missing or contains invalid JSON.
            self.readSaveFile(self.defaultfile)
        self.displayImage = Animation(str(self.name) + "-" + "no-still", 1, 1) # "no-still" stands for facing north, view overhead, standing still. I need that image generator
        self.moveLeft = 0 # How much is left in the move, not how much to move in the leftwards direction
        self.nextMove = round(random(300)) # To make it look like they're doing something important
    def readSaveFile(self, filename):
        # Load this NPC's attribute dict from a JSON save file.
        file = open(filename, "r")
        saveData = " ".join(file.readlines())
        self.attributes = json.loads(saveData)
        file.close()
    def writeSaveFile(self):
        # Persist the attribute dict to this NPC's own save file.
        # Bug fix: was `self.saveFilename` (capital F), an attribute that is
        # never defined -- saving raised AttributeError.
        newSaveFile = open(self.savefilename, "w")
        newSaveFile.write(json.dumps(self.attributes))
        newSaveFile.close()
        self.updateImage()
    def updateImage(self): # Like the player
        # Rebuild the sprite animation from the facing/view/state attributes.
        if self.attributes["state"] == "still":
            self.displayImage = Animation(self.name + "-" + self.attributes["facing"] + self.attributes["view"] + "-still", 1, 1)
        elif self.attributes["state"] == "mov":
            self.displayImage = Animation(self.name + "-" + self.attributes["facing"] + self.attributes["view"] + "-mov", 4, 10)
    # TODO: Make this more modular
    def run(self):
        # Per-frame update: draw, wander at random intervals, bounce off walls.
        self.display()
        self.nextPixel = self.findNextPxl() # For finding boundries
        if self.nextMove <= 0:
            self.setMovePoint()
            self.nextMove = round(random(300))
        if self.moveLeft <= 0:
            self.attributes["state"] = "still"
            self.updateImage()
        self.nextMove -= 1
        if self.moveLeft > 0:
            self.move()
        if self.nextPixel == intToRGB(-3815995): # For running into a boundry. Not yet modular
            # Undo the step (plus one pixel) and cancel the current move.
            if self.attributes["facing"] == "n":
                self.attributes["y"] += self.attributes["speed"] + 1
            if self.attributes["facing"] == "s":
                self.attributes["y"] -= self.attributes["speed"] + 1
            if self.attributes["facing"] == "e":
                self.attributes["x"] -= self.attributes["speed"] + 1
            if self.attributes["facing"] == "w":
                self.attributes["x"] += self.attributes["speed"] + 1
            self.moveLeft = 0
    # TODO: Make this work with any color
    def display(self):
        self.displayImage.display(self.attributes["x"], self.attributes["y"], self.attributes["w"], self.attributes["h"])
    def move(self): # For moving based on direction faced
        self.attributes["state"] = "mov"
        self.nextPixel = self.findNextPxl()
        self.pastX = self.attributes["x"]
        self.pastY = self.attributes["y"]
        if self.attributes["view"] == "o":
            if self.attributes["facing"] == "n":
                self.attributes["y"] -= self.attributes["speed"]
            if self.attributes["facing"] == "s":
                self.attributes["y"] += self.attributes["speed"]
            if self.attributes["facing"] == "e":
                self.attributes["x"] += self.attributes["speed"]
            if self.attributes["facing"] == "w":
                self.attributes["x"] -= self.attributes["speed"]
        self.moveLeft -= self.attributes["speed"]
    def setMovePoint(self): # Deciding which direction to go
        orientation = floor(random(3.99))
        if orientation == 0:
            self.attributes["facing"] = "n"
        if orientation == 1:
            self.attributes["facing"] = "s"
        if orientation == 2:
            self.attributes["facing"] = "e"
        if orientation == 3:
            self.attributes["facing"] = "w"
        self.updateImage()
        self.moveLeft = random(300)
    def findNextPxl(self): # For finding color
        # Sample the pixel colour 10 px ahead of the NPC in its facing direction.
        if self.attributes["facing"] == "n":
            nextPxl = get(self.attributes["x"], self.attributes["y"] - 10)
            fill(255, 0, 0)
            #ellipse(self.attributes["x"], self.attributes["y"] - 10, 10, 10)
            return intToRGB(nextPxl)
        if self.attributes["facing"] == "s":
            nextPxl = get(self.attributes["x"], self.attributes["y"] + 10)
            fill(255, 0, 0)
            #ellipse(self.attributes["x"], self.attributes["y"] + 10, 10, 10)
            return intToRGB(nextPxl)
        if self.attributes["facing"] == "w":
            nextPxl = get(self.attributes["x"] - 10, self.attributes["y"])
            fill(255, 0, 0)
            #ellipse(self.attributes["x"] - 10, self.attributes["y"], 10, 10)
            return intToRGB(nextPxl)
        if self.attributes["facing"] == "e":
            nextPxl = get(self.attributes["x"] + 10, self.attributes["y"])
            fill(255, 0, 0)
            #ellipse(self.attributes["x"] + 10, self.attributes["y"], 10, 10)
            return intToRGB(nextPxl)
|
#!/usr/bin/env python3
"""Retrieve and save data from/to csv.
Usage:
python3 words.py <URL>
"""
import sys
import pandas as pd
import numpy as np
import datetime
def read_csv(path, sep=',', header='infer'):
    """Load a csv file into a pandas DataFrame.

    Args:
        path: The path to the file.
        sep: Delimiter to use.
        header: Row number(s) to use as the column names.

    Returns:
        A pandas DataFrame containing rows from the file.

    Raises:
        OSError: re-raised (after logging to stderr) when the path
            cannot be read.
    """
    try:
        return pd.read_csv(path, sep=sep, header=header)
    except OSError as e:
        # Log and propagate so callers decide how to recover.
        print("Could not read file because {}".format(str(e)), file=sys.stderr)
        raise
def write_csv(dir_path, file_name, data, columns):
    """Write output to a csv file named "<timestamp>_<file_name>.csv".

    Rows are numbered from 1 in the first column.

    Args:
        dir_path: The path to the directory.
        file_name: Base file name (without extension).
        data: Sequence of integer values to save, one per row.
        columns: Header names (iterable of strings, or one preformatted string).
    """
    import os  # local import keeps this module's import block untouched
    try:
        # BUG FIX: the original used str(datetime), which stringifies the
        # datetime *module* object; use an actual timestamp instead.
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        file_name = "".join([stamp, '_', file_name, '.csv'])
        # os.path.join is portable; the original hard-coded backslashes.
        file_path = os.path.join(dir_path, file_name)
        # BUG FIX: ','.join([columns]) joined a one-element list, so a list
        # of column names was never comma-joined; join the names themselves
        # (a preformatted string is still passed through unchanged).
        header = columns if isinstance(columns, str) else ','.join(columns)
        np.savetxt(
            file_path,
            np.c_[range(1, len(data) + 1), data],
            delimiter=',',
            header=header,
            comments='',
            fmt='%d')
    except OSError as e:
        print("Could not write file because {}".format(str(e)), file=sys.stderr)
        raise
|
# Precompute the first 10000 cubes once; set membership tests are O(1).
clist = [0] * 10001
for i in range(1, 10001):
    clist[i] = i * i * i
cubes = set(clist[1:])
# For each query n, answer YES iff n = a^3 + b^3 for some a, b >= 1.
for _ in range(int(input())):
    n = int(input())
    flag = 0
    for i in range(1, 10001):
        if n - clist[i] in cubes:
            flag = 1
            # FIX: stop at the first witness instead of always scanning
            # all 10000 cubes per query.
            break
    if flag == 1:
        print("YES")
    else:
        print("NO")
|
from rest_framework import status
from rest_framework.response import Response
def unauthorized(message):
    '''Build a 401 response: the user is unauthorised to perform the action.'''
    return Response(
        {'status': 401, 'error': 'unauthorized', 'message': message},
        status=status.HTTP_401_UNAUTHORIZED,
    )
def bad_request(message):
    '''Build a 400 response: the request is malformed or invalid.'''
    return Response(
        {'status': 400, 'error': 'bad request', 'message': message},
        status=status.HTTP_400_BAD_REQUEST,
    )
def not_allowed():
    '''Build a 405 response: the HTTP method is not allowed here.'''
    return Response(
        {'status': 405, 'error': 'method not allowed'},
        status=status.HTTP_405_METHOD_NOT_ALLOWED,
    )
def forbidden(message):
    '''Build a 403 response: the request is forbidden.'''
    return Response(
        {'status': 403, 'error': 'forbidden', 'message': message},
        status=status.HTTP_403_FORBIDDEN,
    )
def not_found(message):
    '''Build a 404 response: the requested resource was not found.'''
    return Response(
        {'status': 404, 'error': 'not found', 'message': message},
        status=status.HTTP_404_NOT_FOUND,
    )
def unprocessable_entity(message):
    '''Build a 422 response: the entity cannot be processed.'''
    return Response(
        {'status': 422, 'error': 'unprocessable entity', 'message': message},
        status=status.HTTP_422_UNPROCESSABLE_ENTITY,
    )
|
# 2-D prefix sum
class Solution:
    def imageSmoother(self, img: List[List[int]]) -> List[List[int]]:
        """Replace each cell by the floored mean of its 3x3 neighborhood,
        using a 2-D prefix sum so each neighborhood query is O(1)."""
        rows, cols = len(img), len(img[0])
        # prefix[i][j] = sum of img[0..i-1][0..j-1]
        prefix = [[0] * (cols + 1) for _ in range(rows + 1)]
        for r in range(rows):
            for c in range(cols):
                prefix[r + 1][c + 1] = (prefix[r + 1][c] + prefix[r][c + 1]
                                        - prefix[r][c] + img[r][c])
        smoothed = [[0] * cols for _ in range(rows)]
        for r in range(rows):
            for c in range(cols):
                # Clamp the 3x3 window to the image borders.
                top, left = max(0, r - 1), max(0, c - 1)
                bottom, right = min(rows - 1, r + 1), min(cols - 1, c + 1)
                area = (bottom - top + 1) * (right - left + 1)
                total = (prefix[bottom + 1][right + 1] - prefix[top][right + 1]
                         - prefix[bottom + 1][left] + prefix[top][left])
                smoothed[r][c] = math.floor(total / area)
        return smoothed
|
# code for question 2
import sys
import arff, numpy as np
from sklearn import tree
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
import pandas as pd
import random
# fixed random seed
# Seed both NumPy's and the stdlib's RNG so every run is reproducible.
np.random.seed(0)
random.seed(0)
def label_enc(labels):
    """Fit and return a LabelEncoder over the given label values."""
    encoder = preprocessing.LabelEncoder()
    encoder.fit(labels)
    return encoder
def features_encoders(features, categorical_features='all'):
    """Fit per-column LabelEncoders and a OneHotEncoder over *features*.

    Args:
        features: 2-D array of (possibly categorical) feature values.
        categorical_features: forwarded to OneHotEncoder.

    Returns:
        (fitted OneHotEncoder, list of fitted per-column LabelEncoders)
    """
    n_samples, n_features = features.shape
    label_encoders = [preprocessing.LabelEncoder() for _ in range(n_features)]
    # FIX: np.int was removed in NumPy 1.24; the builtin int is the
    # documented replacement.
    X_int = np.zeros_like(features, dtype=int)
    for i in range(n_features):
        feature_i = features[:, i]
        label_encoders[i].fit(feature_i)
        X_int[:, i] = label_encoders[i].transform(feature_i)
    # NOTE(review): `categorical_features` was removed from OneHotEncoder in
    # scikit-learn 0.22+; this call assumes an older sklearn — confirm.
    enc = preprocessing.OneHotEncoder(categorical_features=categorical_features)
    return enc.fit(X_int), label_encoders
def feature_transform(features, label_encoders, one_hot_encoder):
    """Apply fitted encoders: label-encode each column, then one-hot encode.

    Returns a dense 2-D array of one-hot-encoded features.
    """
    n_samples, n_features = features.shape
    # FIX: np.int was removed in NumPy 1.24; use the builtin int dtype.
    X_int = np.zeros_like(features, dtype=int)
    for i in range(n_features):
        X_int[:, i] = label_encoders[i].transform(features[:, i])
    return one_hot_encoder.transform(X_int).toarray()
def load_data(path):
    """Load an ARFF dataset.

    Returns:
        (data, masks) where data is an ndarray of all rows and masks lists
        the indices of categorical (non-REAL) feature columns.
    """
    # FIX: the original passed an open() result straight to arff.load and
    # never closed it; 'with' releases the handle deterministically.
    with open(path, 'r') as arff_file:
        dataset = arff.load(arff_file)
    data = np.array(dataset['data'])
    attr = dataset['attributes']
    # Every non-REAL attribute (except the trailing class column) is
    # categorical and must be one-hot encoded later.
    masks = [i for i in range(len(attr) - 1) if attr[i][1] != 'REAL']
    return data, masks
def preprocess(data, masks, noise_ratio):
    """Split *data* 70/30, encode features/labels, and inject label noise.

    Args:
        data: full dataset; the last column is the class label.
        masks: indices of categorical feature columns.
        noise_ratio: fraction of training labels to corrupt.

    Returns:
        (train_features, train_labels, test_features, test_labels)
    """
    # split data (fixed seed so every run sees the same partition)
    train_data, test_data = train_test_split(data, test_size=0.3, random_state=0)
    # test data
    test_features = test_data[:, 0:test_data.shape[1] - 1]
    test_labels = test_data[:, test_data.shape[1] - 1]
    # training data
    features = train_data[:, 0:train_data.shape[1] - 1]
    labels = train_data[:, train_data.shape[1] - 1]
    classes = list(set(labels))
    # categorical features need to be encoded
    if len(masks):
        one_hot_enc, label_encs = features_encoders(data[:, 0:data.shape[1] - 1], masks)
        test_features = feature_transform(test_features, label_encs, one_hot_enc)
        features = feature_transform(features, label_encs, one_hot_enc)
    le = label_enc(data[:, data.shape[1] - 1])
    labels = le.transform(train_data[:, train_data.shape[1] - 1])
    test_labels = le.transform(test_data[:, test_data.shape[1] - 1])
    # Add noise: shift a noise_ratio-sized prefix of the labels by a nonzero
    # offset (mod n_classes), so a corrupted label never keeps its value.
    np.random.seed(0)
    noise = np.random.randint(len(classes) - 1, size=int(len(labels) * noise_ratio)) + 1
    # FIX: np.int was removed in NumPy 1.24; use the builtin int dtype.
    noise = np.concatenate((noise, np.zeros(len(labels) - len(noise), dtype=int)))
    labels = (labels + noise) % len(classes)
    return features, labels, test_features, test_labels
# load data
paths = ['balance-scale', 'primary-tumor',
         'glass', 'heart-h']
noise = [0, 0.2, 0.5, 0.8]
scores = []
params = []
for path in paths:
    path = path + '.arff'
    score = []
    param = []
    data, masks = load_data(path)
    # FIX: a leftover debug sys.exit() here aborted the run right after
    # loading the first dataset; removed.
    # Training with default parameters.
    # NOTE(review): the comment said "without noise" but the original passed
    # noise_ratio=0.5; kept as-is to preserve behavior — confirm intent.
    features, labels, test_features, test_labels = preprocess(data, masks, 0.5)
    # Renamed from `tree` so the imported sklearn `tree` module is not shadowed.
    clf = DecisionTreeClassifier(random_state=0, min_samples_leaf=2, min_impurity_decrease=0)
    clf.fit(features, labels)
    clf_preds = clf.predict(test_features)
    score.append(accuracy_score(test_labels, clf_preds))
    param.append(clf.get_params()['min_samples_leaf'])
    # training on data with noise %0, %20, %50, %80
    for noise_ratio in noise:
        data, masks = load_data(path)
        features, labels, test_features, test_labels = preprocess(data, masks, noise_ratio)
        # Tune min_samples_leaf with 10-fold cross-validation.
        param_grid = {'min_samples_leaf': np.arange(2, 30, 5)}
        grid_tree = GridSearchCV(DecisionTreeClassifier(random_state=0), param_grid, cv=10, return_train_score=True)
        grid_tree.fit(features, labels)
        estimator = grid_tree.best_estimator_
        tree_preds = grid_tree.predict(test_features)
        score.append(accuracy_score(test_labels, tree_preds))
        param.append(estimator.get_params()['min_samples_leaf'])
    scores.append(score)
    params.append(param)
# print the results
header = "{:^123}".format("Decision Tree Results") + '\n' + '-' * 123 + '\n' + \
         "{:^15} | {:^16} | {:^16} | {:^16} | {:^16} | {:^16} |".format("Dataset", "Default", "0%", "20%", "50%", "80%")
# FIX: a second debug sys.exit() here prevented the result table from ever
# printing; removed.
# print result table
print(header)
for i in range(len(scores)):
    print("{:<15}".format(paths[i]))
    for j in range(len(params[i])):
        print("| {:>6.2%} ({:>2}) ".format(scores[i][j], params[i][j]))
    print('|\n')
    print('\n')
|
from flask import render_template, flash, request, url_for
from sqlalchemy import desc
from werkzeug.utils import redirect
from fyyur import app, db
from fyyur.forms import *
# Shows
# ----------------------------------------------------------------
from fyyur.models import Venue, Artist, Show
from fyyur.repositories import ShowRepository
service = ShowRepository()
@app.route('/shows')
def shows():
    """List all shows, newest first, at /shows."""
    # TODO: replace with real venues data.
    # num_shows should be aggregated based on number of upcoming shows per venue.
    records = Show.query.order_by(desc(Show.start_time))
    data = []
    for show in records:
        data.append({'venue_id': show.venue_id,
                     'artist_id': show.artist_id,
                     'start_time': show.start_time,
                     'artist_name': show.artist.name,
                     'venue_name': show.venue.name,
                     'artist_image_link': show.artist.image_link})
    return render_template('pages/shows.html', shows=data)
@app.route('/shows/search', methods=['POST'])
def search_shows():
    """Search shows by the term posted from the search form."""
    search_term = request.form.get('search_term', '')
    matches = Show.query.search(search_term)
    data = [{'venue_id': m.venue_id,
             'artist_id': m.artist_id,
             'start_time': m.start_time,
             'venue_name': m.venue.name,
             'artist_name': m.artist.name} for m in matches]
    return render_template('pages/search_shows.html',
                           results={"count": len(data), "data": data},
                           search_term=search_term)
@app.route('/shows/create', methods=['GET', 'POST'])
def create_shows():
    """Render the new-show form (GET) and create a show (valid POST).

    A show is only created when no identical artist/venue pairing exists
    and the artist has no show at the requested start time.
    """
    form = ShowForm(coerce=str)
    if request.method == 'POST' and form.validate_on_submit():
        venue_id = form.data['venue_id']
        artist_id = form.data['artist_id']
        # Reject duplicates of an existing artist/venue pairing.
        has_show = Show.query.by_artist_and_venue(venue_id, artist_id).count() > 0
        if has_show:
            flash('There is a similar Show.')
        # Reject a show when the artist already plays at that time.
        elif Show.query.by_date(form.start_time.data, artist_id).count() > 0:
            flash('Artist: schedule conflict.')
        else:
            model = Show()
            model.venue_id = form.data['venue_id']
            model.artist_id = form.data['artist_id']
            model.start_time = form.data['start_time']
            # NOTE(review): 'persiste' looks misspelled but must match the
            # ShowRepository API — confirm before renaming.
            service.persiste(model)
            flash('Show was successfully listed!')
            return redirect(url_for('shows'))
    # Fell through (GET, invalid form, or rejected show): repopulate the
    # select choices and re-render the form.
    form.venue_id.choices = [(v.id, v.name) for v in
                             Venue.query.with_entities(Venue.id, Venue.name).order_by(Venue.name)]
    form.artist_id.choices = [(v.id, v.name) for v in
                              Artist.query.with_entities(Artist.id, Artist.name).order_by(Artist.name)]
    return render_template('forms/new_show.html', form=form)
|
"""
Created by Alex Wang on 20200513
"""
def badcase_set_get(groundtruth_list, predict_list, video_id_list, id_name_map):
    """Group mis-predicted video ids by their (truth, predicted) label names.

    Returns a dict mapping "<truth_name>_<pred_name>" to the set of video
    ids confused that way.
    """
    badcase_set_map = {}
    for idx, truth in enumerate(groundtruth_list):
        pred = predict_list[idx]
        if truth == pred:
            continue
        pair = "{}_{}".format(id_name_map[truth], id_name_map[pred])
        badcase_set_map.setdefault(pair, set()).add(video_id_list[idx])
    return badcase_set_map
def get_domain_split_dict():
    """Map each of the 89 class ids to one of 5 coarse domain ids."""
    # Contiguous id ranges per domain: [0,17) [17,38) [38,58) [58,72) [72,89).
    bounds = ((0, 17, 0), (17, 38, 1), (38, 58, 2), (58, 72, 3), (72, 89, 4))
    domain_dict = {}
    for lo, hi, domain in bounds:
        for class_id in range(lo, hi):
            domain_dict[class_id] = domain
    return domain_dict
def badcase_set_get_yw(groundtruth_list, predict_list, confidence_list, video_id_list, ):
    """Group mis-predictions by "<truth>_<pred>" label-id pair.

    Each entry holds (video_id, score_of_truth, score_of_pred) tuples taken
    from that sample's confidence vector.
    """
    badcase_set_map = {}
    for idx, truth in enumerate(groundtruth_list):
        pred = predict_list[idx]
        if truth == pred:
            continue
        pair = "{}_{}".format(truth, pred)
        scores = confidence_list[idx]
        badcase_set_map.setdefault(pair, []).append(
            (video_id_list[idx], scores[truth], scores[pred]))
    return badcase_set_map
if __name__ == '__main__':
pass
|
import socket
import ssl
from time import sleep
import flatactors
import common
import irc_parser
class Socket:
    """A line-buffered IRC socket interface.

    send(text) sends text as UTF-8 with a trailing newline. read() returns
    a list of complete received lines (without line separators), keeping any
    trailing partial line in an internal buffer until it is completed.
    """
    def __init__(self, server, port, ssl_enabled, timeout):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # try to connect
        try: sock.connect((server, port))
        # try really, really hard
        # TODO: except what!?
        # NOTE(review): this bare except swallows the connect error, and the
        # send() on an unconnected socket then raises its own error instead.
        except: sock.send(bytes('', 'utf-8'))
        if ssl_enabled:
            # NOTE(review): ssl.wrap_socket was removed in Python 3.12;
            # migrate to ssl.SSLContext.wrap_socket.
            sock = ssl.wrap_socket(sock)
        # A short timeout makes read() effectively non-blocking.
        sock.settimeout(timeout)
        self.ssl_enabled = ssl_enabled
        self.sock = sock
        # initialise an empty buffer for partially-received lines
        self.buffer = b""
    def send(self, text):
        # this will contain more sanity checking later
        self.sock.send(bytes(text + '\n', 'utf-8'))
    def read(self):
        # Read new stuff to the buffer (SSL sockets expose read(), plain
        # sockets recv()).
        self.buffer += self.sock.read(4096) if self.ssl_enabled else self.sock.recv(4096)
        # prepare to return lines; IRC lines end with CRLF
        bytelines = self.buffer.split(b'\r\n')
        # don't return the (possibly) last unfinished line
        self.buffer = bytelines.pop()
        # auxiliary decoding function: try each encoding in order
        def decode(text, encs):
            for enc in encs:
                try: return text.decode(enc)
                # TODO: except what!?
                except: continue
            # fallback is latin-1 (never fails with 'replace')
            return text.decode('latin-1', 'replace')
        # return the decoded lines as a list of strings!
        return [decode(byteline, ['utf-8', 'latin-1', 'cp1252'])
                for byteline in bytelines]
class IRCConnectionActor(flatactors.Actor):
    """Actor owning one IRC server connection.

    Connects and authenticates in initialize(), then in main_loop() relays
    outgoing 'response' messages, answers PINGs, tracks joined channels,
    and forwards every incoming line to the 'irc' actor.
    """
    def constructor(self, host, irc_settings):
        # host: dict with 'server', 'port', 'ssl', 'channels'
        # irc_settings: dict with 'nick', 'reconnect_delay', ...
        self.host = host
        self.irc_settings = irc_settings
    def initialize(self):
        self.wait_for_message = False
        try:
            # create a new line-buffered socket thingey
            self.irc = Socket(
                self.host['server'],
                self.host['port'],
                self.host['ssl'],
                0.01
            )
        except (socket.error, socket.herror, socket.gaierror):
            # Back off before the framework retries initialize().
            print('Connection to {}:{} failed, waiting for {} seconds...'.format(
                self.host['server'],
                self.host['port'],
                self.irc_settings['reconnect_delay']))
            sleep(self.irc_settings['reconnect_delay'])
            return
        # send credentials (NICK + USER registration)
        self.irc.send('NICK {}'.format(self.irc_settings['nick']))
        self.irc.send('USER {0} 0 * :IRC bot {0}'.format(self.irc_settings['nick']))
        # Connection-local state: our nick and the channels we are in.
        self.state = {
            'nick': self.irc_settings['nick'],
            'channels': set([]),
        }
    def main_loop(self, message):
        channels = set(self.host['channels'])
        # Outgoing direction: messages from other actors.
        if message:
            target, source, subject, payload = message
            if subject == 'response':
                try:
                    line = irc_parser.make_privmsg(*payload)
                except Exception as e:
                    self.send('logger:errors', 'log',
                              'Failed encoding IRC message: {}'.format(e))
                # NOTE(review): 'line' is unbound here if make_privmsg raised;
                # this send probably belongs in an else block — confirm.
                self.irc.send(line)
            if subject == 'die':
                self.stop()
        # Incoming direction: drain whatever the socket has buffered.
        try:
            lines = self.irc.read()
        except socket.timeout:
            lines = []
        for line in lines:
            # extract the informations from the message
            try:
                user, command, arguments = irc_parser.split(line)
                nick = irc_parser.get_nick(user)
            except AttributeError as e:
                self.send('logger:errors', 'log',
                          'Failed decoding IRC message: {}'.format(e))
                continue
            # Keep-alive handling required by the IRC protocol.
            if command == 'PING':
                self.irc.send('PONG :' + arguments[0])
            # make sure the current state of channels is updated
            if command == 'JOIN' and nick == self.state['nick']:
                self.state['channels'].add(arguments[0])
            if command == 'PART' and nick == self.state['nick']:
                self.state['channels'].discard(arguments[0])
            # after the housekeeping, just pass it on to irc, flagging
            # whether any argument is addressed to our nick
            self.send('irc', 'irc line',
                      (line, any(argument.startswith(self.state['nick'])
                                 for argument in arguments)))
            # this raises an exception,
            # and the thread will be respawned!
            # TODO: remove this obviously...
            if ' except' in line:
                raise Exception
        # try to join and part whatever channels applicable, reconciling
        # the configured channel set with the one we are actually in
        if self.state['channels'] != channels:
            joins = channels - self.state['channels']
            parts = self.state['channels'] - channels
            if joins:
                self.irc.send('JOIN {}'.format(','.join(joins)))
            if parts:
                self.irc.send('PART {}'.format(','.join(parts)))
    def before_death(self):
        # Say goodbye to the server and release the socket.
        self.irc.send('QUIT :termination beep blarp boop')
        self.irc.sock.close()
|
import time, struct, socket
import logging
def parse_data(self):
    """Parse complete framed commands out of self.recv_buffer.

    Wire format (inferred from the code — confirm): 2-byte command id,
    2-byte little-endian unsigned length, then `length` bytes of payload.
    Each complete frame is handed to self.process_data and removed from
    the buffer; an unknown command id discards the whole buffer.
    """
    #Go through data and find command and end codes to parse recv buffer
    data = self.recv_buffer
    buffer_length = len(data)
    if buffer_length>0:
        go=True
    else:
        go=False
    while go:
        go=False #If data is found then go will be set to true
        #1st two byte command code
        command_id = data[:2]
        if command_id in self.commands: #Check to make sure it is command code
            #Now get length two bytes
            self.lastRXtime = time.time()
            length = struct.unpack("H", data[2:4])[0]
            #Check if buffer is long enough for all data.
            if buffer_length >= length + 4:
                #Parse data
                command_data = data[4:length+4]
                #print "Command %r %r" %(command_id, command_data)
                self.process_data(command_id, command_data)
                #Delete from recv buffer all data before end point
                self.recv_buffer = data = data[length+4:]
                #print "AFTER %r -- %r" %(self.recv_buffer, data)
                if len(data) > 0: #If more data then check next data
                    go = True
                #print self.recv_buffer, i_start, i_end
        else:
            # Unrecognized command id: resynchronize by dropping the buffer.
            logging.warning("GlassProtocol: Command %s Not Valid" ,command_id)
            self.recv_buffer = ''
def sendrecv(self, conn):
    """One pump of the connection: flush the send buffer, ping when idle
    for more than 3 seconds, then poll for incoming data.

    conn -- a connected, non-blocking socket. Python 2 code (note the
    `except socket.error, e` syntax).
    """
    #Send and recieve data
    time.sleep(0.01)
    temp_time = time.time()
    #Send data if available
    if len(self.send_buffer)>=1:
        conn.send(self.send_buffer)
        logging.debug("GlassProtocol: Sending %r" ,self.send_buffer)
        self.send_buffer = '' #Clear buffer after it has been sent.
        self.time_lastTX = temp_time
    elif temp_time - self.time_lastTX > 3:
        # Nothing to send for 3s: queue a keep-alive ping.
        self.add_to_send([['PI','','Ping from Server']])
    #Try to recieve data
    try:
        data = conn.recv(1024)
        #print "DATA %r" %data
        self.RX_bytes += len(data)
        self.recv_buffer += data
    except socket.error, e:
        # errno 11 (EAGAIN) / 10035 (WSAEWOULDBLOCK): no data yet on a
        # non-blocking socket — not an error.
        if e[0]==11 or e[0]==10035: #Standard error if no data to receieve
            #print "Not Blocking"
            pass
        else: #Something is wrong True Error occured
            logging.warning("GlassProtocol: Socket Error %r", e)
            self.reset_connect() #Quit connection
def initialize(self):
    """Reset the protocol state: empty I/O buffers and zeroed counters."""
    self.send_buffer = ''   # outgoing bytes awaiting sendrecv()
    self.recv_buffer = ''   # incoming bytes awaiting parse_data()
    self.time_lastTX = 0    # timestamp of the last transmit (for pings)
    self.RX_bytes = 0       # total bytes received
    self.TX_bytes = 0       # total bytes transmitted
|
# The Juice Shortcodes API. To be transfered into the juice.core module.
# Needs a little bit more refactoring, but the general idea is to provide
# Pages, posts and other content types with the ability to use shortcodes.
# This is very similar to WordPress' technique. Examples of shortcodes could be:
#
# * [form slug="contact-form"] (Juice Forms API)
# * [chunk name="chunk-name"] (Juice Chunks API)
#
# You can define your custom shortcodes using the shortcodes.add method,
# which does all the parsing for you and passes a dictionary with the attributes
# to the function you've provided. To apply the shortcodes to a string use
# the shortcodes.apply method, which could optionally accept a request argument
# to pass request data (such as POST data for forms) to your shortcode handler
import re
from juice.front.debug import debug
# Don't use this class directly; it is instantiated at the end of this
# module for general use. The class's `shortcodes` attribute holds the
# currently active shortcodes. Use the add and remove methods to manage them.
class ShortcodeAPI():
    # Registered shortcodes: name -> handler(args_dict) -> replacement string.
    # NOTE: this is a class attribute, so all instances share one registry.
    shortcodes = {}

    def apply(self, content, request=None):
        """Expand every registered [shortcode ...] occurrence in *content*.

        Optionally passes the request object on to shortcode handlers (for
        HTTP data handling). Unregistered shortcodes are left untouched.
        """
        pattern = re.compile(r'\[(.*?)\]')
        groups = pattern.findall(content)
        parsed = content
        # Loop through the found shortcode groups, parse their attributes
        # and call the handler functions accordingly.
        for item in groups:
            if ' ' in item:
                name, space, args = item.partition(' ')
                args = self.__parse_args__(args)
            else:
                name = item
                args = {}
            if request:
                args['request'] = request
            # Expand only shortcodes that were registered with shortcodes.add
            if name in self.shortcodes:
                func = self.shortcodes[name]
                result = func(args)
                # FIX: the original used re.sub, which interprets backslash
                # escapes in the handler's output (crashing on e.g. '\b');
                # str.replace substitutes all occurrences literally.
                parsed = parsed.replace('[' + item + ']', result)
        return parsed

    def __parse_args__(self, value):
        """Parse 'key=val key2="quoted val"' into a dict (private helper)."""
        ex = re.compile(r'[ ]*(\w+)=([^" ]+|"[^"]*")[ ]*(?: |$)')
        kwargs = {}
        for item_key, item_value in ex.findall(value):
            # Strip the surrounding double quotes, when both are present.
            if item_value.startswith('"') and item_value.endswith('"'):
                item_value = item_value[1:-1]
            kwargs[item_key] = item_value
        return kwargs

    # The two following methods add and remove shortcodes — in other words,
    # use them for shortcode registration and deregistration.
    def add(self, key, function):
        """Register *function* as the handler for shortcode *key*."""
        self.shortcodes[key] = function

    def remove(self, key):
        """Deregister the shortcode *key*."""
        del self.shortcodes[key]
# This is the only instance of the ShortcodeAPI to be used outside this module.
shortcodes = ShortcodeAPI()
# Common shortcodes
class CommonShortcodes():
    """Handlers for the stock shortcodes: [youtube ...] and [snippet ...]."""

    @staticmethod
    def youtube(kwargs):
        """Render a YouTube embed for [youtube url="..." width=.. height=..].

        Returns an empty string when the url is missing or contains no
        recognizable 11-character video id. (FIX: the original called
        .group(1) before its None check, raising AttributeError instead.)
        """
        url = kwargs.get("url")
        width = kwargs.get("width") or 640
        height = kwargs.get("height") or 384
        if url is None:
            return ""
        match = re.search(r'v=(.{11})', str(url))
        if match is None:
            return ""
        youtube_id = match.group(1)
        return """
        <span class="youtube-video video" style="width: %(width)spx; height: %(height)spx;">
            <object width="%(width)s" height="%(height)s">
                <param name="movie" value="http://www.youtube.com/v/%(id)s?fs=1&hl=en_US&rel=0"></param>
                <param name="allowFullScreen" value="true"></param>
                <param name="allowscriptaccess" value="always"></param>
                <embed src="http://www.youtube.com/v/%(id)s?fs=1&hl=en_US&rel=0"
                    type="application/x-shockwave-flash"
                    allowscriptaccess="always"
                    allowfullscreen="true"
                    width="%(width)s"
                    height="%(height)s">
                </embed>
            </object>
        </span>
        """ % {'id': youtube_id, 'width': width, 'height': height}

    @staticmethod
    def snippet(kwargs):
        """Render the Django template snippets/<name> with *kwargs* as context."""
        from django import template
        from django.template.loader import get_template
        snippet_name = kwargs.get("name") or False
        context = template.Context(kwargs)
        try:
            tp = get_template("snippets/%s" % snippet_name)
        # FIX: narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.
        except Exception:
            return "Snippet %s not found!" % snippet_name
        return tp.render(context)
shortcodes.add('youtube', CommonShortcodes.youtube)
shortcodes.add('snippet', CommonShortcodes.snippet)
|
import sys

# Write out every line of the first file that is NOT present in the second
# file. Line order within each file is irrelevant.
# Protip: to see the lines the two files have in common instead,
# change .difference() to .intersection()
with open(sys.argv[1], 'r') as first_file:
    with open(sys.argv[2], 'r') as second_file:
        missing_lines = set(first_file).difference(second_file)
# optional:
# missing_lines.discard('\n')
with open('some_output_file.txt', 'w') as file_out:
    for line in missing_lines:
        file_out.write(line)
|
from ..models import *
from datetime import datetime
def load_base_school_location(session):
    """Upsert BaseSchoolLocation rows from the public- and private-school
    journal tables.

    For each journal record: update the existing BaseSchoolLocation matched
    on school_id, or insert a new one. Physical-location fields fall back to
    the mailing address when empty. Commits once per source table.
    """
    # get the public school records
    print("processing public school data")
    public_school_records = session.query(JrnlPublicSchoolBase.NCESSCH,
                                          JrnlPublicSchoolBase.ST,
                                          JrnlPublicSchoolBase.FIPST,
                                          JrnlPublicSchoolBase.STATENAME,
                                          JrnlPublicSchoolBase.MSTREET1,
                                          JrnlPublicSchoolBase.MSTREET2,
                                          JrnlPublicSchoolBase.MSTREET3,
                                          JrnlPublicSchoolBase.MCITY,
                                          JrnlPublicSchoolBase.MSTATE,
                                          JrnlPublicSchoolBase.MZIP,
                                          JrnlPublicSchoolBase.MZIP4,
                                          JrnlPublicSchoolBase.LSTREET1,
                                          JrnlPublicSchoolBase.LSTREET2,
                                          JrnlPublicSchoolBase.LSTREET3,
                                          JrnlPublicSchoolBase.LCITY,
                                          JrnlPublicSchoolBase.LSTATE,
                                          JrnlPublicSchoolBase.LZIP,
                                          JrnlPublicSchoolBase.LZIP4,
                                          JrnlPublicSchoolBase.OUT_OF_STATE_FLAG
                                          ).all()
    for r in public_school_records:
        # Match an existing row by NCES school id (upsert key).
        res = session.query(BaseSchoolLocation).filter(BaseSchoolLocation.school_id == r.NCESSCH).first()
        if res:
            res.state = r.ST
            res.ansi_state = r.FIPST
            res.state_nm = r.STATENAME
            res.mailing_street1 = r.MSTREET1
            res.mailing_street2 = r.MSTREET2
            res.mailing_street3 = r.MSTREET3
            res.mailing_city = r.MCITY
            res.mailing_state = r.MSTATE
            res.mailing_zip = r.MZIP
            res.mailing_zip4 = r.MZIP4
            # L* (location) fields fall back to the M* (mailing) address.
            res.location_street1 = r.LSTREET1 if r.LSTREET1 else r.MSTREET1
            res.location_street2 = r.LSTREET2 if r.LSTREET2 else r.MSTREET2
            res.location_street3 = r.LSTREET3 if r.LSTREET3 else r.MSTREET3
            res.location_city = r.LCITY if r.LCITY else r.MCITY
            res.location_state = r.LSTATE if r.LSTATE else r.MSTATE
            res.location_zip = r.LZIP if r.LZIP else r.MZIP
            res.location_zip4 = r.LZIP4
            res.out_of_state_flg = r.OUT_OF_STATE_FLAG
            res.updated_ts = datetime.now()
            session.add(res)
        else:
            # No existing row: insert a fresh record with both timestamps set.
            record = BaseSchoolLocation(school_id=r.NCESSCH,
                                        state=r.ST,
                                        ansi_state=r.FIPST,
                                        state_nm=r.STATENAME,
                                        mailing_street1=r.MSTREET1,
                                        mailing_street2=r.MSTREET2,
                                        mailing_street3=r.MSTREET3,
                                        mailing_city=r.MCITY,
                                        mailing_state=r.MSTATE,
                                        mailing_zip=r.MZIP,
                                        mailing_zip4=r.MZIP4,
                                        location_street1=r.LSTREET1 if r.LSTREET1 else r.MSTREET1,
                                        location_street2=r.LSTREET2 if r.LSTREET2 else r.MSTREET2,
                                        location_street3=r.LSTREET3 if r.LSTREET3 else r.MSTREET3,
                                        location_city=r.LCITY if r.LCITY else r.MCITY,
                                        location_state=r.LSTATE if r.LSTATE else r.MSTATE,
                                        location_zip=r.LZIP if r.LZIP else r.MZIP,
                                        location_zip4=r.LZIP4,
                                        out_of_state_flg=r.OUT_OF_STATE_FLAG,
                                        inserted_ts=datetime.now(),
                                        updated_ts=datetime.now()
                                        )
            session.add(record)
    session.commit()
    print("public school processing complete")
    print("processing private school data")
    private_school_records = session.query(JrnlPrivateSchool.PPIN,
                                           JrnlPrivateSchool.PSTANSI,
                                           JrnlPrivateSchool.PADDRS,
                                           JrnlPrivateSchool.PCITY,
                                           JrnlPrivateSchool.PSTABB,
                                           JrnlPrivateSchool.PZIP,
                                           JrnlPrivateSchool.PZIP4,
                                           JrnlPrivateSchool.PL_ADD,
                                           JrnlPrivateSchool.PL_CIT,
                                           JrnlPrivateSchool.PL_STABB,
                                           JrnlPrivateSchool.PL_ZIP,
                                           JrnlPrivateSchool.PL_ZIP4,
                                           JrnlPrivateSchool.LATITUDE16,
                                           JrnlPrivateSchool.LONGITUDE16
                                           ).all()
    for r in private_school_records:
        # Private schools key on PPIN instead of NCESSCH.
        res = session.query(BaseSchoolLocation).filter(BaseSchoolLocation.school_id == r.PPIN).first()
        if res:
            # NOTE(review): PSTANSI is written to both state and ansi_state
            # here (the public branch uses two different columns) — confirm.
            res.state = r.PSTANSI
            res.ansi_state = r.PSTANSI
            res.mailing_street1 = r.PADDRS
            res.mailing_city = r.PCITY
            res.mailing_state = r.PSTABB
            res.mailing_zip = r.PZIP
            res.mailing_zip4 = r.PZIP4
            # PL_* (location) fields fall back to the mailing counterparts.
            res.location_street1 = r.PL_ADD if r.PL_ADD else r.PADDRS
            res.location_city = r.PL_CIT if r.PL_CIT else r.PCITY
            res.location_state = r.PL_STABB if r.PL_STABB else r.PSTABB
            res.location_zip = r.PL_ZIP if r.PL_ZIP else r.PZIP
            res.location_zip4 = r.PL_ZIP4 if r.PL_ZIP4 else r.PZIP4
            res.latitude = r.LATITUDE16
            res.longitude = r.LONGITUDE16
            res.out_of_state_flg = "No"
            res.updated_ts = datetime.now()
            session.add(res)
        else:
            record = BaseSchoolLocation(school_id=r.PPIN,
                                        state=r.PSTANSI,
                                        ansi_state=r.PSTANSI,
                                        mailing_street1=r.PADDRS,
                                        mailing_city=r.PCITY,
                                        mailing_state=r.PSTABB,
                                        mailing_zip=r.PZIP,
                                        mailing_zip4=r.PZIP4,
                                        location_street1=r.PL_ADD if r.PL_ADD else r.PADDRS,
                                        location_city=r.PL_CIT if r.PL_CIT else r.PCITY,
                                        location_state=r.PL_STABB if r.PL_STABB else r.PSTABB,
                                        location_zip=r.PL_ZIP if r.PL_ZIP else r.PZIP,
                                        location_zip4=r.PL_ZIP4 if r.PL_ZIP4 else r.PZIP4,
                                        latitude=r.LATITUDE16,
                                        longitude=r.LONGITUDE16,
                                        out_of_state_flg="No",
                                        inserted_ts=datetime.now(),
                                        updated_ts=datetime.now()
                                        )
            session.add(record)
    session.commit()
    print("private school processing complete")
|
"""
Crie um programa que receba 3 valores, compare-os e printe ao final o número de valores iguais.
Exemplo:
Entrada Saída
1, 2, 3 0
2, 3, 2 2
7, 7, 7 3
"""
#Solução
contador=0
num1, num2, num3 = int(input()), int(input()), int(input())
if num1==num2:
contador+=1
if num2==num3:
contador+=1
if num1==num3:
contador+=1
print("A quantidade de números iguais é: " + str(contador))
|
def longestCommonPrefix(self, strs):
    """
    https://leetcode.com/problems/longest-common-prefix
    Return the longest prefix shared by every string in strs.
    :type strs: List[str]
    :rtype: str
    """
    # FIX: the original raised IndexError on an empty input list.
    if not strs:
        return ""
    if len(strs) == 1:
        return strs[0]
    template = strs[0]
    count = float('inf')
    for i in range(1, len(strs)):
        j = 0
        min_len = min(len(template), len(strs[i]))
        # Advance j while strs[i] agrees with the first string.
        while(j < min_len and template[j] == strs[i][j]):
            j += 1
        # The common prefix is bounded by the shortest agreement seen.
        if j < count:
            count = j
    return template[:count]
"""
def rotate(self, A):
A[:] = zip(*A[::-1])
"""
|
# Linear regression (TensorFlow 1.x graph API) on the cars dataset:
# fit braking distance as a linear function of speed, then plot the fit.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# import os
# print(os.getcwd())
cars = np.loadtxt("../../data/cars.csv", delimiter=",", dtype=np.int32)
# print(cars)
print(cars.shape)
# Column 0 is speed, column 1 is braking distance.
x = cars[:, 0]
y = cars[:, 1]
# Same split via transpose-unpacking (demonstration only).
x1, y1 = cars.T
print(cars.T)
print(x1)
print(y1)
X = tf.placeholder(dtype=tf.float32)
Y = tf.constant(y, dtype=tf.float32)
# Model parameters, randomly initialized: hx = w*X + b.
w = tf.Variable(tf.random_uniform([1]))
b = tf.Variable(tf.random_uniform([1]))
hx = w * X + b
# Mean squared error loss.
# NOTE(review): this uses the NumPy array y, so the constant Y above is
# never used — confirm whether Y was intended here.
cost = tf.reduce_mean(tf.square(hx - y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train = optimizer.minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Gradient-descent loop; report the loss every 100 steps.
for i in range(10000):
    sess.run(train, feed_dict={X: x})
    if not i % 100:
        print(i, sess.run(cost, feed_dict={X: x}))
# Training is done above, so w and b are now fitted.
# Predict braking distance for speeds 30 and 50.
print(sess.run(hx, feed_dict={X: 30}))
print(sess.run(hx, feed_dict={X: 50}))
# Scatter the data and overlay the fitted line.
plt.title("cars")
plt.xlabel("speed")
plt.ylabel("distance")
plt.scatter(x, y)
plt.plot(x, sess.run(hx, feed_dict={X: x}), "r--")
plt.show()
|
import math
import sys
class NaiveBayes(object):
    """Multinomial naive Bayes with add-one (Laplace) smoothing.

    Python 2 code (print statements, sys.maxint). Per-class word counts are
    loaded from files listed in classData; train() estimates priors and
    smoothed word likelihoods, test() scores rows in log space.
    """
    def __init__(self,classes,classData,data):
        self.classes = classes # prior data for all the classes
        self.priors={} # saves prior probability for each class
        self.classData = classData # hashmap with key as class and value as file name for data for the class
        self.data = data  # the full vocabulary of words
        self.probabilities = {}  # class -> {word -> smoothed P(word|class)}
    def train(self):
        """Estimate priors and Laplace-smoothed word probabilities per class."""
        classes = self.getUniqueClassesInData()
        #print classes
        for item in classes:
            self.priors[item]=self.getPriorValue(item)
            dataForItem = self.getDataForItem(item)
            countOfInItem = self.getCountOfInItem(dataForItem)
            wProbabilitiiesForItem={}
            for w in self.data:
                if w in dataForItem.keys():
                    countOfWInItem = float(dataForItem[w])
                else:
                    countOfWInItem = 0.0
                # Add-one smoothing: (count + 1) / (total + |vocabulary|).
                probabilityOfWInItem = float((countOfWInItem + 1.0))/float((countOfInItem + len(self.data)))
                wProbabilitiiesForItem[w]=probabilityOfWInItem
            self.probabilities[item] = wProbabilitiiesForItem
    def test(self,testData):
        """Return the maximum-a-posteriori class for each row of testData."""
        sol = []
        for row in testData:
            # NOTE(review): 'max' shadows the builtin here.
            max = 1 - sys.maxint
            temp=None
            for c in self.probabilities.keys():
                # NOTE(review): the log-score starts at 1.0 rather than 0.0;
                # the constant offset is identical for every class, so the
                # argmax is unaffected — confirm if absolute scores matter.
                val=1.0
                mapOfClass = self.probabilities[c]
                for w in row:
                    val =float(val) + math.log(float(mapOfClass[w]))
                val= val + math.log( self.priors[c])
                if val > max:
                    temp=c
                    max=val
            sol.append(temp) # class is max of sol[c]
        return sol
    def getCountOfInItem(self,dataForItem):
        """Total word occurrences for one class (sum of its counts)."""
        cnt=0
        for key in dataForItem.keys():
            cnt = cnt + dataForItem[key]
        return cnt
    def getDataForItem(self,item):
        """Load {word: count} for a class from its data file.

        NOTE(review): the file handle is never closed.
        """
        dict={}
        f= open(self.classData[item])
        for line in f.readlines():
            k= line.split(" ")
            # print k[0]
            dict[k[0]]=int(k[1].strip("\n"))
        return dict
    def getPriorValue(self,item):
        """P(class) = frequency of *item* among all training class labels."""
        cnt =0.0
        for i in self.classes:
            if i==item:
                cnt = cnt + 1
        return float(cnt/len(self.classes))
    def getUniqueClassesInData(self):
        """Return the set of distinct class labels."""
        classes = set()
        for c in self.classes:
            classes.add(c)
        return classes
    def printPriorVallues(self):
        # Debug helper: dump each class's prior probability.
        for row in self.priors:
            print self.priors[row]
|
import argparse
import logging
import math
from ..pyp import PYP
from ..arpa import CharLM, SRILMWrapper
import corpus
def run_sampler(model, identifiers, n_iter):
    """Run n_iter sweeps that re-seat every identifier in *model*, logging
    the model state, log-likelihood and per-identifier perplexity."""
    n_identifiers = len(identifiers)
    for iteration in range(n_iter):
        logging.info('Iteration %d/%d', iteration, n_iter)
        for identifier in identifiers:
            # Nothing has been seated on the very first sweep, so there is
            # nothing to remove yet.
            if iteration > 0:
                model.decrement(identifier)
            model.increment(identifier)
        logging.info('Model: %s', model)
        ll = model.log_likelihood()
        ppl = 2 ** (-ll / n_identifiers)
        logging.info('LL=%.0f ppl=%.3f', ll, ppl)
def main():
    """Command-line entry point.

    Trains a character n-gram LM on the training identifiers to serve as
    the base distribution, then fits a PYP model over the identifiers by
    Gibbs sampling.
    """
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    parser = argparse.ArgumentParser()
    parser.add_argument('--traindata', help='path to training data', required=True)
    parser.add_argument('--discount', help='discount parameter for PYP', required=True, type=float)
    parser.add_argument('--strength', help='strength parameter for PYP', required=True, type=float)
    parser.add_argument('--niter', help='number of iterations of sampling', type=int, default=10)
    parser.add_argument('--char_lm_order', help='order of character language model', type=int, default=10)
    args = parser.parse_args()
    with open(args.traindata) as f:
        _, identifiers = corpus.read(f)
    # Witten-Bell-discounted character LM used as the PYP base distribution.
    char_lm = SRILMWrapper()
    char_lm.train(set(identifiers), args.char_lm_order, 'wbdiscount')
    base = CharLM(char_lm.ngram_file)
    # A Pitman-Yor process is only defined for strength > -discount.
    # Validate via argparse instead of a bare assert: asserts are stripped
    # when Python runs with -O, and parser.error gives a usage message.
    if not args.strength > - args.discount:
        parser.error('strength must be greater than -discount')
    model = PYP(args.discount, args.strength, base)
    run_sampler(model, identifiers, args.niter)
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
'''
Center for Internet Security (CIS) audit module
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import salt libs
from salt import utils
__virtualname__ = 'cis'
LOG = logging.getLogger(__name__)
# Absolute paths of the external tools the audit functions shell out to.
GREP = utils.which('egrep')
STAT = utils.which('stat')
SYSCTL = utils.which('sysctl')
RPMQUERY = utils.which('rpm')
# Prefer systemctl when both service managers exist (the second
# assignment overwrites the first).
if utils.which('chkconfig'):
    CHKCONFIG = utils.which('chkconfig')
if utils.which('systemctl'):
    CHKCONFIG = utils.which('systemctl')
# NOTE(review): CHKCONFIG stays undefined when neither tool is present,
# so the service audits would raise NameError on such hosts -- confirm
# every supported platform ships one of the two.
# Module-level accumulator shared (and mutated) by every audit_* function.
CIS = {}
CIS['Passed'] = []   # names of benchmarks that passed
CIS['Failed'] = []   # names of benchmarks that failed
CIS['Totals'] = {}   # aggregate pass/fail counters
CIS['Details'] = {}  # reserved; never written in this chunk
CIS['Totals']['Pass'] = 0
CIS['Totals']['Fail'] = 0
def __virtual__():
    '''
    Only load module on Linux
    '''
    kernel = __salt__['grains.get']('kernel')
    return __virtualname__ if 'Linux' in kernel else False
def _grep(pattern, filename, shell=False):
    '''
    Run ``egrep pattern filename`` through Salt's cmd.run and return its
    output.  ``shell`` is forwarded as ``python_shell`` for the few audits
    that need glob expansion.
    '''
    command = ' '.join([GREP, pattern, filename])
    return __salt__['cmd.run'](command, python_shell=shell)
def _stat(filename):
    '''
    Standard function for all ``stat`` commands.

    Reports mode, uid and gid ("%a %u %g") of ``filename``, following
    symlinks (-L).
    '''
    command = ' '.join([STAT, '-L -c "%a %u %g"', filename])
    return __salt__['cmd.run'](command, python_shell=False)
def _sysctl(keyname):
    '''
    Return the ``sysctl`` output for a single kernel key.
    '''
    return __salt__['cmd.run']('{0} {1}'.format(SYSCTL, keyname),
                               python_shell=False)
def _rpmquery(package):
    '''
    Return the output of ``rpm -q package``.
    '''
    return __salt__['cmd.run']('{0} {1} {2}'.format(RPMQUERY, '-q', package),
                               python_shell=False)
def _chkconfig(service):
    '''
    Query the enable state of ``service`` with whichever service manager
    was detected at module load: ``systemctl is-enabled`` or
    ``chkconfig --list``.
    '''
    # NOTE(review): if CHKCONFIG matches neither tool, ``cmd`` is never
    # bound and the final line raises UnboundLocalError (and CHKCONFIG
    # itself may be undefined at module scope) -- confirm this path is
    # unreachable on supported platforms.
    if 'systemctl' in CHKCONFIG:
        cmd = '{0} {1} {2}'.format(CHKCONFIG, 'is-enabled', service)
    elif 'chkconfig' in CHKCONFIG:
        cmd = '{0} {1} {2}'.format(CHKCONFIG, '--list', service)
    return __salt__['cmd.run'](cmd, python_shell=False)
def audit_1_1(details=False):
    '''
    Audit Filesystem Configuration benchmarks (1.1)

    Runs every implemented 1.1.x check, then folds the results into
    ``CIS['Totals']``.  Returns the full CIS dict when ``details`` is True,
    otherwise just the totals.
    '''
    audit_1_1_1()
    audit_1_1_2()
    audit_1_1_3()
    audit_1_1_4()
    audit_1_1_5()
    audit_1_1_6()
    audit_1_1_7()
    audit_1_1_8()
    audit_1_1_9()
    audit_1_1_10()
    audit_1_1_14()
    audit_1_1_15()
    audit_1_1_16()
    audit_1_1_17()
    # len() replaces the original loop with an unused variable; += (not =)
    # preserves the original accumulation across repeated audit calls.
    CIS['Totals']['Pass'] += len(CIS['Passed'])
    CIS['Totals']['Fail'] += len(CIS['Failed'])
    if details:
        return CIS
    return CIS['Totals']
def audit_1_1_1():
    '''
    Since the /tmp directory is intended to be world-writable, there is a risk
    of resource exhaustion if it is not bound to a separate partition. In
    addition, making /tmp its own file system allows an administrator to set
    the noexec option on the mount, making /tmp useless for an attacker to
    install executable code. It would also prevent an attacker from
    establishing a hardlink to a system setuid program and wait for it to be
    updated. Once the program was updated, the hardlink would be broken and the
    attacker would have his own copy of the program. If the program happened to
    have a security vulnerability, the attacker could continue to exploit the
    known flaw.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_1
    '''
    benchmark = '1.1.1 Create Separate Partition for /tmp (Scored)'
    # Any fstab entry for /tmp counts as a dedicated partition.
    fstab_entry = _grep('"/tmp"', '/etc/fstab')
    outcome = 'Passed' if fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_2():
    '''
    Since the /tmp filesystem is not intended to support devices, set this
    option to ensure that users cannot attempt to create block or character
    special devices in /tmp.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_2
    '''
    benchmark = '1.1.2 Set nodev option for /tmp partition (Scored)'
    fstab_entry = _grep('"/tmp"', '/etc/fstab')
    outcome = 'Passed' if 'nodev' in fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_3():
    '''
    Since the /tmp filesystem is only intended for temporary file storage,
    set this option to ensure that users cannot create set userid files in
    /tmp.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_3
    '''
    benchmark = '1.1.3 Set nosuid option for /tmp partition (Scored)'
    fstab_entry = _grep('"/tmp"', '/etc/fstab')
    outcome = 'Passed' if 'nosuid' in fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_4():
    '''
    Since the /tmp filesystem is only intended for temporary file storage,
    set this option to ensure that users cannot run executable binaries
    from /tmp.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_4
    '''
    benchmark = '1.1.4 Set noexec option for /tmp partition (Scored)'
    fstab_entry = _grep('"/tmp"', '/etc/fstab')
    outcome = 'Passed' if 'noexec' in fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_5():
    '''
    Since the /var directory may contain world-writable files and
    directories, there is a risk of resource exhaustion if it is not bound
    to a separate partition.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_5
    '''
    benchmark = '1.1.5 Create Separate Partition for /var (Scored)'
    fstab_entry = _grep('"/var"', '/etc/fstab')
    outcome = 'Passed' if fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_6():
    '''
    All programs that use /var/tmp and /tmp to read/write temporary files
    will always be written to the /tmp file system, preventing a user from
    running the /var file system out of space or trying to perform
    operations that have been blocked in the /tmp filesystem.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_6
    '''
    benchmark = '1.1.6 Bind mount the /var/tmp directory to /tmp (Scored)'
    # A bind mount shows up as an fstab line starting with /tmp that also
    # names /var/tmp.
    fstab_entry = _grep('"^/tmp"', '/etc/fstab')
    outcome = 'Passed' if '/var/tmp' in fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_7():
    '''
    There are two important reasons to ensure that system logs are stored
    on a separate partition: protection against resource exhaustion (since
    logs can grow quite large) and protection of audit data.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_7
    '''
    benchmark = '1.1.7 Create separate partition for /var/log (Scored)'
    fstab_entry = _grep('"/var/log"', '/etc/fstab')
    outcome = 'Passed' if fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_8():
    '''
    There are two important reasons to ensure that data gathered by auditd
    is stored on a separate partition: protection against resource
    exhaustion (since the audit.log file can grow quite large) and
    protection of audit data. The audit daemon calculates how much free
    space is left and performs actions based on the results. If other
    processes (such as syslog) consume space in the same partition as
    auditd, it may not perform as desired.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_8
    '''
    benchmark = '1.1.8 Create separate partition for /var/log/audit (Scored)'
    fstab_entry = _grep('"/var/log/audit"', '/etc/fstab')
    outcome = 'Passed' if fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_9():
    '''
    If the system is intended to support local users, create a separate
    partition for the /home directory to protect against resource
    exhaustion and restrict the type of files that can be stored under
    /home.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_9
    '''
    benchmark = '1.1.9 Create separate partition for /home (Scored)'
    fstab_entry = _grep('"/home"', '/etc/fstab')
    outcome = 'Passed' if fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_10():
    '''
    Since the user partitions are not intended to support devices, set this
    option to ensure that users cannot attempt to create block or character
    special devices.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_10
    '''
    benchmark = '1.1.10 Add nodev option to /home (Scored)'
    fstab_entry = _grep('"/home"', '/etc/fstab')
    outcome = 'Passed' if 'nodev' in fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_14():
    '''
    Since the /dev/shm filesystem is not intended to support devices, set
    this option to ensure that users cannot attempt to create special
    devices in /dev/shm partitions.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_14
    '''
    benchmark = '1.1.14 Add nodev option to /dev/shm partition (Scored)'
    fstab_entry = _grep('"/dev/shm"', '/etc/fstab')
    outcome = 'Passed' if 'nodev' in fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_15():
    '''
    Setting this option on a file system prevents users from introducing
    privileged programs onto the system and allowing non-root users to
    execute them.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_15
    '''
    benchmark = '1.1.15 Add nosuid option to /dev/shm partition (Scored)'
    fstab_entry = _grep('"/dev/shm"', '/etc/fstab')
    outcome = 'Passed' if 'nosuid' in fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_16():
    '''
    Setting this option on a file system prevents users from executing
    programs from shared memory. This deters users from introducing
    potentially malicious software on the system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_16
    '''
    benchmark = '1.1.16 Add noexec option to /dev/shm partition (Scored)'
    fstab_entry = _grep('"/dev/shm"', '/etc/fstab')
    outcome = 'Passed' if 'noexec' in fstab_entry else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_1_17():
    '''
    This feature prevents the ability to delete or rename files in world
    writable directories (such as /tmp) that are owned by another user.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_1_17
    '''
    benchmark = '1.1.17 Set sticky bit on all world-writable directories (Scored)'
    # Every directory the pipeline prints is world-writable WITHOUT the
    # sticky bit, so empty output means the benchmark passes.
    cmd = "df --local -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type d \( -perm -0002 -a ! -perm -1000 \) 2>/dev/null"
    offenders = __salt__['cmd.run'](cmd, python_shell=True)
    CIS['Failed' if offenders else 'Passed'].append(benchmark)
    return CIS
def audit_1_2_2():
    '''
    It is important to ensure that an RPM's package signature is always
    checked prior to installation to ensure that the software is obtained
    from a trusted source.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_2_2
    '''
    benchmark = '1.2.2 Verify that gpgcheck is globally activated (Scored)'
    # Any repo file disabling gpgcheck is a failure; shell=True lets the
    # glob expand.
    disabled = _grep('gpgcheck=0', '/etc/yum.repos.d/*.repo', shell=True)
    CIS['Failed' if disabled else 'Passed'].append(benchmark)
    return CIS
def audit_1_5(details=False):
    '''
    Audit Filesystem Configuration benchmarks (1.5)

    Runs every implemented 1.5.x check, then folds the results into
    ``CIS['Totals']``.  Returns the full CIS dict when ``details`` is True,
    otherwise just the totals.
    '''
    audit_1_5_1()
    audit_1_5_2()
    audit_1_5_3()
    audit_1_5_4()
    audit_1_5_5()
    # len() replaces the original loop with an unused variable; += (not =)
    # preserves the original accumulation across repeated audit calls.
    CIS['Totals']['Pass'] += len(CIS['Passed'])
    CIS['Totals']['Fail'] += len(CIS['Failed'])
    if details:
        return CIS
    return CIS['Totals']
def audit_1_5_1():
    '''
    Setting the owner and group to root prevents non-root users from
    changing the file.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_5_1
    '''
    benchmark = '1.5.1 Set user/group owner on /etc/grub.conf (Scored)'
    # BUG FIX: ``ret`` was unbound (NameError) when CHKCONFIG named
    # neither tool; default to '' so that case is recorded as a failure.
    ret = ''
    if 'systemctl' in CHKCONFIG:
        ret = _stat('/boot/grub2/grub.cfg')
    elif 'chkconfig' in CHKCONFIG:
        ret = _stat('/etc/grub.conf')
    # _stat prints "mode uid gid"; "0 0" means owned by root:root.
    if '0 0' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_1_5_2():
    '''
    Setting the permissions to read and write for root only prevents
    non-root users from seeing the boot parameters or changing them.
    Non-root users who read the boot parameters may be able to identify
    weaknesses in security upon boot and be able to exploit them.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_5_2
    '''
    benchmark = '1.5.2 Set permissions on /etc/grub.conf (Scored)'
    # BUG FIX: ``ret`` was unbound (NameError) when CHKCONFIG named
    # neither tool; default to '' so that case is recorded as a failure.
    ret = ''
    if 'systemctl' in CHKCONFIG:
        ret = _stat('/boot/grub2/grub.cfg')
    elif 'chkconfig' in CHKCONFIG:
        ret = _stat('/etc/grub.conf')
    # _stat prints the octal mode first; look for 600 (rw for root only).
    if '600' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_1_5_3():
    '''
    Requiring a boot password upon execution of the boot loader will
    prevent an unauthorized user from entering boot parameters or changing
    the boot partition.  This prevents users from weakening security
    (e.g. turning off SELinux at boot time).

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_5_3
    '''
    benchmark = '1.5.3 Set boot loader password (Scored)'
    # BUG FIX: ``ret`` was unbound (NameError) when CHKCONFIG named
    # neither tool; default to '' so that case is recorded as a failure.
    ret = ''
    if 'systemctl' in CHKCONFIG:
        ret = _grep('"^password"', '/boot/grub2/grub.cfg')
    elif 'chkconfig' in CHKCONFIG:
        ret = _grep('"^password"', '/etc/grub.conf')
    if ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_1_5_4():
    '''
    Requiring authentication in single user mode prevents an unauthorized
    user from rebooting the system into single user to gain root
    privileges without credentials

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_5_4
    '''
    benchmark = '1.5.4 Require authentication for single-user mode (Scored)'
    single = _grep('"^SINGLE"', '/etc/sysconfig/init')
    # Only an explicit sulogin passes; sushell (or anything else) fails.
    outcome = 'Passed' if 'sulogin' in single else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_1_5_5():
    '''
    Requiring authentication in single user mode prevents an unauthorized
    user from rebooting the system into single user to gain root
    privileges without credentials

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_5_5
    '''
    benchmark = '1.5.5 Disable interactive boot (Scored)'
    ret = _grep('"^PROMPT"', '/etc/sysconfig/init')
    # BUG FIX: the original tested ``('no' or 'NO') in ret``, which
    # evaluates to just ``'no' in ret`` (and likewise for yes), so the
    # upper-case spellings were never matched.  Check both explicitly.
    # As before, nothing is recorded when neither value is present.
    if 'no' in ret or 'NO' in ret:
        CIS['Passed'].append(benchmark)
    elif 'yes' in ret or 'YES' in ret:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_1_6(details=False):
    '''
    Audit Filesystem Configuration benchmarks (1.6)

    Runs every implemented 1.6.x check, then folds the results into
    ``CIS['Totals']``.  Returns the full CIS dict when ``details`` is True,
    otherwise just the totals.
    '''
    audit_1_6_1()
    audit_1_6_2()
    audit_1_6_3()
    # len() replaces the original loop with an unused variable; += (not =)
    # preserves the original accumulation across repeated audit calls.
    CIS['Totals']['Pass'] += len(CIS['Passed'])
    CIS['Totals']['Fail'] += len(CIS['Failed'])
    if details:
        return CIS
    return CIS['Totals']
def audit_1_6_1():
    '''
    Setting a hard limit on core dumps prevents users from overriding the
    soft variable. If core dumps are required, consider setting limits for
    user groups (see limits.conf(5)). In addition, setting the
    fs.suid_dumpable variable to 0 will prevent setuid programs from
    dumping core.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_6_1
    '''
    benchmark = '1.6.1 Restrict core dumps (Scored)'
    ret1 = _grep('"hard core"', '/etc/security/limits.conf')
    ret2 = _sysctl('fs.suid_dumpable')
    # BUG FIX: sysctl prints "fs.suid_dumpable = 0", so the original
    # equality test (ret2 == '0') could never succeed and the benchmark
    # always failed.  Use a substring check like every other sysctl audit.
    if ret1 and '0' in ret2:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_1_6_2():
    '''
    Enabling any feature that can protect against buffer overflow attacks
    enhances the security of the system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_6_2
    '''
    benchmark = '1.6.2 Configure ExecShield (Scored)'
    value = _sysctl('kernel.exec-shield')
    CIS['Passed' if '1' in value else 'Failed'].append(benchmark)
    return CIS
def audit_1_6_3():
    '''
    Randomly placing virtual memory regions will make it difficult for to
    write memory page exploits as the memory placement will be
    consistently shifting.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_1_6_3
    '''
    benchmark = '1.6.3 Enable randomized virtual memory region placement (Scored)'
    value = _sysctl('kernel.randomize_va_space')
    # 2 = full address-space randomization.
    CIS['Passed' if '2' in value else 'Failed'].append(benchmark)
    return CIS
def audit_2_1(details=False):
    '''
    Audit Filesystem Configuration benchmarks (2.1)

    Runs every implemented 2.1.x check, then folds the results into
    ``CIS['Totals']``.  Returns the full CIS dict when ``details`` is True,
    otherwise just the totals.
    '''
    audit_2_1_1()
    audit_2_1_2()
    audit_2_1_3()
    audit_2_1_4()
    audit_2_1_5()
    audit_2_1_6()
    audit_2_1_7()
    audit_2_1_8()
    audit_2_1_9()
    audit_2_1_10()
    audit_2_1_11()
    audit_2_1_12()
    audit_2_1_13()
    audit_2_1_14()
    audit_2_1_15()
    audit_2_1_16()
    audit_2_1_17()
    audit_2_1_18()
    # len() replaces the original loop with an unused variable; += (not =)
    # preserves the original accumulation across repeated audit calls.
    CIS['Totals']['Pass'] += len(CIS['Passed'])
    CIS['Totals']['Fail'] += len(CIS['Failed'])
    if details:
        return CIS
    return CIS['Totals']
def audit_2_1_1():
    '''
    The telnet protocol is insecure and unencrypted. The use of an
    unencrypted transmission medium could allow a user with access to
    sniff network traffic the ability to steal credentials. The ssh
    package provides an encrypted session and stronger security and is
    included in most Linux distributions.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_1
    '''
    benchmark = '2.1.1 Remove telnet-server (Scored)'
    query = _rpmquery('telnet-server')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_2():
    '''
    The telnet protocol is insecure and unencrypted. The use of an
    unencrypted transmission medium could allow an authorized user to
    steal credentials. The ssh package provides an encrypted session and
    stronger security and is included in most Linux distributions

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_2
    '''
    benchmark = '2.1.2 Remove telnet client (Scored)'
    query = _rpmquery('telnet')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_3():
    '''
    These legacy service contain numerous security exposures and have been
    replaced with the more secure SSH package.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_3
    '''
    benchmark = '2.1.3 Remove rsh-server (Scored)'
    query = _rpmquery('rsh-server')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_4():
    '''
    These legacy clients contain numerous security exposures and have been
    replaced with the more secure SSH package. Even if the server is
    removed, it is best to ensure the clients are also removed to prevent
    users from inadvertently attempting to use these commands and
    therefore exposing their credentials. Note that removing the rsh
    package removes the clients for rsh, rcp and rlogin.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_4
    '''
    benchmark = '2.1.4 Remove rsh (Scored)'
    query = _rpmquery('rsh')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_5():
    '''
    The NIS service is inherently an insecure system that has been
    vulnerable to DOS attacks, buffer overflows and has poor
    authentication for querying NIS maps. NIS generally has been replaced
    by such protocols as Lightweight Directory Access Protocol (LDAP). It
    is recommended that the service be removed.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_5
    '''
    benchmark = '2.1.5 Remove NIS client (Scored)'
    query = _rpmquery('ypbind')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_6():
    '''
    The NIS service is inherently an insecure system that has been
    vulnerable to DOS attacks, buffer overflows and has poor
    authentication for querying NIS maps. NIS generally been replaced by
    such protocols as Lightweight Directory Access Protocol (LDAP). It is
    recommended that the service be disabled and other, more secure
    services be used.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_6
    '''
    benchmark = '2.1.6 Remove NIS server (Scored)'
    query = _rpmquery('ypserv')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_7():
    '''
    It is recommended that TFTP be removed, unless there is a specific
    need for TFTP (such as a boot server). In that case, use extreme
    caution when configuring the services.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_7
    '''
    benchmark = '2.1.7 Remove tftp (Scored)'
    query = _rpmquery('tftp')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_8():
    '''
    TFTP does not support authentication nor does it ensure the
    confidentiality of integrity of data. It is recommended that TFTP be
    removed, unless there is a specific need for TFTP. In that case,
    extreme caution must be used when configuring the services.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_8
    '''
    benchmark = '2.1.8 Remove tftp-server (Scored)'
    query = _rpmquery('tftp-server')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_9():
    '''
    The software presents a security risk as it uses unencrypted protocols
    for communication.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_9
    '''
    benchmark = '2.1.9 Remove talk (Scored)'
    query = _rpmquery('talk')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_10():
    '''
    The software presents a security risk as it uses unencrypted protocols
    for communication.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_10
    '''
    benchmark = '2.1.10 Remove talk-server (Scored)'
    query = _rpmquery('talk-server')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_11():
    '''
    If there are no xinetd services required, it is recommended that the
    daemon be deleted from the system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_11
    '''
    benchmark = '2.1.11 Remove xinetd (Scored)'
    query = _rpmquery('xinetd')
    CIS['Passed' if 'not installed' in query else 'Failed'].append(benchmark)
    return CIS
def audit_2_1_12():
    '''
    Disabling this service will reduce the remote attack surface of the
    system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_12
    '''
    benchmark = '2.1.12 Disable chargen-dgram (Scored)'
    ret = _chkconfig('chargen-dgram')
    # BUG FIX: ``systemctl is-enabled`` reports a disabled unit as
    # "disabled", which matched neither 'off' nor 'enabled' and fell
    # through to Failed.  Absent, off (chkconfig) or disabled (systemctl)
    # all pass; anything else fails.
    if 'No such file or directory' in ret:
        CIS['Passed'].append(benchmark)
    elif 'off' in ret or 'disabled' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_2_1_13():
    '''
    Disabling this service will reduce the remote attack surface of the
    system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_13
    '''
    benchmark = '2.1.13 Disable chargen-stream (Scored)'
    ret = _chkconfig('chargen-stream')
    # BUG FIX: also accept systemctl's "disabled" output, which previously
    # fell through to Failed.
    if 'No such file or directory' in ret:
        CIS['Passed'].append(benchmark)
    elif 'off' in ret or 'disabled' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_2_1_14():
    '''
    Disabling this service will reduce the remote attack surface of the
    system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_14
    '''
    benchmark = '2.1.14 Disable daytime-dgram (Scored)'
    ret = _chkconfig('daytime-dgram')
    # BUG FIX: also accept systemctl's "disabled" output, which previously
    # fell through to Failed.
    if 'No such file or directory' in ret:
        CIS['Passed'].append(benchmark)
    elif 'off' in ret or 'disabled' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_2_1_15():
    '''
    Disabling this service will reduce the remote attack surface of the
    system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_15
    '''
    benchmark = '2.1.15 Disable daytime-stream (Scored)'
    ret = _chkconfig('daytime-stream')
    # BUG FIX: also accept systemctl's "disabled" output, which previously
    # fell through to Failed.
    if 'No such file or directory' in ret:
        CIS['Passed'].append(benchmark)
    elif 'off' in ret or 'disabled' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_2_1_16():
    '''
    Disabling this service will reduce the remote attack surface of the
    system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_16
    '''
    benchmark = '2.1.16 Disable echo-dgram (Scored)'
    ret = _chkconfig('echo-dgram')
    # BUG FIX: also accept systemctl's "disabled" output, which previously
    # fell through to Failed.
    if 'No such file or directory' in ret:
        CIS['Passed'].append(benchmark)
    elif 'off' in ret or 'disabled' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_2_1_17():
    '''
    Disabling this service will reduce the remote attack surface of the
    system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_17
    '''
    benchmark = '2.1.17 Disable echo-stream (Scored)'
    ret = _chkconfig('echo-stream')
    # BUG FIX: also accept systemctl's "disabled" output, which previously
    # fell through to Failed.
    if 'No such file or directory' in ret:
        CIS['Passed'].append(benchmark)
    elif 'off' in ret or 'disabled' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_2_1_18():
    '''
    tcpmux-server can be abused to circumvent the server's host based
    firewall. Additionally, tcpmux-server can be leveraged by an attacker
    to effectively port scan the server.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_2_1_18
    '''
    benchmark = '2.1.18 Disable tcpmux-server (Scored)'
    ret = _chkconfig('tcpmux-server')
    # BUG FIX: also accept systemctl's "disabled" output, which previously
    # fell through to Failed.
    if 'No such file or directory' in ret:
        CIS['Passed'].append(benchmark)
    elif 'off' in ret or 'disabled' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_3(details=False):
    '''
    Audit Filesystem Configuration benchmarks (3)

    Runs every implemented 3.x check, then folds the results into
    ``CIS['Totals']``.  Returns the full CIS dict when ``details`` is True,
    otherwise just the totals.
    '''
    audit_3_1()
    audit_3_2()
    audit_3_3()
    audit_3_5()
    audit_3_6()
    audit_3_16()
    # len() replaces the original loop with an unused variable; += (not =)
    # preserves the original accumulation across repeated audit calls.
    CIS['Totals']['Pass'] += len(CIS['Passed'])
    CIS['Totals']['Fail'] += len(CIS['Failed'])
    if details:
        return CIS
    return CIS['Totals']
def audit_3_1():
    '''
    Setting the umask to 027 will make sure that files created by daemons
    will not be readable, writable or executable by any other than the
    group and owner of the daemon process and will not be writable by the
    group of the daemon process. The daemon process can manually override
    these settings if these files need additional permission.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_3_1
    '''
    # DOC FIX: the CLI example above previously said cis.audit_2_1_18
    # (copy/paste error).
    benchmark = '3.1 Set Daemon umask (Scored)'
    ret = _grep('"umask"', '/etc/sysconfig/init')
    if 'umask 027' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_3_2():
    '''
    Unless your organization specifically requires graphical login access
    via X Windows, remove it to reduce the potential attack surface

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_3_2
    '''
    # DOC FIX: the CLI example above previously said cis.audit_2_1_18
    # (copy/paste error).
    benchmark = '3.2 Remove X Windows (Scored)'
    ret = _grep('"^id"', '/etc/inittab')
    # NOTE(review): no result is recorded when neither default runlevel
    # matches (e.g. systemd hosts without /etc/inittab) -- confirm intended.
    if 'id:3:initdefault' in ret:
        CIS['Passed'].append(benchmark)
    elif 'id:5:initdefault' in ret:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_3_3():
    '''
    Since servers are not normally used for printing, this service is not
    needed unless dependencies require it. If this is the case, disable
    the service to reduce the potential attack surface. If for some reason
    the service is required on the server, follow the recommendations in
    sub-sections 3.2.1 - 3.2.5 to secure it.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_3_3
    '''
    # DOC FIX: the CLI example above previously said cis.audit_2_1_18
    # (copy/paste error).
    benchmark = '3.3 Disable avahi server (Scored)'
    ret = _chkconfig('avahi-daemon')
    # BUG FIX: ``systemctl is-enabled`` reports a disabled unit as
    # "disabled", which the original never matched (it fell through to
    # Failed).  'disabled' is checked first; 'on'/'enabled'/anything else
    # still fails as before.
    if 'No such file or directory' in ret:
        CIS['Passed'].append(benchmark)
    elif 'disabled' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_3_5():
    '''
    Unless a server is specifically set up to act as a DHCP server, it is
    recommended that this service be deleted to reduce the potential
    attack surface.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_3_5
    '''
    # DOC FIX: the CLI example above previously said cis.audit_2_1_18
    # (copy/paste error).
    benchmark = '3.5 Remove DHCP server (Scored)'
    ret = _rpmquery('dhcp')
    if 'not installed' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_3_6():
    '''
    It is recommended that physical systems and virtual guests lacking
    direct access to the physical host's clock be configured as NTP
    clients to synchronize their clocks (especially to support time
    sensitive security mechanisms like Kerberos). This also ensures log
    files have consistent time records across the enterprise, which aids
    in forensic investigations.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_3_6
    '''
    # DOC FIX: the CLI example above previously said cis.audit_2_1_18
    # (copy/paste error).
    benchmark = '3.6 Configure network time protocol (NTP) (Scored)'
    ret = _grep('"restrict default"', '/etc/ntp.conf')
    if ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_3_16():
    '''
    The software for all Mail Transfer Agents is complex and most have a
    long history of security issues. While it is important to ensure that
    the system can process local mail messages, it is not necessary to
    have the MTA's daemon listening on a port unless the server is
    intended to be a mail server that receives and processes mail from
    other systems.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_3_16
    '''
    # DOC FIX: the CLI example above previously said cis.audit_2_1_18
    # (copy/paste error).
    benchmark = '3.16 Configure mail transfer agent for local-only mode (Scored)'
    # Pass when the SMTP listener (port 25) is bound to loopback only.
    cmd = 'netstat -an | grep LIST | grep ":25[[:space:]]"'
    ret = __salt__['cmd.run'](cmd, python_shell=True)
    if '127.0.0.1' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_4_1_1():
    '''
    Setting the flag to 0 ensures that a server with multiple interfaces
    (for example, a hard proxy), will never be able to forward packets,
    and therefore, never serve as a router

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_4_1_1
    '''
    benchmark = '4.1.1 Disable IP forwarding (Scored)'
    value = _sysctl('net.ipv4.ip_forward')
    CIS['Passed' if '0' in value else 'Failed'].append(benchmark)
    return CIS
def audit_4_1_2():
    '''
    An attacker could use a compromised host to send invalid ICMP
    redirects to other router devices in an attempt to corrupt routing and
    have users access a system set up by the attacker as opposed to a
    valid system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_4_1_2
    '''
    benchmark = '4.1.2 Disable send packet redirects (Scored)'
    all_val = _sysctl('net.ipv4.conf.all.send_redirects')
    default_val = _sysctl('net.ipv4.conf.default.send_redirects')
    # Both the "all" and "default" settings must be 0.
    disabled = '0' in all_val and '0' in default_val
    CIS['Passed' if disabled else 'Failed'].append(benchmark)
    return CIS
def audit_4_2_1():
    '''
    Setting net.ipv4.conf.all.accept_source_route and
    net.ipv4.conf.default.accept_source_route to 0 disables the system
    from accepting source routed packets. Assume this server was capable
    of routing packets to Internet routable addresses on one interface and
    private addresses on another interface. Assume that the private
    addresses were not routable to the Internet routable addresses and
    vice versa. Under normal routing circumstances, an attacker from the
    Internet routable addresses could not use the server as a way to reach
    the private address servers. If, however, source routed packets were
    allowed, they could be used to gain access to the private address
    systems as the route could be specified, rather than rely on routing
    protocols that did not allow this routing.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_4_2_1
    '''
    benchmark = '4.2.1 disable source routed packet acceptance (Scored)'
    all_val = _sysctl('net.ipv4.conf.all.accept_source_route')
    default_val = _sysctl('net.ipv4.conf.default.accept_source_route')
    # Both the "all" and "default" settings must be 0.
    disabled = '0' in all_val and '0' in default_val
    CIS['Passed' if disabled else 'Failed'].append(benchmark)
    return CIS
def audit_4_2_2():
    '''
    Attackers could use bogus ICMP redirect messages to maliciously alter
    the system routing tables and get them to send packets to incorrect
    networks and allow your system packets to be captured.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_4_2_2
    '''
    benchmark = '4.2.2 Disable ICMP redirect acceptance (Scored)'
    all_val = _sysctl('net.ipv4.conf.all.accept_redirects')
    default_val = _sysctl('net.ipv4.conf.default.accept_redirects')
    # Both the "all" and "default" settings must be 0.
    disabled = '0' in all_val and '0' in default_val
    CIS['Passed' if disabled else 'Failed'].append(benchmark)
    return CIS
def audit_4_2_4():
    '''
    Verify that suspicious (martian) packets are logged, allowing an
    administrator to investigate possible spoofed traffic.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_4_2_4
    '''
    benchmark = '4.2.4 Log suspicious packets (Scored)'
    settings = (
        _sysctl('net.ipv4.conf.all.log_martians'),
        _sysctl('net.ipv4.conf.default.log_martians'),
    )
    result = 'Passed' if all('1' in value for value in settings) else 'Failed'
    CIS[result].append(benchmark)
    return CIS
def audit_4_2_5():
    '''
    Verify that ICMP echo requests to broadcast/multicast destinations are
    ignored, preventing the host from being used as an amplifier in a
    Smurf-style attack.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_4_2_5
    '''
    benchmark = '4.2.5 Enable ignore broadcast requests (Scored)'
    ignores_broadcasts = '1' in _sysctl('net.ipv4.icmp_echo_ignore_broadcasts')
    CIS['Passed' if ignores_broadcasts else 'Failed'].append(benchmark)
    return CIS
def audit_4_2_6():
    '''
    Verify that bad error message protection is enabled. Some routers (and
    some attackers) send responses that violate RFC-1122 in an attempt to
    fill the log filesystem with useless error messages.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_4_2_6
    '''
    benchmark = '4.2.6 Enable bad error message protection (Scored)'
    # Bug fix: the kernel parameter is icmp_ignore_bogus_error_responses
    # (plural); the singular name queried before does not exist, so the
    # check could never observe the real setting.
    ret = _sysctl('net.ipv4.icmp_ignore_bogus_error_responses')
    if '1' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_4_2_8():
    '''
    Verify that TCP SYN cookies are enabled so the server keeps accepting
    valid connections while under a SYN-flood denial of service attack.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_4_2_8
    '''
    benchmark = '4.2.8 Enable TCP SYN cookies (Scored)'
    syncookies_on = '1' in _sysctl('net.ipv4.tcp_syncookies')
    CIS['Passed' if syncookies_on else 'Failed'].append(benchmark)
    return CIS
def audit_4_5_3():
    '''
    Verify that /etc/hosts.allow has mode 644, protecting it from
    unauthorized (inadvertent or malicious) modification.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_4_5_3
    '''
    benchmark = '4.5.3 Verify permissions on /etc/hosts.allow (Scored)'
    status = _stat('/etc/hosts.allow')
    outcome = 'Passed' if '644' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_4_5_5():
    '''
    Verify that /etc/hosts.deny has mode 644, protecting it from
    unauthorized (inadvertent or malicious) modification.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_4_5_5
    '''
    benchmark = '4.5.5 Verify permissions on /etc/hosts.deny (Scored)'
    status = _stat('/etc/hosts.deny')
    outcome = 'Passed' if '644' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_4_7():
    '''
    Verify that a host firewall (iptables or firewalld, depending on the
    init system) is enabled, limiting communications in and out of the box
    to specific IPv4 addresses and ports.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_4_7
    '''
    benchmark = '4.7 Enable iptables / firewalld (Scored)'
    # Bug fix: initialize ret so the check below cannot raise NameError
    # when CHKCONFIG names neither systemctl nor chkconfig.
    ret = ''
    if 'systemctl' in CHKCONFIG:
        ret = _chkconfig('firewalld')
    if 'chkconfig' in CHKCONFIG:
        ret = _chkconfig('iptables')
    # '3:on' is the SysV chkconfig marker; 'enabled' is the systemd one.
    if '3:on' in ret or 'enabled' in ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_5_1_1():
    '''
    Verify that the rsyslog package is installed. Its security enhancements
    (TCP transport, database output, encryption to a central log host)
    justify installing and configuring it.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_5_1_1
    '''
    benchmark = '5.1.1 Install the rsyslog package (Scored)'
    installed = 'not installed' not in _rpmquery('rsyslog')
    CIS['Passed' if installed else 'Failed'].append(benchmark)
    return CIS
def audit_5_1_2():
    '''
    Verify that the rsyslog service is activated (and that legacy syslog
    does not interfere with it).
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_5_1_2
    '''
    benchmark = '5.1.2 Activate the rsyslog service (Scored)'
    ret = _chkconfig('rsyslog')
    # '3:on' covers SysV chkconfig output, 'enabled' covers systemd.
    active = '3:on' in ret or 'enabled' in ret
    CIS['Passed' if active else 'Failed'].append(benchmark)
    return CIS
def audit_5_1_5():
    '''
    Verify that rsyslog forwards logs to a remote host, protecting log
    integrity if the local system is compromised.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_5_1_5
    '''
    benchmark = '5.1.5 Configure rsyslog to send logs to a remote log host (Scored)'
    remote_rule = _grep('"^*.* @"', '/etc/rsyslog.conf')
    CIS['Passed' if remote_rule else 'Failed'].append(benchmark)
    return CIS
def audit_6_1_1():
    '''
    Verify that the anacron daemon (cronie-anacron) is installed so
    scheduled jobs still run on machines that are not up 24x7.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_1
    '''
    benchmark = '6.1.1 Enable anacron daemon (Scored)'
    installed = 'not installed' not in _rpmquery('cronie-anacron')
    CIS['Passed' if installed else 'Failed'].append(benchmark)
    return CIS
def audit_6_1_2():
    '''
    Verify that the crond daemon is enabled, as system maintenance jobs
    (including security monitoring) rely on it.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_2
    '''
    benchmark = '6.1.2 Enable crond daemon (Scored)'
    ret = _chkconfig('crond')
    # '3:on' covers SysV chkconfig output, 'enabled' covers systemd.
    active = '3:on' in ret or 'enabled' in ret
    CIS['Passed' if active else 'Failed'].append(benchmark)
    return CIS
def audit_6_1_3():
    '''
    Verify owner/permissions on /etc/anacrontab (mode 600, root:root).
    Write access could let unprivileged users elevate privileges; read
    access reveals which system jobs run.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_3
    '''
    benchmark = '6.1.3 Set user/group owner and permission on /etc/anacrontab (Scored)'
    status = _stat('/etc/anacrontab')
    outcome = 'Passed' if '600 0 0' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_6_1_4():
    '''
    Verify owner/permissions on /etc/crontab (mode 600, root:root). Write
    access could let unprivileged users elevate privileges; read access
    reveals which system jobs run.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_4
    '''
    benchmark = '6.1.4 Set user/group owner and permission on /etc/crontab (Scored)'
    status = _stat('/etc/crontab')
    outcome = 'Passed' if '600 0 0' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_6_1_5():
    '''
    Verify owner/permissions on /etc/cron.hourly (600, root:root per this
    module's convention). Write access could enable privilege escalation;
    read access gives insight into scheduled jobs.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_5
    '''
    benchmark = '6.1.5 Set user/group and permission on /etc/cron.hourly (Scored)'
    status = _stat('/etc/cron.hourly')
    outcome = 'Passed' if '600 0 0' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_6_1_6():
    '''
    Verify owner/permissions on /etc/cron.daily (600, root:root). Write
    access could enable privilege escalation; read access gives insight
    into scheduled jobs.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_6
    '''
    benchmark = '6.1.6 Set user/group and permission on /etc/cron.daily (Scored)'
    status = _stat('/etc/cron.daily')
    outcome = 'Passed' if '600 0 0' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_6_1_7():
    '''
    Verify owner/permissions on /etc/cron.weekly (600, root:root). Write
    access could enable privilege escalation; read access gives insight
    into scheduled jobs.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_7
    '''
    benchmark = '6.1.7 Set user/group owner and permission on /etc/cron.weekly (Scored)'
    status = _stat('/etc/cron.weekly')
    outcome = 'Passed' if '600 0 0' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_6_1_8():
    '''
    Verify owner/permissions on /etc/cron.monthly (600, root:root). Write
    access could enable privilege escalation; read access gives insight
    into scheduled jobs.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_8
    '''
    benchmark = '6.1.8 Set user/group owner and permission on /etc/cron.monthly (Scored)'
    status = _stat('/etc/cron.monthly')
    outcome = 'Passed' if '600 0 0' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_6_1_9():
    '''
    Verify owner/permissions on /etc/cron.d (600, root:root). Write access
    could enable privilege escalation; read access gives insight into
    scheduled jobs.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_9
    '''
    benchmark = '6.1.9 Set user/group owner and permission on /etc/cron.d (Scored)'
    status = _stat('/etc/cron.d')
    outcome = 'Passed' if '600 0 0' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_6_1_10():
    '''
    Verify that the at daemon is restricted via a properly protected
    /etc/at.allow (600, root:root). A whitelist of users allowed to submit
    at jobs is easier to manage safely than a deny list.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_10
    '''
    benchmark = '6.1.10 Restrict at daemon (Scored)'
    status = _stat('/etc/at.allow')
    outcome = 'Passed' if '600 0 0' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_6_1_11():
    '''
    Verify that cron/at access is controlled by allow files (600, root:root)
    and that the corresponding deny files are absent. An allow list cannot
    be silently bypassed by forgetting to add a new user to a deny file.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_1_11
    '''
    benchmark = '6.1.11 Restrict at/cron to authorized users (Scored)'
    cron_deny = _stat('/etc/cron.deny')
    at_deny = _stat('/etc/at.deny')
    cron_allow = _stat('/etc/cron.allow')
    at_allow = _stat('/etc/at.allow')
    allows_ok = '600 0 0' in cron_allow and '600 0 0' in at_allow
    denies_absent = 'cannot stat' in cron_deny and 'cannot stat' in at_deny
    CIS['Passed' if allows_ok and denies_absent else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_1():
    '''
    Verify sshd is configured for protocol 2 only; SSH v1 suffers from
    insecurities that do not affect v2.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_1
    '''
    benchmark = '6.2.1 Set SSH protocol to 2 (Scored)'
    setting = _grep('"^Protocol"', '/etc/ssh/sshd_config')
    CIS['Passed' if 'Protocol 2' in setting else 'Failed'].append(benchmark)
    return CIS
def audit_6_2(details=False):
    '''
    Audit the SSH server configuration benchmarks (6.2) and tally totals.
    Runs checks 6.2.2 through 6.2.14 and sums pass/fail counts.
    NOTE(review): 6.2.1 is not invoked here — confirm whether that is
    intentional.
    details
        When True return the full CIS results dict; otherwise just totals.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2
    '''
    audit_6_2_2()
    audit_6_2_3()
    audit_6_2_4()
    audit_6_2_5()
    audit_6_2_6()
    audit_6_2_7()
    audit_6_2_8()
    audit_6_2_9()
    audit_6_2_10()
    audit_6_2_11()
    audit_6_2_12()
    audit_6_2_13()
    audit_6_2_14()
    # Count results directly instead of looping with an unused variable.
    CIS['Totals']['Pass'] += len(CIS['Passed'])
    CIS['Totals']['Fail'] += len(CIS['Failed'])
    if details:
        return CIS
    return CIS['Totals']
def audit_6_2_2():
    '''
    Verify sshd LogLevel is INFO — the basic level that records login and
    logout activity, which is important for incident response, without the
    noise of DEBUG.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_2
    '''
    benchmark = '6.2.2 Set LogLevel to INFO (Scored)'
    setting = _grep('"^LogLevel"', '/etc/ssh/sshd_config')
    CIS['Passed' if 'LogLevel INFO' in setting else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_3():
    '''
    Verify permissions on /etc/ssh/sshd_config (600, root:root) so the
    daemon configuration is protected from unauthorized changes.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_3
    '''
    benchmark = '6.2.3 Set permissions on /etc/ssh/sshd_config (Scored)'
    status = _stat('/etc/ssh/sshd_config')
    outcome = 'Passed' if '600 0 0' in status else 'Failed'
    CIS[outcome].append(benchmark)
    return CIS
def audit_6_2_4():
    '''
    Verify SSH X11 forwarding is disabled unless operationally required;
    forwarded X11 sessions carry a small risk of compromise via other
    users on the X11 server.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_4
    '''
    benchmark = '6.2.4 disable SSH X11 forwarding (Scored)'
    setting = _grep('"^X11Forwarding"', '/etc/ssh/sshd_config')
    CIS['Passed' if 'X11Forwarding no' in setting else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_5():
    '''
    Verify sshd MaxAuthTries is 4 or less, minimizing the risk of
    successful brute force attacks against the SSH server.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_5
    '''
    benchmark = '6.2.5 Set SSH maxAuthTries to 4 or less (Scored)'
    ret = _grep('"^MaxAuthTries"', '/etc/ssh/sshd_config')
    # Bug fix: ('1' or '2' or '3' or '4') evaluates to just '1', so the
    # old check only tested for the substring '1' (and would also match
    # e.g. "MaxAuthTries 10"). Parse the configured value numerically.
    try:
        max_tries = int(ret.split()[1])
    except (AttributeError, IndexError, ValueError):
        max_tries = None  # directive absent or malformed -> fail
    if max_tries is not None and 1 <= max_tries <= 4:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_6_2_6():
    '''
    Verify sshd IgnoreRhosts is yes, forcing users to authenticate with a
    password rather than trusted .rhosts entries.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_6
    '''
    benchmark = '6.2.6 Set SSH IgnoreRhosts to Yes (Scored)'
    setting = _grep('"^IgnoreRhosts"', '/etc/ssh/sshd_config')
    CIS['Passed' if 'IgnoreRhosts yes' in setting else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_7():
    '''
    Verify sshd HostbasedAuthentication is no; disabling .rhosts-style
    trust in SSH provides an additional layer of protection even when PAM
    support for it is already off.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_7
    '''
    benchmark = '6.2.7 Set SSH HostbasedAuthentication to No (Scored)'
    setting = _grep('"HostbasedAuthentication"', '/etc/ssh/sshd_config')
    ok = 'HostbasedAuthentication no' in setting
    CIS['Passed' if ok else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_8():
    '''
    Verify sshd PermitRootLogin is no, forcing admins to log in with their
    own accounts and escalate via sudo/su for a clear audit trail.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_8
    '''
    benchmark = '6.2.8 Disable SSH Root Login (Scored)'
    setting = _grep('"^PermitRootLogin"', '/etc/ssh/sshd_config')
    CIS['Passed' if 'PermitRootLogin no' in setting else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_9():
    '''
    Verify sshd PermitEmptyPasswords is no; disallowing remote shells on
    accounts with empty passwords reduces the chance of unauthorized
    access.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_9
    '''
    benchmark = '6.2.9 Set SSH PermitEmptyPasswords to No (Scored)'
    setting = _grep('"^PermitEmptyPasswords"', '/etc/ssh/sshd_config')
    ok = 'PermitEmptyPasswords no' in setting
    CIS['Passed' if ok else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_10():
    '''
    Verify sshd PermitUserEnvironment is no; user-set environment
    variables through the SSH daemon could bypass security controls
    (e.g. an execution path pointing at trojaned programs).
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_10
    '''
    benchmark = '6.2.10 Do not allow users to set environment options (Scored)'
    setting = _grep('"^PermitUserEnvironment"', '/etc/ssh/sshd_config')
    ok = 'PermitUserEnvironment no' in setting
    CIS['Passed' if ok else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_11():
    '''
    Verify sshd is configured to use approved counter-mode ciphers
    (RFC 4344); CBC-mode ciphers in the SSH transport protocol allowed
    recovery of up to 32 bits of plaintext per block.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_11
    '''
    benchmark = '6.2.11 Use only approvide cipher in counter mode (Scored)'
    ret = _grep('"^Ciphers"', '/etc/ssh/sshd_config')
    # Bug fix: ('aes128-ctr' or 'aes192-ctr' or 'aes256-ctr') evaluates to
    # just 'aes128-ctr', so the old check ignored the other approved
    # ciphers. Pass when any approved CTR cipher is configured.
    if any(cipher in ret for cipher in ('aes128-ctr', 'aes192-ctr', 'aes256-ctr')):
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_6_2_12():
    '''
    Verify the SSH idle timeout: ClientAliveInterval 300 with
    ClientAliveCountMax 0 terminates an idle session after 5 minutes,
    reducing the risk of an unattended session being hijacked.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_12
    '''
    benchmark = '6.2.12 Set Idle timeout interval for user login (Scored)'
    interval = _grep('"^ClientAliveInterval"', '/etc/ssh/sshd_config')
    count_max = _grep('"^ClientAliveCountMax"', '/etc/ssh/sshd_config')
    ok = ('ClientAliveInterval 300' in interval
          and 'ClientAliveCountMax 0' in count_max)
    CIS['Passed' if ok else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_13():
    '''
    Verify SSH access is limited via AllowUsers or AllowGroups so only
    authorized users can reach the system remotely.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_13
    '''
    benchmark = '6.2.13 Limit access via SSH (Scored)'
    allow_users = _grep('"^AllowUsers"', '/etc/ssh/sshd_config')
    allow_groups = _grep('"^AllowGroups"', '/etc/ssh/sshd_config')
    restricted = bool(allow_users) or bool(allow_groups)
    CIS['Passed' if restricted else 'Failed'].append(benchmark)
    return CIS
def audit_6_2_14():
    '''
    Verify an SSH Banner is configured to warn connecting users of the
    site's connection policy (consult legal for appropriate wording).
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_2_14
    '''
    benchmark = '6.2.14 Set SSH Banner (Scored)'
    banner = _grep('"^Banner"', '/etc/ssh/sshd_config')
    CIS['Passed' if banner else 'Failed'].append(benchmark)
    return CIS
def audit_6_3_1():
    '''
    Verify the local password hashing algorithm is SHA-512, which is much
    stronger than MD5 and raises the effort needed to crack passwords.
    Only applies to accounts configured on the local system.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_3_1
    '''
    benchmark = '6.3.1 Upgrade password hashing algorithm to SHA-512 (Scored)'
    method = _grep('"^ENCRYPT_METHOD"', '/etc/login.defs')
    CIS['Passed' if 'SHA512' in method else 'Failed'].append(benchmark)
    return CIS
def audit_6_3_2():
    '''
    Verify pam_cracklib is configured in system-auth; strong password
    requirements protect against brute-force attacks.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_3_2
    '''
    benchmark = '6.3.2 Set password creation requirement parameters using pam_cracklib (Scored)'
    configured = _grep('"pam_cracklib.so"', '/etc/pam.d/system-auth')
    CIS['Passed' if configured else 'Failed'].append(benchmark)
    return CIS
def audit_6_3_4():
    '''
    Verify password reuse is limited (the "remember" option in
    system-auth); preventing reuse of recent passwords makes guessing
    harder. Only applies to accounts configured on the local system.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_3_4
    '''
    benchmark = '6.3.4 Limit password reuise (Scored)'
    remember = _grep('"remember"', '/etc/pam.d/system-auth')
    CIS['Passed' if remember else 'Failed'].append(benchmark)
    return CIS
def audit_6_5():
    '''
    Verify access to su is restricted via pam_wheel; favoring sudo over su
    gives better control of privilege escalation and per-command audit
    logging, whereas su only records that it was executed.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_6_5
    '''
    benchmark = '6.5 Restrict access to the su command (Scored)'
    wheel = _grep('"pam_wheel.so"', '/etc/pam.d/su')
    CIS['Passed' if wheel else 'Failed'].append(benchmark)
    return CIS
def audit_7_1_1():
    '''
    Verify password expiration (PASS_MAX_DAYS 90); reducing maximum
    password age limits an attacker's window to leverage compromised
    credentials.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_7_1_1
    '''
    benchmark = '7.1.1 Set password expiration days (Scored)'
    setting = _grep('"PASS_MAX_DAYS"', '/etc/login.defs')
    CIS['Passed' if '90' in setting else 'Failed'].append(benchmark)
    return CIS
def audit_7_1_2():
    '''
    Verify minimum days between password changes (PASS_MIN_DAYS 7),
    preventing users from cycling passwords rapidly to defeat reuse
    controls.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_7_1_2
    '''
    benchmark = '7.1.2 Set password change minimum number of days (Scored)'
    setting = _grep('"PASS_MIN_DAYS"', '/etc/login.defs')
    CIS['Passed' if '7' in setting else 'Failed'].append(benchmark)
    return CIS
def audit_7_1_3():
    '''
    Verify password expiry warning (PASS_WARN_AGE 7); advance warning
    gives users time to choose a secure replacement password.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_7_1_3
    '''
    benchmark = '7.1.3 Set password expiring warning days (Scored)'
    setting = _grep('"PASS_WARN_AGE"', '/etc/login.defs')
    CIS['Passed' if '7' in setting else 'Failed'].append(benchmark)
    return CIS
def audit_7_2():
    '''
    Verify system accounts are disabled: any non-exempt account with
    UID < 500 should use /sbin/nologin so it cannot provide an interactive
    shell. The check passes when the egrep/awk pipeline finds no
    offenders.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_7_2
    '''
    benchmark = '7.2 Disable system accounts (Scored)'
    cmd = "egrep -v '^\+' /etc/passwd | awk -F: '($1!='root' && $1!='sync' && $1!='shutdown' && $1!='halt' && $3<500 && $7!='/sbin/nologin') {print}'"
    offenders = __salt__['cmd.run'](cmd, python_shell=True)
    CIS['Failed' if offenders else 'Passed'].append(benchmark)
    return CIS
def audit_7_3():
    '''
    Verify the root account's default group is GID 0, which helps prevent
    root-owned files from accidentally becoming accessible to
    non-privileged users.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_7_3
    '''
    benchmark = '7.3 Set default group for root account (Scored)'
    entry = __salt__['cmd.run']('getent passwd root', python_shell=False)
    CIS['Passed' if '0:0' in entry else 'Failed'].append(benchmark)
    return CIS
def audit_7_4():
    '''
    Verify the default umask in /etc/bashrc is 077 so user-created files
    and directories are not readable by any other user. Note: this only
    covers bash; other supported shells' configs should be checked too.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_7_4
    '''
    benchmark = '7.4 Set default umask for users (Scored)'
    umask_line = _grep('"^umask"', '/etc/bashrc')
    CIS['Passed' if '077' in umask_line else 'Failed'].append(benchmark)
    return CIS
def audit_7_5():
    '''
    Verify inactive user accounts get locked (INACTIVE in
    /etc/default/useradd is not -1); inactive accounts pose a threat since
    nobody notices failed login attempts against them. Fails when the
    value is -1 (never lock).
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_7_5
    '''
    benchmark = '7.5 Lock inactive user accounts (Scored)'
    inactive = _grep('"^INACTIVE"', '/etc/default/useradd')
    CIS['Failed' if '-1' in inactive else 'Passed'].append(benchmark)
    return CIS
def audit_8_1():
    '''
    Verify warning banners for standard login services: /etc/motd,
    /etc/issue and /etc/issue.net must all be present with the expected
    ownership/permissions ('600 0 0' per this module's _stat convention).
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_8_1
    '''
    benchmark = '8.1 Set warning banner for standard login services (Scored)'
    ret1 = _stat('/etc/motd')
    ret2 = _stat('/etc/issue')
    ret3 = _stat('/etc/issue.net')
    # Bug fix: '600 0 0' in (ret1 and ret2 and ret3) only inspected the
    # last truthy string; every file must be checked individually.
    if all('600 0 0' in ret for ret in (ret1, ret2, ret3)):
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_8_2():
    '''
    Verify OS information escape sequences (\\v \\r \\m \\s) are absent from
    the login banners; displaying OS and patch level in banners hands
    attackers targeting information they could otherwise only get after
    logging in.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_8_2
    '''
    benchmark = '8.2 Remove OS Information from login warning banners (Scored)'
    # Bug fix: all three greps were assigned to ret1, so the check below
    # raised NameError on ret2/ret3 and only the last file was inspected.
    ret1 = _grep('"(\\v|\\r|\\m|\\s)"', '/etc/issue')
    ret2 = _grep('"(\\v|\\r|\\m|\\s)"', '/etc/motd')
    ret3 = _grep('"(\\v|\\r|\\m|\\s)"', '/etc/issue.net')
    if not (ret1 or ret2 or ret3):
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_9_1_2():
    '''
    Verify permissions on /etc/passwd, protecting it from unauthorized
    write access.
    NOTE(review): this passes on mode 600, but CIS typically recommends
    644 for /etc/passwd (it must stay world-readable) — confirm intent.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_9_1_2
    '''
    benchmark = '9.1.2 Verify permissions on /etc/passwd (Scored)'
    status = _stat('/etc/passwd')
    CIS['Passed' if '600' in status else 'Failed'].append(benchmark)
    return CIS
def audit_9_1_3():
    '''
    Verify permissions on /etc/shadow (mode 000); read access would let an
    attacker run password crackers against the hashes or harvest account
    expiration data.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_9_1_3
    '''
    benchmark = '9.1.3 Verify permissions on /etc/shadow (Scored)'
    status = _stat('/etc/shadow')
    CIS['Passed' if '000' in status else 'Failed'].append(benchmark)
    return CIS
def audit_9_1_4():
    '''
    Verify permissions on /etc/gshadow (mode 000); read access would let
    an attacker crack group password hashes or harvest group account
    details.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_9_1_4
    '''
    benchmark = '9.1.4 Verify permissions on /etc/gshadow (Scored)'
    status = _stat('/etc/gshadow')
    CIS['Passed' if '000' in status else 'Failed'].append(benchmark)
    return CIS
def audit_9_1_5():
    '''
    Verify permissions on /etc/group (mode 644): protected from
    unauthorized changes, yet readable by the many non-privileged programs
    that use it.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_9_1_5
    '''
    benchmark = '9.1.5 Verify permissions on /etc/group (Scored)'
    status = _stat('/etc/group')
    CIS['Passed' if '644' in status else 'Failed'].append(benchmark)
    return CIS
def audit_9_1_6():
    '''
    Verify /etc/passwd is owned by root:root (uid 0, gid 0) so it is
    protected from unauthorized changes while remaining readable.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_9_1_6
    '''
    benchmark = '9.1.6 Verify user/group ownership on /etc/passwd (Scored)'
    status = _stat('/etc/passwd')
    CIS['Passed' if '0 0' in status else 'Failed'].append(benchmark)
    return CIS
def audit_9_1_7():
    '''
    Verify /etc/shadow is owned by root:root (uid 0, gid 0); read access
    would expose password hashes and expiration data to attackers.
    CLI Example:
    .. code-block:: shell
        salt '*' cis.audit_9_1_7
    '''
    benchmark = '9.1.7 Verify user/group ownership on /etc/shadow (Scored)'
    status = _stat('/etc/shadow')
    CIS['Passed' if '0 0' in status else 'Failed'].append(benchmark)
    return CIS
def audit_9_1_8():
    """9.1.8 Verify user/group ownership on /etc/gshadow (Scored).

    Read access to /etc/gshadow lets an attacker run a password cracker
    against the stored group hashes; other fields (such as expiration) can
    also help subvert group accounts.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_9_1_8
    """
    check = '9.1.8 Verify user/group ownership on /etc/gshadow (Scored)'
    # '0 0' in the stat summary means uid 0 / gid 0 (root:root) ownership.
    bucket = 'Passed' if '0 0' in _stat('/etc/gshadow') else 'Failed'
    CIS[bucket].append(check)
    return CIS
def audit_9_1_9():
    """9.1.9 Verify User/Group Ownership on /etc/group (Scored).

    /etc/group must be protected from modification by unprivileged users,
    but has to stay readable since many unprivileged programs consult it.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_9_1_9
    """
    check = '9.1.9 Verify user/group ownership on /etc/group (Scored)'
    # '0 0' in the stat summary means uid 0 / gid 0 (root:root) ownership.
    bucket = 'Passed' if '0 0' in _stat('/etc/group') else 'Failed'
    CIS[bucket].append(check)
    return CIS
def audit_9_1_11():
    """9.1.11 Find Un-owned Files and Directories (Scored).

    A new user assigned a deleted user's uid would "own" any files left
    behind with that uid, gaining more access than intended.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_9_1_11
    """
    check = '9.1.11 Find Un-owned Files and Directories (Scored)'
    # The find prints one line per unowned file; empty output means compliant.
    cmd = "df --local -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -nouser -ls"
    output = __salt__['cmd.run'](cmd, python_shell=True)
    bucket = 'Failed' if output else 'Passed'
    CIS[bucket].append(check)
    return CIS
def audit_9_1_12():
    """9.1.12 Find Un-grouped Files and Directories (Scored).

    A new user assigned a deleted user's group ID would "own" any files
    left behind with that gid, gaining more access than intended.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_9_1_12
    """
    check = '9.1.12 Find Un-grouped Files and Directories (Scored)'
    # The find prints one line per ungrouped file; empty output means compliant.
    cmd = "df --local -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -nogroup -ls"
    output = __salt__['cmd.run'](cmd, python_shell=True)
    bucket = 'Failed' if output else 'Passed'
    CIS[bucket].append(check)
    return CIS
def audit_9_2_1():
    """9.2.1 Ensure password fields are not empty (Scored).

    All accounts must have passwords or be locked to prevent the account
    from being used by an unauthorized user.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_9_2_1

    Fix: the benchmark label previously misspelled "empty" as "emply".
    """
    benchmark = '9.2.1 Ensure password fields are not empty (Scored)'
    # awk prints one line per account whose shadow password field is empty;
    # no output therefore means every account has a password (or is locked).
    cmd = "/bin/awk -F: '($2 == \"\" ) { print $1 \" does not have a password \"}' /etc/shadow"
    ret = __salt__['cmd.run'](cmd, python_shell=False)
    if not ret:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit_9_2_2():
    """9.2.2 Verify no legacy "+" entries exist in /etc/passwd file (Scored).

    Legacy NIS "+" entries may provide an avenue for attackers to gain
    privileged access on the system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_9_2_2
    """
    check = '9.2.2 Verify no legacy "+" entries exist in /etc/passwd file (Scored)'
    # Empty grep output means no "+:"-prefixed lines exist, which passes.
    bucket = 'Failed' if _grep('"^+:"', '/etc/passwd') else 'Passed'
    CIS[bucket].append(check)
    return CIS
def audit_9_2_3():
    """9.2.3 Verify no legacy "+" entries exist in /etc/shadow file (Scored).

    Legacy NIS "+" entries may provide an avenue for attackers to gain
    privileged access on the system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_9_2_3
    """
    check = '9.2.3 Verify no legacy "+" entries exist in /etc/shadow file (Scored)'
    # Empty grep output means no "+:"-prefixed lines exist, which passes.
    bucket = 'Failed' if _grep('"^+:"', '/etc/shadow') else 'Passed'
    CIS[bucket].append(check)
    return CIS
def audit_9_2_4():
    """9.2.4 Verify no legacy "+" entries exist in /etc/group file (Scored).

    Legacy NIS "+" entries may provide an avenue for attackers to gain
    privileged access on the system.

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_9_2_4
    """
    check = '9.2.4 Verify no legacy "+" entries exist in /etc/group file (Scored)'
    # Empty grep output means no "+:"-prefixed lines exist, which passes.
    bucket = 'Failed' if _grep('"^+:"', '/etc/group') else 'Passed'
    CIS[bucket].append(check)
    return CIS
def audit_9_2_5():
    """9.2.5 Verify No UID 0 Accounts Exist Other Than root (Scored).

    Privileged access must be limited to the default root account and only
    from the system console; administrative access must go through an
    unprivileged account using an approved mechanism (see Item 7.5).

    CLI Example:

    .. code-block:: shell

        salt '*' cis.audit_9_2_5

    Fix: the original test was ``if not 'root' in ret``, which *passed*
    whenever the string 'root' appeared anywhere in the output — so a
    system with additional UID-0 accounts alongside root was wrongly
    reported as compliant.  The benchmark now passes only when root is the
    sole UID-0 account.
    """
    benchmark = '9.2.5 Verify No UID 0 Accounts Exist Other Than root (Scored)'
    # awk prints one account name per line for every /etc/passwd entry
    # whose uid field is 0.
    cmd = "/bin/awk -F: '($3 == 0) { print $1 }' /etc/passwd"
    ret = __salt__['cmd.run'](cmd, python_shell=False)
    uid0_accounts = [line.strip() for line in ret.splitlines() if line.strip()]
    if uid0_accounts == ['root']:
        CIS['Passed'].append(benchmark)
    else:
        CIS['Failed'].append(benchmark)
    return CIS
def audit(details=False, failed=False, passed=False):
    """Run every implemented CIS benchmark and return the results.

    Each check appends its benchmark label to ``CIS['Passed']`` or
    ``CIS['Failed']``; the individual ``audit_*`` docstrings describe what
    every benchmark covers.

    Args:
        details: when True, return the full ``CIS`` structure (host details
            plus per-benchmark pass/fail lists); otherwise only the totals.
        failed: currently unused; kept for interface compatibility.
        passed: currently unused; kept for interface compatibility.

    Fix: the totals were previously accumulated by iterating the result
    lists and incrementing a counter once per element; ``len()`` gives the
    same numbers directly.
    """
    CIS['Details']['Hostname'] = __salt__['pillar.get']('cmdb:details:name')
    CIS['Details']['Device Service'] = __salt__['pillar.get']('cmdb:details:device_service')
    checks = (
        audit_1_1_1, audit_1_1_2, audit_1_1_3, audit_1_1_4, audit_1_1_5,
        audit_1_1_6, audit_1_1_7, audit_1_1_8, audit_1_1_9, audit_1_1_10,
        audit_1_1_14, audit_1_1_15, audit_1_1_16, audit_1_1_17,
        audit_1_2_2,
        audit_1_5_1, audit_1_5_2, audit_1_5_3, audit_1_5_4, audit_1_5_5,
        audit_1_6_1, audit_1_6_2, audit_1_6_3,
        audit_2_1_1, audit_2_1_2, audit_2_1_3, audit_2_1_4, audit_2_1_5,
        audit_2_1_6, audit_2_1_7, audit_2_1_8, audit_2_1_9, audit_2_1_10,
        audit_2_1_11, audit_2_1_12, audit_2_1_13, audit_2_1_14,
        audit_2_1_15, audit_2_1_16, audit_2_1_17, audit_2_1_18,
        audit_3_1, audit_3_2, audit_3_3, audit_3_5, audit_3_6, audit_3_16,
        audit_4_1_1, audit_4_1_2,
        audit_4_2_1, audit_4_2_2, audit_4_2_4, audit_4_2_5, audit_4_2_6,
        audit_4_2_8,
        audit_4_5_3, audit_4_5_5, audit_4_7,
        audit_5_1_1, audit_5_1_2, audit_5_1_5,
        audit_6_1_1, audit_6_1_2, audit_6_1_3, audit_6_1_4, audit_6_1_5,
        audit_6_1_6, audit_6_1_7, audit_6_1_8, audit_6_1_9, audit_6_1_10,
        audit_6_1_11,
        audit_6_2_1, audit_6_2_2, audit_6_2_3, audit_6_2_4, audit_6_2_5,
        audit_6_2_6, audit_6_2_7, audit_6_2_8, audit_6_2_9, audit_6_2_10,
        audit_6_2_11, audit_6_2_12, audit_6_2_13, audit_6_2_14,
        audit_6_3_1, audit_6_3_2, audit_6_3_4,
        audit_6_5,
        audit_7_1_1, audit_7_1_2, audit_7_1_3,
        audit_7_2, audit_7_3, audit_7_4, audit_7_5,
        audit_8_1, audit_8_2,
        audit_9_1_2, audit_9_1_3, audit_9_1_4, audit_9_1_5, audit_9_1_6,
        audit_9_1_7, audit_9_1_8, audit_9_1_9, audit_9_1_11, audit_9_1_12,
        audit_9_2_1, audit_9_2_2, audit_9_2_3, audit_9_2_4, audit_9_2_5,
    )
    for check in checks:
        check()
    # Tally directly instead of counting element-by-element.
    CIS['Totals']['Pass'] += len(CIS['Passed'])
    CIS['Totals']['Fail'] += len(CIS['Failed'])
    if details:
        return CIS
    return CIS['Totals']
|
def listoverlap(list1, list2):
    """Print and return the distinct elements of *list1* that also occur in
    *list2*, in first-seen order from *list1*.

    Fix: the original computed the overlap and printed it but implicitly
    returned ``None``, so callers could not use the result; it also built a
    duplicate-laden intermediate list before de-duplicating.  The single
    pass below keeps list-based membership so unhashable elements keep
    working, and the overlap is now returned as well (backward compatible:
    printed output is unchanged).
    """
    overlap = []
    for item in list1:
        if item in list2 and item not in overlap:
            overlap.append(item)
    print(overlap)
    return overlap
|
import matplotlib.pyplot as plt
import numpy as np
import uncertainties.unumpy as unp
from scipy.optimize import curve_fit
from scipy import stats
from uncertainties import ufloat
# x = np.linspace(0, 10, 1000)
# y = x ** np.sin(x)
# plt.subplot(1, 2, 1)
# plt.plot(x, y, label='Kurve')
# plt.xlabel(r'$\alpha \:/\: \si{\ohm}$')
# plt.ylabel(r'$y \:/\: \si{\micro\joule}$')
# plt.legend(loc='best')
# plt.subplot(1, 2, 2)
# plt.plot(x, y, label='Kurve')
# plt.xlabel(r'$\alpha \:/\: \si{\ohm}$')
# plt.ylabel(r'$y \:/\: \si{\micro\joule}$')
# plt.legend(loc='best')
# # in matplotlibrc leider (noch) nicht möglich
# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# plt.savefig('plots/plot.pdf')
# Heat capacity of the lead calorimeter
def gKap(cw, mw, mk, Tw, Tk, Tm):
    """Heat capacity of a calorimeter from a water-mixing experiment.

    cw: specific heat of water; mw/mk: masses of hot and cold water;
    Tw/Tk: hot and cold starting temperatures; Tm: mixing temperature.
    """
    warming = Tm - Tk
    return cw * (mw * (Tw - Tm) - mk * warming) / warming
c_bk=gKap(4180,0.23311,0.26764,96.1,21.7,52.0) # lead calorimeter
print('Wärmekapazität des Bleikaloriemeters c_g m_g: ',c_bk,'Joule/K')
c_ak=gKap(4180,0.25153,0.244,100.0,21.1,55.3) # aluminium calorimeter
print('Wärmekapazität des Alukaloriemeters c_g m_g: ',c_ak,'Joule/K')
# Measurement series for the lead sample: water mass plus the water, lead
# and mixing temperatures, each recorded three times.
m_W = (0.50022,0.50044,0.50034)
m_W_Mittel=np.mean(m_W)
m_W_std = np.std(m_W)
T_W = (23.7,22.4,22.6)
T_W_Mittel=np.mean(T_W)
T_W_std = np.std(T_W)
T_B = (99.4,100.0,99.7)
T_B_Mittel=np.mean(T_B)
T_B_std = np.std(T_B)
T_M = (27.4,26.1,26.2)
T_M_Mittel=np.mean(T_M)
T_M_std = np.std(T_M)
# NOTE(review): '\p' below is not a valid escape and prints as a literal
# backslash-p; left unchanged because it is runtime output.
print('Mittelwerte Bleimessung: \nMasse: ',m_W_Mittel,'\pm',m_W_std,'\nT_Wasser: ',T_W_Mittel,'\pm',T_W_std,'\nT_Blei: ',T_B_Mittel,'\pm',T_B_std,'\nT_Misch:',T_M_Mittel,'\pm',T_M_std,)
c_Wasser=4180
m_Blei=0.54263
# Wrap means and standard deviations in ufloats for error propagation.
m_Wasser=ufloat(m_W_Mittel,m_W_std)
T_Wasser=ufloat(T_W_Mittel,T_W_std)
T_Blei=ufloat(T_B_Mittel,T_B_std)
T_Misch=ufloat(T_M_Mittel,T_M_std)
# Specific heat of the lead sample from the calorimeter heat balance.
c_kB = (c_Wasser*m_Wasser+c_bk)*(T_Misch-T_Wasser)/(m_Blei*(T_Blei-T_Misch))
print('C_kBlei: ',c_kB)
# Without averaging: evaluate each of the three runs separately.
Vec_m_W=np.array([0.50022,0.50044,0.50034])
Vec_T_W=np.array([23.7,22.4,22.6])
Vec_T_B =np.array([99.4,100.0,99.7])
Vec_T_Misch =np.array([27.4,26.1,26.2])
c_kBVec = (c_Wasser*Vec_m_W+c_bk)*(Vec_T_Misch-Vec_T_W)/(m_Blei*(Vec_T_B-Vec_T_Misch))
print('c_kBlei pro Messung',c_kBVec)
# Constant-volume heat-capacity inputs for lead: thermal expansion
# coefficient, bulk modulus and molar volume.
a_B=0.000029
k_B=420000000000
V_0B=0.00001826
def C_V(C_p, a, k, V_0, T, mol):
    """Constant-volume heat capacity from the constant-pressure value.

    Applies the thermodynamic correction C_V = C_p - 9*a^2*k*V_0*T_abs,
    with C_p converted via the molar mass *mol* (in kg/mol) divided by 1000
    as in the original data convention, and T given in degrees Celsius.
    """
    molar_cp = C_p * mol / 1000
    correction = 9 * a ** 2 * k * V_0 * (T + 273.15)
    return molar_cp - correction
Vec_C_VB = C_V(c_kBVec,a_B,k_B,V_0B,Vec_T_Misch,207.2/1000)
print('C_v für Blei pro Messung: ',Vec_C_VB)
print('c_v Blei Mittel:',np.mean(Vec_C_VB),np.std(Vec_C_VB) )
# Aluminium measurement (single run): masses and temperatures.
M_alu=0.15646
M_Wasser=0.49981
t_Wasser=21.1
t_Alu=99.4
t_Misch=28.6
# Specific heat of the aluminium sample from the calorimeter heat balance.
c_kA = (c_Wasser*M_Wasser+c_ak)*(t_Misch-t_Wasser)/(M_alu*(t_Alu-t_Misch))
print('C_kAlu: ',c_kA)
# Constant-volume heat-capacity inputs for aluminium: thermal expansion
# coefficient, bulk modulus and molar volume.
a_A=0.0000235
k_A=750000000000
V_0A=0.00001
C_VA=C_V(c_kA,a_A,k_A,V_0A,t_Misch,26.98/1000)
print('C_v für Alu',C_VA)
|
from bs4 import BeautifulSoup
from requests_html import HTMLSession
import hashlib
import os
import multiprocessing
import math
import requests
import json
import datetime
import cv2
class image_downloader:
    """Scrape Google and Bing image search for a keyword query, download
    the hits, crop the anime faces detected in them and normalise the
    crops to 128x128 PNG files."""

    def __init__(self):
        pass

    def google_download_page(self, keywords):
        """Return thumbnail URLs from the first Google Images result page
        for *keywords*; returns an empty list on any failure."""
        #Goes to google and makes a query with the keywords
        #return a list of links to download
        #Preparing url from keywords
        url = "https://www.google.com/search?q=" + (
            keywords.replace(' ','+')) + (
            "&sxsrf=ALeKk012aKzq5ZtEAObHa7bjYa-O7rAIZg:1624446613217&source=lnms&tbm=isch&sa=X&ved=2ahUKEwj88diaz63xAhWszoUKHehvAjQQ_AUoAXoECAEQAw&biw=1280&bih=605")
        try:
            session = HTMLSession()
            r = session.get(url) #starting session and get html
            soup = BeautifulSoup(r.content, 'html5lib') #prettify html
            link_list = []
            for img in soup.find_all('img'): #finding images from 1st load of the page
                if img.get('data-src') is not None: #Cleaning nones
                    link_list.append(img.get('data-src')) #getting #data-src from each image
            #Error handling
            return link_list
        except Exception as e:
            # Any network or parsing failure degrades to "no links found".
            return []

    def bing_download_page(self, keywords, limit=20):
        """Collect at least *limit* Bing image-thumbnail URLs for
        *keywords*, paging through results; stops early when a page yields
        fewer than 10 new links.  Returns an empty list on any failure."""
        try:
            link_list = []
            base_url = "https://www.bing.com/images/search?q=" + (
                keywords.replace(' ','+'))
            current_position = "&first=1"
            url = base_url + current_position
            while len(link_list) < limit: #Will run until it passes the limit for the 1st time
                link_list_verifier = (len(link_list))
                session = HTMLSession()
                head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36"}
                r = session.get(url, headers=head) #starting session and get html
                soup = BeautifulSoup(r.content, 'html5lib') #prettify html
                # 'OIP'/'OIF' substrings mark Bing thumbnail URLs —
                # presumably distinguishing image results from page chrome
                # (TODO confirm against current Bing markup).
                for img in soup.find_all('img'): #finding images from 1st load of the page
                    if (img.get('src') is not None) and ('OIP' in img.get('src')): #Cleaning nones and nonimage stuff
                        link_list.append(img.get('src'))
                    if img.get('data-src') is not None and 'OIP' in img.get('data-src'):
                        link_list.append(img.get('data-src'))
                    if (img.get('src') is not None) and ('OIF' in img.get('src')): #Cleaning nones and nonimage stuff
                        link_list.append(img.get('src'))
                    if img.get('data-src') is not None and 'OIF' in img.get('data-src'):
                        link_list.append(img.get('data-src'))
                url = base_url + "&first=" + str(len(link_list)) #Preparing link for next loop
                if len(link_list) - link_list_verifier < 10: #There is almost no new stuff
                    break
            return link_list
        except Exception as e:
            return []

    def download_from_links(self, keywords, dic="data/"):
        """Download every Google+Bing hit for *keywords* into a fresh
        sub-directory of *dic*, then run face detection and resizing on
        the downloads.  Files are named by the MD5 of their source URL
        (saved with a .png suffix regardless of the actual format)."""
        urls = self.google_download_page(keywords)
        bing_links = self.bing_download_page(keywords)
        urls = bing_links + urls
        downloaded_items = 0
        if not os.path.isdir(dic):
            raise Exception("No main data directory found.")
        try:
            session = requests.Session()
            dic = self.create_dic(dic=dic, keywords=keywords)
            for url in urls:
                downloaded_items += 1
                img_data = session.get(url).content #Save with random hash
                with open(dic + str(hashlib.md5(url.encode('utf-8')).hexdigest()+".png"), 'wb') as handler:
                    handler.write(img_data)
            #Running face_identifier right now to save memory
            self.face_identifier(directory=dic)
            self.face_reducer(directory=dic)
        except Exception as e:
            print(e)

    def create_dic(self, dic, keywords):
        """Create (and return, with a trailing slash) a per-query directory,
        replacing characters that are invalid in Windows paths."""
        name = (dic + keywords)
        name = name.replace('?', ' ')
        name = name.replace(':', ' ')
        name = name.replace('*', ' ')
        name = name.replace('"', ' ')
        name = name.replace('|', ' ')
        # NOTE(review): raises FileExistsError when the query was scraped
        # before — confirm this is the intended de-duplication behaviour.
        os.makedirs(name)
        return (name + '/')

    def face_identifier(self, directory, cascade_file='lbpcascade_animeface.xml'):
        '''Uses the cascade file to identify in each character directory the faces, and deletes the
        initial images.'''
        if not os.path.isfile(cascade_file):
            raise RuntimeError("%s: not found" % cascade_file)
        cascade = cv2.CascadeClassifier(cascade_file)
        files = []
        # Only the top-level files of *directory* (break after the first
        # os.walk() tuple so sub-directories are skipped).
        for (dirpath, dirnames, filenames) in os.walk(directory):
            files.extend(filenames)
            break
        for image_file in files:
            image_file = directory + image_file
            image = cv2.imread(image_file)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray = cv2.equalizeHist(gray)
            faces = cascade.detectMultiScale(gray,
                                             # detector options
                                             scaleFactor = 1.1,
                                             minNeighbors = 5,
                                             minSize = (30, 30))
            # Save one "<name>-<k>.png" crop per detected face, then delete
            # the original download.
            face_num = 0
            for (x, y, w, h) in faces:
                face_num += 1
                crop_img = image[y:y+h, x:x+w]
                cv2.imwrite(
                    str(image_file[:-4]) + '-' + str(face_num)+'.png',
                    crop_img
                )
            os.remove(image_file)

    def face_reducer(self, directory):
        """Resize every file directly inside *directory* in place to
        128x128 using area interpolation."""
        files = []
        for (dirpath, dirnames, filenames) in os.walk(directory):
            files.extend(filenames)
            break
        for image_file in files:
            image_file = directory + image_file
            image = cv2.imread(image_file)
            resized_image = cv2.resize(image, (128, 128), interpolation=cv2.INTER_AREA)
            cv2.imwrite(
                str(image_file),
                resized_image
            )
def main(database='anime-offline-database.json', workers=1):
    """Split the anime database across *workers* processes that each scrape
    character images via :func:`json_to_character`.

    Fixes: the database file was opened ``'r+'`` (requesting write access it
    never used); the per-worker chunk size came from ``math.modf`` floating
    floor, silently dropping the remainder entries when the database size
    was not divisible by *workers*; and the started processes were never
    joined.
    """
    with open(database, 'r', encoding='utf8') as anime_database:
        data = json.load(anime_database)
    workers = int(workers)
    entries = data['data']
    per_worker = len(entries) // workers
    jobs = []
    for job in range(workers):
        start = job * per_worker
        # The last worker also takes the remainder so no anime is skipped.
        stop = len(entries) if job == workers - 1 else (job + 1) * per_worker
        proc = multiprocessing.Process(target=json_to_character,
                                       args=(entries[start:stop], job))
        jobs.append(proc)
        proc.start()
    for proc in jobs:
        proc.join()
def json_to_character(data, job):
    """For each anime entry in *data*, download images for every character
    (query: anime title + character full name), logging per-entry progress
    tagged with the 1-based worker number *job* + 1."""
    total = len(data)
    for position, anime in enumerate(data, start=1):
        timestamp = datetime.datetime.now().strftime("%H:%M:%S")
        print('[%s] Worker %s: %s / %s' % (timestamp, job + 1, position, total))
        for character in anime['characters']:
            query = anime['title'] + ' ' + str(character['name']['full'])
            image_downloader().download_from_links(query)
if __name__ == '__main__':
    # Single worker by default; raise ``workers`` to scrape anime in parallel.
    main(database='anime-offline-database.json', workers=1)
|
# Smoke-test driver: import the ``line`` helper from the sibling module
# ``example1`` under a short alias and evaluate it once.
from example1 import line as p1
print("++++ executing "+ __file__)
print( p1( -3.4, q=0.5 ) )
|
# Perfect Squares
# Given a positive integer n, find the least number of perfect square numbers
# (for example 1, 4, 9, 16, ...) which sum to n
# Explanation: Sub-problem of recursive algorithm is what's the least number of
# perfect square numbers to (n - curr_sqr**2). Then we could just
# find the min of (n - curr_sqr**2) + 1, +1 for curr_sqr**2, and return.
# Run Time: O(n * log(n)). We have to calculate all the answers up to n. At every
# i for i = 1 to n, out calculation expand at most log(n).
def sol_dp(n):
    """Least number of perfect squares summing to *n* (bottom-up DP).

    ds[i] holds the answer for i; each i considers peeling off every
    square <= i.  O(n * sqrt(n)) time, O(n) space.
    """
    if n < 4:
        # 0..3 can only be written as sums of 1^2.
        return n
    ds = [0] * (n + 1)
    ds[1] = 1
    for total in range(2, n + 1):
        # Baseline: remove a single 1^2 from the previous answer.
        best = ds[total - 1] + 1
        root = 2
        while root * root <= total:
            best = min(best, ds[total - root * root] + 1)
            root += 1
        ds[total] = best
    return ds[n]
# Explanation: This problem is about finding 1) division of n 2) finding the
# least size among answers to 1). For finding division, recursive
# answer will be going through all the possible options.
# Run Time: O(log(n)^n). For every level, we have log(n) options to loop through.
# Then this goes on for n times in depth so it is O(log(n)^n)
def sol_rec(n, depth):
    """Least number of perfect squares summing to *n* by brute-force
    recursion; *depth* counts the squares already used on this path.

    Fix: the loop guard was ``curr_sqr <= n`` instead of
    ``curr_sqr**2 <= n``, so the search also recursed into negative
    remainders.  Those branches could only return the 9999 sentinel (which
    ``min`` discarded), so results were still correct, but an enormous
    amount of exponential work was wasted.
    """
    if n == 0:
        return depth
    best = 9999  # sentinel: larger than any real answer (answer <= n)
    root = 1
    while root * root <= n:
        best = min(best, sol_rec(n - root * root, depth + 1))
        root += 1
    return best
# Sanity check: the brute-force and DP solvers must agree for n in 0..49.
# NOTE(review): indentation was lost in this paste; this reading attaches
# the final ``else`` to the ``if`` (a per-n progress print) — confirm it
# was not meant as a for/else printing once after the loop.
for i in range(50):
    if sol_rec(i, 0) != sol_dp(i):
        print(i)  # first n where the two solvers disagree
        break
    else:
        print('passed {}'.format(i))
|
print("Celcius To Ferenhiet or Vice-versa Conversion System")
print("Developed By Sudip Mitra")
print("E-mail : sudipmitraonline@gmail.com")
print("")
def c_to_f_translator(value):
    """Convert a temperature from Celsius to Fahrenheit."""
    scaled = (value * 9) / 5
    return scaled + 32
def f_to_c_translator(value):
    """Convert a temperature from Fahrenheit to Celsius."""
    shifted = value - 32
    return (shifted * 5) / 9
#Choice for User Preference
# Fixes: corrected the misspellings "Celcius"/"Ferenhiet" in the prompts,
# and guarded int() so a non-numeric choice reaches the error branch
# instead of crashing with a traceback.
print("Enter a choice : \n ' 1 ' for Celsius to Fahrenheit Conversion. \n ' 2 ' for Fahrenheit to Celsius Conversion.")
try:
    user_input = int(input())
except ValueError:
    user_input = -1  # any non-numeric choice falls through to "Wrong Choice"
if(user_input == 1):
    print("Enter the Celsius Temperature to convert :")
    user_input1 = int(input())
    print("Converted Fahrenheit Temperature is : ", c_to_f_translator(user_input1))
elif(user_input == 2):
    print("Enter the Fahrenheit Temperature to convert :")
    user_input1 = int(input())
    print("Converted Celsius Temperature is : ", f_to_c_translator(user_input1))
else:
    print("Wrong Choice. Process Abandoned. Please Try Again.")
# Keep the console window open until the user presses Enter.
input()
|
"""
Textbook example of recursion. The recursion tree is unbalanced. The rightmost path
has n/2 levels and the leftmost has n levels. So the first n/2 levels of the tree are full,
but the full n levels are not. So there are between O((sqrt 2)^n) = O(1.4^n) and O(2^n)
levels, which is the time complexity of the algorithm.
"""
def recursive_fib(n):
    """Naive exponential-time Fibonacci (the textbook recursion).

    Fix: the second recursive call was ``recursive_fib(n-1)`` instead of
    ``recursive_fib(n-2)``, which computed 2**(n-1) rather than fib(n) and
    contradicted the unbalanced-tree analysis in the module docstring.
    """
    if n == 0 or n == 1:
        return n
    return recursive_fib(n - 1) + recursive_fib(n - 2)
# Finally, some linear time algorithms
# This one uses O(n) storage
def dp_fib(n):
    """Memoized recursive Fibonacci using an O(n) table.

    Fix: the original allocated ``dp = [0]*(n+1)`` and wrote ``dp[1] = 1``
    *before* the n < 2 early return, so ``dp_fib(0)`` raised IndexError on
    a one-element table.  The base cases are now handled first.
    """
    if n == 0 or n == 1:
        return n
    dp = [0] * (n + 1)
    dp[1] = 1

    def helper(i):
        # dp[i] == 0 doubles as the "not yet computed" marker (valid because
        # every Fibonacci number beyond index 0 is positive).
        if i > 1 and dp[i] == 0:
            helper(i - 1)
            helper(i - 2)
            dp[i] = dp[i - 1] + dp[i - 2]

    helper(n)
    return dp[n]
def iter_fib(n):
    """Iterative Fibonacci: O(n) time, O(1) space."""
    if n in (0, 1):
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
# A recursive solution can still be O(n): carry the last two terms of the
# sequence as accumulator arguments, so each call does constant work.
def addseq(n, a, b):
    """Return term *n* of the additive sequence whose first two terms are
    *a* and *b* (Fibonacci when a=0, b=1)."""
    if n == 0:
        return a
    if n == 1:
        return b
    return addseq(n - 1, b, a + b)


def fib(n):
    """Fibonacci via the accumulator recursion above."""
    return addseq(n, 0, 1)
|
"""Implementation of app API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import fnmatch
from treadmill import context
from treadmill import schema
from treadmill.scheduler import masterapi
_LOGGER = logging.getLogger(__name__)
class API(object):
    """Treadmill AppMonitor REST api."""

    def __init__(self):
        # The REST layer invokes these closures; they are created here and
        # exposed as instance attributes at the bottom of __init__.

        def _list(match=None):
            """List configured monitors."""
            # No filter means match everything.
            if match is None:
                match = '*'
            zkclient = context.GLOBAL.zk.conn
            monitors = [
                masterapi.get_appmonitor(zkclient, app)
                for app in masterapi.appmonitors(zkclient)
            ]
            # get_appmonitor may yield None (e.g. a monitor removed between
            # the listing and the read — TODO confirm); drop those, then
            # apply the glob pattern to the monitor id.
            filtered = [
                monitor for monitor in monitors
                if (monitor is not None and
                    fnmatch.fnmatch(monitor['_id'], match))
            ]
            return sorted(filtered, key=lambda item: item['_id'])

        @schema.schema(
            {'$ref': 'appmonitor.json#/resource_id'},
        )
        def get(rsrc_id):
            """Get application monitor configuration."""
            zkclient = context.GLOBAL.zk.conn
            # raise_notfound makes a missing monitor surface as an error
            # instead of returning None.
            return masterapi.get_appmonitor(zkclient, rsrc_id,
                                            raise_notfound=True)

        @schema.schema(
            {'$ref': 'appmonitor.json#/resource_id'},
            {'allOf': [{'$ref': 'appmonitor.json#/resource'},
                       {'$ref': 'appmonitor.json#/verbs/create'}]}
        )
        def create(rsrc_id, rsrc):
            """Create (configure) application monitor."""
            zkclient = context.GLOBAL.zk.conn
            # Only the monitor count is persisted; the fresh state is read
            # back so the caller sees what was stored.
            masterapi.update_appmonitor(zkclient, rsrc_id, rsrc['count'])
            return masterapi.get_appmonitor(zkclient, rsrc_id)

        @schema.schema(
            {'$ref': 'appmonitor.json#/resource_id'},
            {'allOf': [{'$ref': 'appmonitor.json#/resource'},
                       {'$ref': 'appmonitor.json#/verbs/update'}]}
        )
        def update(rsrc_id, rsrc):
            """Update application configuration."""
            # Same underlying master call as create(); only the schema verb
            # (and thus request validation) differs.
            zkclient = context.GLOBAL.zk.conn
            masterapi.update_appmonitor(zkclient, rsrc_id, rsrc['count'])
            return masterapi.get_appmonitor(zkclient, rsrc_id)

        @schema.schema(
            {'$ref': 'appmonitor.json#/resource_id'},
        )
        def delete(rsrc_id):
            """Delete configured application monitor."""
            zkclient = context.GLOBAL.zk.conn
            masterapi.delete_appmonitor(zkclient, rsrc_id)
            return None

        # Public REST verbs.
        self.list = _list
        self.get = get
        self.create = create
        self.update = update
        self.delete = delete
|
import numpy as np
from contrast import Contrast
class main(Contrast):
    """Drive reflectance-contrast calculations for several thin-film stacks
    (graphene on SiO2/Si, MoS2 on SiO2/Si, and blood between glass) using
    tabulated or fitted refractive indices."""

    def __init__(self, layers, incident_light):
        # layers: number of layers in the stack (ambient air not counted)
        # incident_light: 1 = light incident from above, 0 = from below
        self.layers = layers
        self.incident_light = incident_light
        self.disulfuro = np.loadtxt("./refractive_indexes/disulfuro_molibdeno.txt", delimiter='\t') # Disulfuro wavelength and refractive index
        self.silicon = np.loadtxt("./refractive_indexes/silicon.txt", delimiter='\t') # Silicon wavelength and refractive index
        # NOTE(review): MoS2 combines n and k as n - i*k while silicon uses
        # n + i*k — confirm the differing sign conventions are intentional.
        self.disulfuro_n = self.disulfuro[:,1] - 1j*self.disulfuro[:,2]; # Disulfuro refractive index
        self.silicon_n = self.silicon[:,1] + 1j*self.silicon[:,2]; # Silicon refractive index
        self.air_n = 1.0 + 0j # Air refractive index

    def silicon_dioxide_n(self, wavelength):
        """SiO2 refractive index from a 6th-degree polynomial fit in the
        wavelength (presumably nm — TODO confirm units vs. the data files)."""
        n = 4.7996E-18*wavelength**6 - 1.9105E-14*wavelength**5 + 3.1531E-11*wavelength**4 - 2.7757E-08*wavelength**3 + 1.3872E-05*wavelength**2 - 3.8045E-03*wavelength + 1.9176
        return n

    def blood_n(self, wavelength):
        """Blood refractive index from a two-term Sellmeier fit."""
        # From 400 to 750 nm wavelength
        A1 = 0.7960
        A2 = 5.1819e-6
        B1 = 1.0772e4
        B2 = -7.8301e5 # Sellmeier coefficients
        n = 1 + ((A1*wavelength**2)/(wavelength**2 - B1))+(A2*wavelength**2)/(wavelength**2 - B2)
        n = np.sqrt(n)
        return n

    def glass_n(self, wavelength):
        """Glass refractive index from a 5th-degree polynomial fit."""
        n = (-3.9079E-15*wavelength**5 + 1.2309E-11*wavelength**4 - 1.5542E-08*wavelength**3 + 9.9180E-06*wavelength**2 - 3.2596E-03*wavelength + 1.9673) + 0j #extrapoleted from [2]
        return n

    def grafeno(self):
        """Contrast of graphene on a SiO2/Si stack vs. SiO2 thickness."""
        wavelength = self.silicon[:,0]
        graphene_n = 2.6-1.3j # Graphene refractive index
        silicon_dioxide_n = self.silicon_dioxide_n(wavelength)
        graphene_thickness = [0.34]
        silicon_dioxide_thickenss = np.linspace(0,350,30)
        silicon_dioxide_samples_thickness = np.linspace(0,350,5) # Samples of thickness for the linear plot
        contrast = Contrast(self.layers, silicon_dioxide_samples_thickness, wavelength, self.incident_light)
        contrast.refractive_indexes(self.air_n, graphene_n, silicon_dioxide_n, self.silicon_n)
        contrast.layers_thickness(graphene_thickness, silicon_dioxide_thickenss)
        contrast.thicknes_l1_variation("graphene", "SiO_2")

    def disulfuro_molibdeno(self):
        """Contrast of MoS2 on a SiO2/Si stack vs. SiO2 thickness."""
        wavelength = self.disulfuro[:,0]
        disulfuro_molibdeno_n = self.disulfuro_n
        silicon_dioxide_n = self.silicon_dioxide_n(wavelength)
        disulfuro_molibdeno_thickness = [0.55, 0.60]
        silicon_dioxide_thickness = np.linspace(0,350,30)
        silicon_dioxide_samples_thickness = np.linspace(0,350,5) # Samples of thickness for the linear plot
        contrast = Contrast(self.layers, silicon_dioxide_samples_thickness, wavelength, self.incident_light)
        contrast.refractive_indexes(self.air_n, disulfuro_molibdeno_n, silicon_dioxide_n, self.silicon_n)
        contrast.layers_thickness(disulfuro_molibdeno_thickness, silicon_dioxide_thickness)
        contrast.thicknes_l1_variation("disulfuro_molibdeno", "SiO_2")

    def glass_blood(self):
        """Contrast of a glass/blood/glass stack vs. blood-layer thickness."""
        wavelength = np.linspace(400,700,50)
        blood_n = self.blood_n(wavelength)
        glass_n = self.glass_n(wavelength)
        glass_thickness = [140000]
        blood_thickness =np.arange(2000, 5000, 50).tolist()
        blood_samples_thickness = np.arange(2000, 5000, 500).tolist() # Samples of thickness for the linear plot
        contrast = Contrast(self.layers, blood_samples_thickness, wavelength, self.incident_light)
        contrast.refractive_indexes(self.air_n, glass_n, blood_n, glass_n)
        contrast.layers_thickness(glass_thickness, blood_thickness)
        contrast.thicknes_l1_variation("glass_blood", "Sangre")

    def oil_glass_blood(self):
        """Contrast of an oil/glass/blood/glass stack (4 layers) vs.
        blood-layer thickness."""
        wavelength = np.linspace(400,700,50)
        oil_n = 1.516
        blood_n = self.blood_n(wavelength)
        glass_n = self.glass_n(wavelength)
        blood_thickness = np.arange(2000, 5000, 50).tolist()
        glass_thickness = [140000]
        oil_thickness = np.arange(200, 500, 50).tolist()
        blood_samples_thickness = np.arange(2000, 5000, 1000).tolist() # Samples of thickness for the linear plot
        contrast = Contrast(self.layers, blood_samples_thickness, wavelength, self.incident_light)
        contrast.refractive_indexes_glass(self.air_n, oil_n, glass_n, blood_n, glass_n)
        contrast.layers_thickness(oil_thickness, blood_thickness)
        contrast.thicknes_l1_variation_glass("oil_glass_blood", glass_thickness, "Sangre")
if __name__ == "__main__":
layers = 3 # Number of layers, it does not include air
incident_light = 0 # 1 up and 0 down light incident
main_class = main(layers, incident_light) #
# Graphene
#main_class.grafeno()
# Disulfuro Molibdeno
#main_class.disulfuro_molibdeno()
# glass_blood
main_class.glass_blood()
layers = 4 # Number of layers, it does not include air
incident_light = 0 # 1 up and 0 down light incident
main_class = main(layers, incident_light) #
# oil_glass_blood
main_class.oil_glass_blood()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.