text stringlengths 37 1.41M |
|---|
"""functions to plot increase in various functions for quiz 1"""
import math
import matplotlib.pyplot as pyplot
def gen_vals(values, func):
    '''Return the y values for the plot: func applied to each element.

    The parameter was renamed from `list`, which shadowed the builtin,
    and the append loop was replaced by a comprehension.
    '''
    return [func(num) for num in values]
def gen_ten_to_nth(val):
    '''Return 10 raised to the power val.'''
    return pow(10, val)
def gen_n_to1point5 (val):
    '''Return val raised to the 1.5 power.'''
    return pow(val, 1.5)
def gen_two_to_sqrt_log_n (val):
    '''Return 2 to the square root of log2 of n.

    Bug fix: the original returned only sqrt(log2(val)) and never raised
    2 to that power, contradicting its own docstring.
    '''
    return 2 ** math.sqrt(math.log2(val))
def gen_n_to_5thirds (val):
    '''Return val raised to the 5/3 power.'''
    return pow(val, 5 / 3)
def generate_chart(value_range):
    '''Plot the growth of several functions over value_range on log-log axes.'''
    x_values = gen_vals(value_range, float)
    square_roots = gen_vals(value_range, math.sqrt)
    # commented out because so much bigger
    # ten_to_n = gen_vals(value_range, gen_ten_to_nth)
    n_to_one_point_five = gen_vals(value_range, gen_n_to1point5)
    two_to_sqrt = gen_vals(value_range, gen_two_to_sqrt_log_n)
    n_to_five_thirds = gen_vals(value_range, gen_n_to_5thirds)
    pyplot.plot(x_values, square_roots, label='square roots')
    # commented out because so much bigger
    # pyplot.plot(x_values, ten_to_n, label='ten to the nth')
    pyplot.plot(x_values, n_to_one_point_five, label='n to the 1.5')
    # label typo fixed: 'lot of n' -> 'log of n'
    pyplot.plot(x_values, two_to_sqrt, label='two to the sqrt(log of n)')
    pyplot.plot(x_values, n_to_five_thirds, label='n to the 5/3')
    pyplot.legend(loc='upper left')
    # log-log axes make the growth rates comparable as straight-ish lines
    pyplot.xscale('log')
    pyplot.yscale('log')
    pyplot.show()
# Run only when executed as a script, not on import.
if __name__ == '__main__':
    number_list = range(1, 301)
    generate_chart(number_list)
|
import numpy as np
import numpy.random as random
import game_functions as game
from abc import ABC, abstractmethod
import sqlite3
# run csv_to_sql here
# Directory that holds the item databases.
path = "C:/Users/Grace Sun/physics_hackathon/Databases"
# name additional conn based on what databases you want to input; make sure to close conn at the end!
# NOTE(review): path + "Items" + ".db" yields ".../DatabasesItems.db" (no "/" between
# directory and file name) — confirm that is the intended file.
conn1 = sqlite3.connect(path+"Items"+".db")
c = conn1.cursor()
class Item(ABC):
    """Abstract base for all items; loads shared stats from the Items database."""
    def __init__(self, Item_Name, **kwargs):
        # fetchone()[0] extracts the float Weight from the single result row;
        # the parameterized query avoids SQL injection.
        self.Weight = c.execute('SELECT Weight FROM Items WHERE NAME = ?', [Item_Name]).fetchone()[0]
#test = Item('Club')
#print(test.Weight)
class Equippable(Item):
    """Items that a character can equip and unequip."""
    def __init__(self, owner):
        # NOTE(review): super().__init__ is not called, so Weight is never
        # loaded for equippables — confirm intended.
        pass
    @abstractmethod
    def equip(character):
        # NOTE(review): missing `self` parameter — `character` will receive
        # the instance when called as a method; confirm intended.
        pass
    @abstractmethod
    def unequip(character):
        pass
class Consumable(Item):
    """Items that are used up when consumed (stats not yet implemented)."""
    def __init__(self):
        pass
    def consume(character):
        # NOTE(review): missing `self` parameter — confirm intended call convention.
        pass
class Weapon(Equippable):
    """Equippable weapon; the hand flags control one/two-handed use."""
    def __init__(self, **kwargs):
        # NOTE(review): kwargs is unused and super().__init__ is not called.
        self.one_handed=False
        self.two_handed=False
class Armor(Equippable):
    """Equippable body armor (stats not yet implemented)."""
    def __init__(self):
        pass
class Shield(Equippable):
    """Equippable shield (stats not yet implemented)."""
    def __init__(self):
        pass
conn1.close() |
class Zone:
    """A labelled point with (x, y, z) coordinates; identity is the label alone."""

    def __init__(self, label, x, y, z):
        self.label = label
        self.x = x
        self.y = y
        self.z = z

    def show(self):
        """Print a human-readable description of the zone."""
        print(f"# Zone : {self.label}")
        print(f"# Coordinates : ({self.x},{self.y},{self.z})")

    def to_string(self):
        """Return label and coordinates, each right-aligned to a width of 3."""
        return " ".join(f"{field!s:>3}" for field in (self.label, self.x, self.y, self.z))

    def __repr__(self):
        return str(self.label)

    def __hash__(self):
        # Hash only the label so equal zones hash equally.
        return hash(self.label)

    def __str__(self):
        return str(self.label)

    def __eq__(self, other):
        # Zones compare equal when their labels match, regardless of coordinates.
        return self.label == other.label
def SearchZone(zones, label):
    """Return the first zone in `zones` whose label equals `label`, or None.

    The parameter was renamed from `list`, which shadowed the builtin.
    """
    return next((zone for zone in zones if zone.label == label), None)
|
__author__ = "Christopher Raleigh and Anthony Ferrero"
from board_square_type import BoardSquareType
from board import Board
from board_state import BoardState
def generate_from_file(ascii_board_file_path):
    """Returns a maze (BoardState) built from an inputted ASCII board file.

    '@' marks the agent's starting square; every character is mapped to a
    BoardSquareType via char_to_board_square_type().

    Fixes: `xrange` is Python 2 only (NameError on Python 3) and string
    comparison must use `==`, not the identity operator `is`.
    """
    agent_x = 0
    agent_y = 0
    with open(ascii_board_file_path, 'r') as ascii_board_file:
        file_lines = ascii_board_file.readlines()
    # -1 strips the trailing newline from the width count.
    board_width = len(file_lines[0]) - 1
    board_height = len(file_lines)
    board = Board(width=board_width, height=board_height)
    for j in range(board_height):
        for i in range(board_width):
            next_char = file_lines[j][i]
            if next_char == '@':
                agent_x = i
                agent_y = j
            square_type = char_to_board_square_type(next_char)
            board.set_square(i, j, square_type)
    board_state = BoardState(board, agent_x, agent_y)
    return board_state
def char_to_board_square_type(character):
    """Map a board-file character to its BoardSquareType.

    '.' -> empty, '#' -> wall, '%' -> food; any other character (including
    the '@' agent marker) defaults to empty.
    """
    return {
        '.': BoardSquareType.empty,
        '#': BoardSquareType.wall,
        '%': BoardSquareType.food,
    }.get(character, BoardSquareType.empty)
|
# Implementar la función organizar_estudiantes() que tome como parámetro una lista de Estudiantes
# y devuelva un diccionario con las carreras como keys, y la cantidad de estudiantes en cada una de ellas como values.
from practico_02.ejercicio_04 import Estudiante
def organizar_estudiantes(e):
    """Return a dict mapping each career to the number of students in it."""
    dcarrera = {}
    for estudiante in e:
        # get() with a default collapses the new/existing-career branches.
        dcarrera[estudiante.carr] = dcarrera.get(estudiante.carr, 0) + 1
    print(dcarrera)
    return dcarrera
# Build four sample students across two careers.
# NOTE(review): positional Estudiante arguments assumed to start with the
# career — confirm against practico_02.ejercicio_04.
e = []
x = Estudiante("Ingeniería en Sistemas", 2018, 20, 10, "Fulanito", 19, 'H', 64.3, 1.72)
y = Estudiante("Ingeniería en Sistemas", 2018, 20, 10, "Cosme", 19, 'H', 64.3, 1.72)
z = Estudiante("Ingeniería Quimica", 2018, 20, 10, "Fulanito", 19, 'H', 64.3, 1.72)
w = Estudiante("Ingeniería Quimica", 2018, 20, 10, "Cosme", 19, 'H', 64.3, 1.72)
e.append(x)
e.append(y)
e.append(z)
e.append(w)
# Two students per career expected.
assert organizar_estudiantes(e) == {'Ingeniería en Sistemas': 2, 'Ingeniería Quimica': 2}
|
# Implementar la función es_primo(), que devuelva un booleano en base a
# si numero es primo o no.
def es_primo(num):
    """Return True if `num` is prime, False otherwise.

    Fixes: the original had an unreachable `break` after a `return` and a
    convoluted flag variable; trial division now stops at the integer
    square root instead of num-1.
    """
    if num < 2:
        return False
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True
assert es_primo(4) == False
assert es_primo(7) == True
|
# Implementar la función mitad(), que devuelve la mitad de palabra.
# Si la longitud es impar, redondear hacia arriba.
# hola -> ho
# verde -> ver
import math
def mitad(palabra):
    """Return the first half of `palabra`, rounding up for odd lengths.

    hola -> ho, verde -> ver. Integer ceiling division replaces the
    redundant int(len(...)) and the float round-trip through math.ceil.
    """
    mit = palabra[:(len(palabra) + 1) // 2]
    print(mit)
    return mit
assert mitad("hola") == "ho"
assert mitad("verde") == "ver"
|
# Created 2021/3/15 22:21
# Keep at it, tester!
# Python input/output: literals, string interpolation via str.format(),
# and f-strings (formatted string literals, Python >= 3.6 — the simplest option).
name = "Decade"
age = 20
# The numbers inside {} index format()'s arguments and may be repeated.
print("my name is {0},age is {1}{1}{0}".format(name,age))
# format() accepts lists and dicts directly.
list1 = [1,2,3]
dic1 = {"a" : "a","b" : "b","c" : "c"}
print("输出列表{}和字典{}".format(list1,dic1))
# A leading * unpacks a list into separate format() arguments.
print("现在有的数字{}{}{}".format(*list1))
# ** unpacks a dict into keyword arguments for named placeholders.
print("现在有的字母{a}{b}{b}".format(**dic1))
# f-strings: {} may hold constants, variables, expressions or function calls.
print(f"我的名字是{name},age is {age},{list1[0]}{dic1['a']}")
# No backslash escapes inside f-string {}; use () around lambdas, no bare ':'.
print(f"举例的表达式{3+2},{name.upper()},{(lambda x:x**2)(2)}")
# File reading, step by step: open, operate on the descriptor, close.
a = open("learn_py_06.txt")
print(a.readable())  # is the file readable?
#print(a.readlines())  # would read all lines into a list
print(a.readline())  # read a single line
a.close()
# A with-block closes the file automatically when the block exits.
with open("learn_py_06.txt","rt") as b:
    while True:
        line = b.readline()
        if line:
            print(line)
        else:
            break
    # print(b.readlines()) would hold the whole file in memory at once
# Images need binary mode "rb"; normal text uses "rt" (the default).
# JSON conversion.
import json
data = {
    "name":["Decade","DDD"],
    "age":30,
    "ride":"kaman"
}
data1 = json.dumps(data)  # dict -> JSON string
print(type(data))
print(data)
print(data1)
print(type(data1))
data2 = json.loads(data1)  # JSON string -> dict
print(data2)
print(type(data2))
# Created 2021/3/13 12:05
# Keep at it, tester!
# Variable assignment, including multiple assignment.
a = 1
b = 1
c,d = 3,4
print(a,b,c,d)
# Numeric types: int, float, complex.
int_a = 3
float_a = 44.5
complex_a = 1234j
print(type(complex_a))
print(type(int_a))
print(type(float_a))
# Expected output:
#<class 'complex'>
#<class 'int'>
#<class 'float'>
# Strings and escape sequences.
string_a = "第一行\n第二行"
print(string_a)
# To print a literal \n, escape the backslash or use a raw string.
string_b = "第一行\\n第一行"
string_c = r"第一1行\n第一1行"  # r: raw string — escapes are not interpreted
print(string_b)
print(string_c)
# Concatenation: + joins variables; literal adjacency only works for literals.
print(string_c+string_b+string_a)
print("aa" "bb")
# Interpolation: f-string vs str.format().
D = "Decade"
DD = f"DDD{D}"
DDD = "DDD{}"
print(DD)
print(DDD.format(D))
# Output: DDDDecade
# format() with several positional arguments.
D_a = "Dec"
D_b = "ade"
DDDD = "DDD{}{}"
print(DDDD.format(D_a,D_b))
# format() with named placeholders for precise control.
DDdd = "DDD{a}{b}"
print(DDdd.format(a=D_a,b=D_b))
# List indexing; negative indexes count from the end (-2 is second-to-last).
list_a = [1,2,4,5,"asd","zxcv",2j]
print(list_a[0])
print(list_a[-2])
# Slicing: [3:5] is the half-open range [3, 5) — index 3 included, 5 excluded.
print(list_a[3:5])
|
# 开发时间2021/3/16 23:50
# 加油啊测试!
#错误和异常
#错误基本都是语法错误,熟练之后可以避免
#主要看异常
from genshin import chouka
class MyError(Exception):
    """Custom exception raised when more than `value` gacha pulls are requested."""
    def __init__(self):
        # Maximum number of pulls allowed per request.
        self.value = 90
# Demonstrates try/except/else/finally with a custom exception.
while True:
    try:
        x = int(input("请输入抽卡次数:"))
        if x > 90:
            raise MyError
    except ValueError:
        # int() failed: the input was not a number.
        print("您输入的不是数字,请再次尝试")
    except MyError as asd:
        print(f"您一次最多只能抽{asd.value}发")
    else:
        # No exception: perform the pulls and leave the loop.
        chouka(x)
        break
    finally:
        # Runs on every iteration, whether or not an exception occurred.
        print("finally")
print("抽卡结束")
# Predefined clean-up behaviour: `with` closes the file automatically.
with open("learn_py_06.txt") as ff:
    for line in ff:
        print(line, end="")
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 3 15:58:41 2018
@author: Liam
"""
import numpy as np
def coin_flips_analyse(flips,nb_flips):
    """
    Return the fraction of heads (flips equal to 1) among nb_flips flips.
    """
    count_heads = sum(1 for flip in flips if flip == 1)
    return count_heads / nb_flips
def flip_coins(nb_coins,nb_flips):
    """
    Flip nb_coins coins, nb_flips times each.

    Returns (coins_data, minimum_heads): coins_data holds one row per coin —
    its flips followed by its fraction of heads — and minimum_heads is the
    smallest fraction of heads observed across the sample.
    """
    coins_data = []
    minimum_heads = 1  # fractions are always <= 1, so this is a safe start
    for _ in range(nb_coins):
        # 1 encodes heads.
        flips = np.random.binomial(1, 0.5, size=nb_flips)
        fraction = coin_flips_analyse(flips, nb_flips)
        coins_data.append(np.append(flips, fraction))
        minimum_heads = min(minimum_heads, fraction)
    return coins_data, minimum_heads
if __name__ == '__main__':
    #The number of coins flipped in one experience
    nb_coins = 1000
    #The number of flips done by a single coin
    nb_flips = 10
    #Number of independent experiences done
    nb_experiences = 100000
    sum_min_freq = 0
    sum_rand_freq = 0
    sum_first_freq = 0
    head_frequencies = []
    # NOTE(review): curr_experience is created once and never reset inside the
    # loop, so it keeps accumulating (and head_frequencies stores the same
    # ever-growing list each time) — confirm intended.
    curr_experience = []
    for i in range(nb_experiences):
        #Get the min fraction, the random fraction and the first fraction
        coins_data, min_freq = flip_coins(nb_coins,nb_flips)
        # NOTE(review): the literals 10 and 1000 duplicate nb_flips and
        # nb_coins; index 10 is the appended fraction column.
        first_freq = coins_data[0][10]
        rand_freq = coins_data[np.random.randint(1000)][10]
        curr_experience.append(first_freq)
        curr_experience.append(rand_freq)
        curr_experience.append(min_freq)
        head_frequencies.append(curr_experience)
        sum_min_freq = sum_min_freq + min_freq
        sum_rand_freq = sum_rand_freq + rand_freq
        sum_first_freq = sum_first_freq + first_freq
        #Display every 1000 experiences done
        if (i+1)%1000 == 0:
            print("Step {}".format(i+1))
    # Fraction averages (minimum, random, first) over all experiences.
    min_freq_average = sum_min_freq/nb_experiences
    rand_freq_average = sum_rand_freq/nb_experiences
    first_freq_average = sum_first_freq/nb_experiences
    print("The average value of the minimum frequency is {}".format(min_freq_average))
    print("The average value of the first coin frequency is {}".format(first_freq_average))
    print("The average value of the random coin frequency is {}".format(rand_freq_average))
|
"""
This is a python version of the genetic algorithm tutorial from ai-junkie.com
converted by Ryan Massoth
http://www.ryanmassoth.com
https://github.com/rmassoth/ga_tut.git
"""
import random
# constants
CROSSOVER_RATE = 0.7             # probability two parents swap tails
MUTATION_RATE = 0.001            # per-bit flip probability
POP_SIZE = 1000                  # chromosomes per generation
CHROMO_LENGTH = 300              # bits per chromosome
GENE_LENGTH = 4                  # bits per gene (one decimal symbol)
MAX_ALLOWABLE_GENERATIONS = 100  # give up after this many generations
# return random number between 0 and 1
RANDOM_NUM = random.random
class chromo_typ():
    """
    A chromosome: a bit string of genes plus its current fitness score.
    """
    def __init__(self, bits="", fitness=0.0):
        self.bits = bits          # binary-encoded gene sequence
        self.fitness = fitness    # set by assign_fitness(); 999.0 marks a solution
def get_random_bits(length):
    """Return a string of random 1s and 0s of desired length.

    str.join builds the string in one pass, replacing the original
    quadratic += concatenation in a loop.
    """
    return "".join("1" if RANDOM_NUM() > 0.5 else "0" for _ in range(length))
def bin_to_dec(bit_string):
    """Interpret `bit_string` as base-2 digits and return the integer value."""
    return int(bit_string, base=2)
def parse_bits(bit_string, gene_buffer, gene_length):
    """
    Given a string, this function will step through the genes one at a time
    and insert the decimal values of each gene into the buffer.

    We alternate: operator - number - operator - number ... ignoring the
    unused genes 1110 and 1111 (and any gene that does not fit the phase
    we are currently looking for).

    gene_length is a parameter for automated-test purposes, and unlike the
    original C++ this does not return the element count (unnecessary in
    Python — the buffer is mutated in place).
    """
    # Flag to determine if we are looking for an operator or a number
    b_operator = True
    # Storage for a decimal value of currently tested gene
    this_gene = 0
    for i in range(0, len(bit_string), gene_length):
        # Convert the current gene to decimal
        this_gene = bin_to_dec(bit_string[i:i+gene_length])
        # Find a gene which represents an operator (codes 10..13)
        if b_operator:
            if this_gene < 10 or this_gene > 13:
                continue
            else:
                b_operator = False
                gene_buffer.append(this_gene)
                continue
        # Find a gene which represents a number (codes 0..9)
        else:
            if this_gene > 9:
                continue
            else:
                b_operator = True
                gene_buffer.append(this_gene)
                continue
    # Guard against divide-by-zero downstream: a '/' (13) followed by a 0 is
    # rewritten to '+' (10); this does not affect the evolution of a solution.
    for i, val in enumerate(gene_buffer):
        if i < len(gene_buffer)-1:
            if val == 13 and gene_buffer[i+1] == 0:
                gene_buffer[i] = 10
def assign_fitness(bit_string, target_value):
    """
    Decode `bit_string` into an arithmetic expression, evaluate it left to
    right, and return a fitness score: 999.0 for an exact match with
    target_value, otherwise 10 / |target - result|.
    """
    # Holds decimal values of gene sequence
    gene_buffer = []
    # Fill buffer from bit_string (operator - number - operator - number ...)
    parse_bits(bit_string, gene_buffer, GENE_LENGTH)
    # Evaluate strictly left to right; no operator precedence.
    result = 0.0
    for i in range(0, len(gene_buffer)-1, 2):
        # 10..13 encode + - * / respectively.
        if gene_buffer[i] == 10:
            result += gene_buffer[i+1]
        elif gene_buffer[i] == 11:
            result -= gene_buffer[i+1]
        elif gene_buffer[i] == 12:
            result *= gene_buffer[i+1]
        elif gene_buffer[i] == 13:
            # Safe: parse_bits rewrote any '/' followed by 0 to '+'.
            result /= gene_buffer[i+1]
    # An exact solution gets an arbitrarily high score; otherwise score rises
    # as the result approaches the target.
    if result == target_value:
        return 999.0
    else:
        return 10/abs(target_value - result)
def print_chromo(bit_string):
    """
    Decode `bit_string` and print its operator/number sequence to the screen.
    """
    # Buffer to hold the decoded chromosome.
    gene_buffer = []
    parse_bits(bit_string, gene_buffer, GENE_LENGTH)
    # The original used enumerate() but discarded the index — iterate directly.
    for val in gene_buffer:
        print_gene_symbol(val)
def print_gene_symbol(val):
    """
    Print the symbol for one decoded gene value (digit or operator),
    without a trailing newline.
    """
    if val < 10:
        print(val, " ", end="")
    else:
        # 10..13 encode the four operators; anything else prints only a space.
        operators = {10: "+", 11: "-", 12: "*", 13: "/"}
        if val in operators:
            print(operators[val], end="")
        print(" ", end="")
def mutate(bit_string):
    """
    Flip each bit of the chromosome with probability MUTATION_RATE.

    Returns (new_string, mutated) where `mutated` says whether any bit
    flipped. Fixes: Python strings are immutable, so the original's
    bit_string[i] = ... raised TypeError whenever a mutation fired, and the
    `mutated` flag was reset on every loop iteration so it only reflected
    the final bit.
    """
    bits = list(bit_string)
    mutated = False
    for i in range(len(bits)):
        if RANDOM_NUM() < MUTATION_RATE:
            mutated = True
            bits[i] = "0" if bits[i] == "1" else "1"
    return "".join(bits), mutated
def crossover(offspring1, offspring2):
    """
    Dependent on the CROSSOVER_RATE, select a random point along the length
    of the chromosomes and swap all the bits after that point.

    Returns (child1, child2, crossed_over). The cut-point local was renamed
    from `crossover`, which shadowed this function's own name.
    """
    if RANDOM_NUM() < CROSSOVER_RATE:
        cut = int(RANDOM_NUM() * len(offspring1))
        child1 = "{}{}".format(offspring1[:cut], offspring2[cut:])
        child2 = "{}{}".format(offspring2[:cut], offspring1[cut:])
        return (child1, child2, True)
    return (offspring1, offspring2, False)
def roulette(total_fitness, population, pop_size):
    """
    Select a chromosome's bits via fitness-proportional (roulette wheel)
    selection; returns "" if the wheel runs past every chromosome.
    """
    # A random stopping point on the wheel, in [0, total_fitness).
    threshold = RANDOM_NUM() * total_fitness
    accumulated = 0.0
    for chromo in population[:pop_size]:
        accumulated += chromo.fitness
        # Stop at the first chromosome whose cumulative fitness reaches it.
        if accumulated >= threshold:
            return chromo.bits
    return ""
def main():
    """
    Main function where all the magic happens: evolve a population of
    chromosomes until one decodes to an expression equal to the target.
    """
    # Loop forever. I will probably change this.
    while True:
        population = []
        # Get a target number from the user. (no error checking — a
        # non-numeric target makes float(target) raise later)
        target = input("Input a target number: ")
        # First create a random population, all with zero fitness
        for _ in range(POP_SIZE):
            new_chromo = chromo_typ(bits=get_random_bits(CHROMO_LENGTH),
                                    fitness=0.0)
            population.append(new_chromo)
        generations_required_to_find_solution = 0
        # We will set this flag if a solution has been found
        bFound = False
        total_fitness_list = []
        while not bFound:
            # Main GA loop
            # Used for roulette wheel sampling
            total_fitness = 0.0
            # Test and update the fitness of every chromosome in the
            # population
            for i in range(POP_SIZE):
                population[i].fitness = assign_fitness(
                    population[i].bits, float(target))
                total_fitness += population[i].fitness
            total_fitness_list.append(total_fitness)
            # Check to see if we have found any solutions (fitness will be 999)
            for i in range(POP_SIZE):
                if population[i].fitness == 999.0:
                    print("Solution found in ",
                          generations_required_to_find_solution,
                          " generations")
                    print_chromo(population[i].bits)
                    bFound = True
                    break
            # Create a new population by selecting two parents at a time and
            # creating offspring by applying crossover and mutation. Do this
            # until the desired number of offspring have been created.
            # Define some temporary storage for the new population we are about
            # to create.
            previous_generation = population
            temp = []
            cPop = 0
            # Loop until we have created POP_SIZE new chromosomes
            while cPop < POP_SIZE:
                # We are going to create the new population by grabbing members
                # of the old population two at a time via roulette wheel
                # selection.
                offspring1 = roulette(total_fitness, population, POP_SIZE)
                offspring2 = roulette(total_fitness, population, POP_SIZE)
                # Add crossover dependent on the crossover rate
                # NOTE(review): mutate() is never applied and crossed_over is
                # unused — confirm intended.
                offspring1, offspring2, crossed_over = crossover(offspring1,
                                                                 offspring2)
                # Add these offspring to the new population. (Assigning zero as
                # their fitness scores)
                temp.append(chromo_typ(offspring1, 0.0))
                temp.append(chromo_typ(offspring2, 0.0))
                cPop += 2
            # Copy temp population into main population array
            population = temp
            generations_required_to_find_solution += 1
            # Exit app if no solution found within the maximum allowable number
            # of generations; dump the last full generation for analysis.
            if generations_required_to_find_solution > \
                    MAX_ALLOWABLE_GENERATIONS:
                print("No solutions found this run!")
                bFound = True
                with open("fitness.csv", 'w') as output:
                    for i, val in enumerate(previous_generation):
                        output.write('{},{}\n'.format(
                            val.fitness, val.bits))
def if_check_triangle_make(a,b,c):
    """Return "Yes" if sides a, b, c satisfy all three triangle inequalities,
    else "No".

    Bug fix: the original did `b=0` and reused `b` as the counter, clobbering
    the second side before testing the inequalities.
    """
    # True counts as 1, so `ok` is the number of inequalities that hold.
    ok = (a + b > c) + (a + c > b) + (b + c > a)
    return "Yes" if ok == 3 else "No"
|
def if_check_digit_number_odd_even(a):
    """Classify `a` as a two- or three-digit odd/even number (Uzbek labels);
    any other magnitude falls through and returns None, as before."""
    is_odd = a % 2
    three_digit = (a // 100) > 0 and (a // 1000) == 0
    two_digit = (a // 10) > 0 and (a // 100) == 0
    if three_digit:
        return "Uch xonali toq son" if is_odd else "Uch xonali juft son"
    if two_digit:
        return "Ikki xonali toq son" if is_odd else "Ikki xonali juft son"
def if_check_temperature(Temp):
    """Classify a temperature into a descriptive band.

    Bug fix: exactly 0 matched no branch in the original (a<0 and a>0 both
    false) and returned None; it is now classified as "Freezing". All other
    boundaries are unchanged.
    """
    if Temp <= 0:
        return "Freezing"
    elif Temp < 11:
        return "Very Cold"
    elif Temp < 21:
        return "Cold"
    elif Temp < 31:
        return "Normal"
    elif Temp < 41:
        return "Hot"
    else:
        return "Very Hot"
# Reads an integer n and prints 2*(n+3)^2 (math.pow returns a float).
# NOTE(review): sqrt is imported but never used.
from math import sqrt ,pow
n=int(input())
print(2*pow(n+3,2))
import os
import csv
import operator
# Tally election_data.csv (candidate name in column 2) and report totals,
# per-candidate percentages, and the winner, to stdout and Output.txt.
csvpath = os.path.join(".","Resources","election_data.csv")
vote_count = 0
winner_candidate = 'none'
distData={}
sorted_distData={}
with open (csvpath, newline='') as votefile:
    csvread = csv.reader(votefile, delimiter=',')
    # skip the header row
    next(csvread,None)
    # read the file row by row
    for row in csvread:
        vote_count += 1
        # Check if the candidate exists in the tally dictionary
        if row[2] in distData:
            thisCount = distData[row[2]]
            newCount = thisCount + 1
            distData[row[2]] = newCount
        # Add the candidate in the dictionary if the name is not present
        else:
            distData[row[2]] = 1
# Sort candidates by vote count, descending — the report's print order.
sorted_distData = sorted(distData.items(), key=lambda x: x[1], reverse=True)
# Now generate the report on stdout.
print (f"\nElection Results \n------------------------- \nTotal Votes: {str(vote_count)} \n-------------------------")
# Per-candidate percentage of the total vote plus the raw count.
idx = 0
for Winner in sorted_distData:
    perCent = float((float(Winner[1])/float(vote_count))*100)
    print (str(Winner[0]) + ": " + str(format(perCent, '.3f')) + "% (" + str(Winner[1]) + ")")
    idx = idx + 1  # NOTE(review): idx is incremented but never read
key_max = max(distData.keys(), key=(lambda k: distData[k]))
print(f"\n------------------------- \n Winner: {str(key_max)} \n------------------------- \n ")
# Write the identical report to a file.
with open("Output.txt", "w") as text_file:
    print (f"\nElection Results \n------------------------- \nTotal Votes: {str(vote_count)} \n-------------------------",file=text_file)
    idx = 0
    for Winner in sorted_distData:
        perCent = float((float(Winner[1])/float(vote_count))*100)
        print (str(Winner[0]) + ": " + str(format(perCent, '.3f')) + "% (" + str(Winner[1]) + ")", file=text_file)
        idx = idx + 1
    print(f"\n------------------------- \n Winner: {str(key_max)} \n------------------------- \n ", file=text_file)
|
# Guessing Game Exercise
# Write a program that makes the following:
# Ask the user: "What's my favorite food"
# If the user enters the name of your favorite food, output: "Yep! So amazing!"
# If the user doesn't enter the name of your favorite food, output: "Yuck! That's not it"
# Regardless of what the user enters, output: "Thanks for playing!"
# val = input("Enter your value: ")
# print(val)
# exit()
# Ask the player to guess the favorite food and react accordingly.
fav_food = input("What's my favorite food?")
print(fav_food)
# One conditional expression replaces the if/else print pair.
print("Yep! So amazing!" if fav_food == "pizza" else "Yuck! That's not it")
print("Thanks for playing!")
# Menu prices.
child_meal = 4.50
adult_meal = 9.00
shake_drink = 1.75
Pie_dessert = 1.00
# Bug fix: the original prompts were swapped — the child count asked
# "How adults..." and the adult count asked about children.
child = int(input("How many children are eating today? "))
adult = int(input("How many adults are eating today? "))
shake = int(input("How many icecream shakes do you want to drink? "))
pie = int(input("How many cherry pies would you like? "))
sales_tax_rate = 0.06
subtotal = (child_meal * child) + (adult_meal * adult) + (shake_drink * shake) + (Pie_dessert * pie)
print(f" Subtotal ${subtotal:.2f}")
# Bug fix: misplaced parentheses meant only the pies were multiplied by the
# tax rate; tax applies to the whole subtotal.
sales_tax = subtotal * sales_tax_rate
print(f" Sales Tax ${sales_tax:.2f}")
total_sale = subtotal + sales_tax
print(f" Total Sale ${total_sale:.2f}")
Amount_paid = float(input(" How much will you be paying? $"))
# Bug fix: change is what the customer gets back (paid - total), not the
# reverse, which printed a negative amount.
Change = Amount_paid - total_sale
print(f" ${Change:.2f}")
|
board1 = [1, 2, 3, 4, 5, 6]
board2 = ["!", "@", "$", "@", "$", "!"]
# Bug fix: input() returns a string, and the original used the raw string as
# a list index (board2[guess-1] raised TypeError); convert both guesses once.
guess = int(input("what would you like to guess?"))
for item in board1:
    if guess == item:
        # Reveal the symbol hidden at the guessed position.
        print(board2[item - 1])
    else:
        print(item)
guess2 = int(input("what number does it match?"))
if board2[guess - 1] == board2[guess2 - 1]:
    print("win")
else:
    print("loss")
#tri par base
def maxValue(array):
    """Return the largest getValue() in `array` (0 if empty or all negative)."""
    best = 0
    for index in range(array.size):
        best = max(best, array[index].getValue())
    return best
def radix_countingSort(array, base, power):
    """Stable counting sort of `array` on the digit (value // power) % base."""
    resultat = array.size * [0]
    offsets = base * [0]
    # Histogram: count how many elements carry each digit.
    for i in range(array.size):
        d = array[i].getValue()//power % base
        offsets[d] += 1
    # Prefix sums: offsets[d] becomes the index just past digit d's slots.
    for d in range(1, base):
        offsets[d] += offsets[d-1]
    # Walk backwards so equal digits keep their relative order (stable sort).
    for i in range(array.size-1, -1, -1):
        d = array[i].getValue()//power % base
        offsets[d] -= 1
        pos = offsets[d]
        resultat[pos] = array[i]
    # Copy the sorted result back into the input array.
    for i in range(array.size):
        array[i] = resultat[i]
def radixLSDSort(array, base=2):
    """LSD radix sort: counting-sort each digit, least significant first."""
    biggest = maxValue(array)
    power = 1
    # Loop until the current digit position exceeds the largest value.
    while power <= biggest:
        radix_countingSort(array, base, power)
        power *= base
def countingSort(array):
    """Counting sort: a single radix pass with base max+1 sorts everything."""
    m = maxValue(array)
    radix_countingSort(array, m+1, 1)
#tri par tas
# Index arithmetic for a binary heap stored flat in an array.
def heap_parent(i): return (i-1)//2
def heap_left(i): return 2*i + 1
def heap_right(i): return 2*i + 2
def siftDown(array, beg, end):
    """Restore the max-heap property by sinking array[beg] within array[beg..end]."""
    root = beg
    while heap_left(root) <= end:
        left = heap_left(root)
        right = left + 1
        # Pick the largest of root, left child, and right child.
        swap = root
        if array[swap] < array[left]:
            swap = left
        if right <= end and array[swap] < array[right]:
            swap = right
        if swap == root:
            # Root already dominates both children: heap property holds.
            break
        else:
            array.swap(swap, root)
            root = swap
def heapify(array):
    """Turn `array` into a max-heap by sinking every internal node, bottom-up."""
    last = array.size - 1
    for node in range(heap_parent(last), -1, -1):
        siftDown(array, node, last)
def heapSort(array):
    """Sort `array` in place: build a max-heap, then repeatedly move the root
    (current maximum) to the end and re-sink the new root."""
    heapify(array)
    end = array.size - 1
    while end > 0:
        array.swap(0, end)
        end -= 1
        siftDown(array, 0, end)
|
def sum_nat_nums(num):
    """Sum the natural numbers below `num` that are multiples of 3 or 5."""
    return sum(n for n in range(3, num) if n % 3 == 0 or n % 5 == 0)
bob = int(input("enter a number"))
# NOTE(review): missing comma — adjacent string literals concatenate, so b is
# ["5723400907"], a single element. Confirm the intent.
b = ["57234" "00907"]
h = []
h.append(bob)
# NOTE(review): h only ever holds one element, so every slice below is empty.
ff = (h[2:5])
df = (h[1:1])
d3f = (h[2:2])
# NOTE(review): list + list concatenates; sum3 is [] here, not a number.
sum3 = (d3f) + (df)
# NOTE(review): [] % 2 raises TypeError at runtime — the intended logic is
# unclear from this code alone, so it is left as-is and only flagged.
if (sum3 % 2 == 0):
    print("right")
    print(ff)
else:
    print("left")
    print(ff)
|
def parseArguments(argv):
    """Parses cli input -options and --flags into a dictionary.

    '--flag' maps to True; '-opt VALUE' maps to VALUE; other tokens are
    skipped. Fixes: indexing argv[0][1] raised IndexError for a bare '-',
    and '-opt' as the final token raised IndexError reading its value —
    startswith() and a length guard handle both.
    """
    params = {}
    while argv:
        arg = argv[0]
        if arg.startswith('--'):
            # Found flag
            params[arg] = True
        elif arg.startswith('-') and len(arg) > 1 and len(argv) > 1:
            # Found option: its value is the next token
            params[arg] = argv[1]
        argv = argv[1:]
    return params
# dfs recursive
class Solution:
    """Count friend circles (connected components) in adjacency matrix M
    using recursive depth-first search."""

    def findCircleNum(self, M):
        """Return the number of connected components in M."""
        n = len(M)
        visited = [0] * n
        circles = 0
        for person in range(n):
            if visited[person] == 0:
                self.dfs(M, visited, person)
                circles += 1
        return circles

    def dfs(self, M, visited, i):
        """Mark every unvisited friend of person i, recursing outward."""
        for friend, connected in enumerate(M[i]):
            if connected == 1 and visited[friend] == 0:
                visited[friend] = 1
                self.dfs(M, visited, friend)
# dfs iterative
class Solution3:
    """Count friend circles with an iterative DFS using an explicit stack."""

    def findCircleNum(self, M):
        """Return the number of connected components in M."""
        n = len(M)
        visited = [0] * n
        count = 0
        for start in range(n):
            if visited[start]:
                continue
            # Exhaust everyone reachable from `start` before counting it.
            stack = [start]
            while stack:
                person = stack.pop()
                visited[person] = 1
                stack.extend(j for j in range(n)
                             if visited[j] == 0 and M[person][j] == 1)
            count += 1
        return count
# bfs
from collections import deque
class Solution2:
    """Count friend circles with breadth-first search."""

    def findCircleNum(self, M):
        """Return the number of connected components in M."""
        n = len(M)
        visited = [0] * n
        circles = 0
        for start in range(n):
            if visited[start] == 0:
                # BFS over everyone reachable from `start`.
                queue = deque([start])
                while queue:
                    person = queue.popleft()
                    visited[person] = 1
                    for j in range(n):
                        if visited[j] == 0 and M[person][j] == 1:
                            queue.append(j)
                circles += 1
        return circles
|
'''
Created on Apr 7, 2014
@author: JWShumaker
ProjectEuler.net
Problem ID 3
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143 ?
'''
import math
# Test p to see if it is prime
# Test p to see if it is prime
def IsPrime(p):
    """Return True if p is prime.

    Bug fix: values below 2 (0, 1, negatives) fell through the empty loop
    and were reported prime; they are rejected up front now.
    """
    if p < 2:
        return False
    # We only need to check values between 2 and the square root of p.
    for x in range(2, math.floor(math.sqrt(p)) + 1):
        if not p % x:
            return False
    # If p was not divisible, then it must be prime
    return True
# Find the largest prime factor of a
# Find the largest prime factor of a
def LargestPrime(a):
    """Return the largest prime factor of a (a itself when a is prime).

    Bug fix: the original only scanned factors up to sqrt(a), so any number
    whose largest prime factor exceeds its square root came out wrong
    (e.g. LargestPrime(10) returned 2, not 5). Dividing out each factor
    and keeping the remainder fixes this and is faster.
    """
    n = a
    largest = 1
    factor = 2
    while factor * factor <= n:
        # Strip this factor completely before moving on.
        while n % factor == 0:
            largest = factor
            n //= factor
        factor += 1
    # Whatever remains above 1 is a prime factor larger than sqrt(original a).
    if n > 1:
        largest = n
    return largest
print(LargestPrime(600851475143))
|
import tkinter
import os
from gtts import gTTS
import playsound
# Tkinter front-end for Google text-to-speech: type text, press Audio to hear it.
window = tkinter.Tk()
window.title("TEXT TO SPEECH")
window.config(padx = 50 , pady = 50)
def destroy():
    """Close the application window."""
    window.destroy()
def reset():
    """Clear the text entry box."""
    main_title_box.delete(0,"end")
def converter():
    """Speak the entry text: synthesize with gTTS, play it, delete the file."""
    message = main_title_box.get()
    word = gTTS(text = message )
    word.save("text.mp3")
    playsound.playsound("text.mp3")
    # NOTE(review): the file is saved to the current working directory but
    # removed via an absolute path — these only match when the program runs
    # from that exact directory.
    os.remove("C:/Users/Laptop/PycharmProjects/TEXTTOSPEECH/text.mp3")
# please put your file dir to make this program work efficiently
# --- UI layout: title label, entry box, and three buttons ---
main_title = tkinter.Label(text ="TEXT TO SPEECH", font = ("Arial", 18) )
main_title.grid(row = 0 , column = 1, padx = 20 , pady = 20)
main_title_box = tkinter.Entry(width = 80)
main_title_box.grid(row = 1, column = 0 , columnspan = 3, padx = 20, pady = 20)
reset_button = tkinter.Button(text = "RESET", fg = "black",width = 20,bg = "green",command = reset)
reset_button.grid(row = 2 , column = 0, pady = 20)
Audio_button = tkinter.Button(text = "Audio", fg = "black",width = 20,bg = "yellow", command = converter)
Audio_button.grid(row = 2 , column = 1, pady = 20)
Exit_button = tkinter.Button(text = "EXIT", fg = "black",width = 20,bg = "red", command = destroy)
Exit_button.grid(row = 2 , column = 2, pady = 20)
window.mainloop()
import math
def encrypt(key, txt):
    """Columnar transposition cipher: column c collects txt[c], txt[c+key], ...
    and the columns are concatenated in order."""
    # txt[col::key] is exactly the characters at col, col+key, col+2*key, ...
    columns = [txt[col::key] for col in range(key)]
    return ''.join(columns)
def decrypt(key, txt):
    """Invert encrypt(): lay the ciphertext back into the transposition grid
    and read the plaintext out column by column."""
    n_cols = math.ceil(len(txt) / key)
    n_rows = key
    # Cells of the grid that were never filled during encryption.
    n_shaded = (n_cols * n_rows) - len(txt)
    cells = [''] * n_cols
    col = 0
    row = 0
    for ch in txt:
        cells[col] += ch
        col += 1
        # Wrap early on the shaded rows, which are one cell shorter.
        at_row_end = (col == n_cols) or (col == n_cols - 1 and row >= n_rows - n_shaded)
        if at_row_end:
            col = 0
            row += 1
    return ''.join(cells)
def main():
    """Encrypt three sample verses with a fixed key and print the ciphertexts."""
    # '(s)' stands in for the spaces of the original verse text.
    texts = ['Underneath(s)a(s)huge(s)oak(s)tree(s)there(s)was(s)of(s)swine(s)a(s)huge(s)company',
             'That(s)grunted(s)as(s)they(s)crunched(s)the(s)mast:(s)'
             'For(s)that(s)was(s)ripe,(s)and(s)fell(s)full(s)fast.',
             'Then(s)they(s)trotted(s)away,(s)for(s)the(s)wind(s)grew(s)high:(s)'
             'One(s)acorn(s)they(s)left,(s)and(s)no(s)more(s)might(s)you(s)spy']
    key = 9
    for text in texts:
        print(encrypt(key, text))
if __name__ == '__main__':
    main()
|
import re
# Sample prose with IPv4 addresses embedded mid-word; we extract them all.
TEXT = "Either the well was very deep, or she fell very slowly, for she had plenty of time as she went down to look " \
       "about her 33.179.51.137and to wonder what was going to happen next. First, she tried to look down and make " \
       "out what she was coming to, but it was too dark to see anything; then she looked at the side153.237.67.105s " \
       "of the well, and noticed that they were filled with cu198.124.44.100boards and book-shelves; here and there " \
       "she saw maps and pictures hung upon pegs. S57.134.21.120he took down a jar from one of the shelves as she " \
       "passed; it was labelled, but to her great di169.153.58.171 disappointment it was empty: she did not like to " \
       "drop " \
       "the jar for fear of killing somebody, so managed to put it into one of the cupboards as she fell past it. "
# Each octet alternation limits values to 0-255: 25[0-5] | 2[0-4][0-9] | up to
# two optional leading digits. Three dotted octets, then a final one.
IP_REGEX = r"(?:(?:25[0-5]|2[0-4][0-9]|[1]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[1]?[0-9][0-9]?)"
addresses = re.findall(IP_REGEX, TEXT)
print(addresses)
|
import re
# Extract email-like substrings with a regex.
name="random string. myname123@website.com .some random text."
# Fix: regex patterns are raw strings now — in a plain string "\." is an
# invalid escape sequence (SyntaxWarning on Python 3.12+); r"\." keeps the
# backslash so the period is matched literally.
pattern=re.compile(r"[a-zA-Z0-9]+@[a-zA-Z0-9]+\.+[a-zA-Z0-9]+")
result=pattern.search(name)  # search() returns only the first match
print(result)
# findall() returns every non-overlapping match instead of just the first.
name_1="random string. myname123@website.com . yourname222@company.net .some random text."
pattern_1=re.compile(r"[a-zA-Z0-9]+@[a-zA-Z0-9]+\.+[a-zA-Z0-9]+")
result_new=pattern_1.findall(name_1)
print(result_new)
# Now both addresses are printed.
# Real addresses may also contain - _ . before the @; extend the character
# classes (escaping where needed) to match those.
# Tutorial walk-through of the datetime module plus pytz time-zone
# conversion and strftime/strptime formatting.
import datetime
import pytz
# datetime.date (Y, M, D)
# datetime.time (H, M, S, MS)
# datetime.datetime(Y, M, D, H, Min, S, MS)
today=datetime.date.today()
print(today) #print today's date
date_time=datetime.datetime.now() #current date AND time
print(date_time)
birthday=datetime.date(2000,12,22) #you can't put 07: leading zeros are not valid int literals
print(birthday)
type(birthday) #birthday is a datetime.date object
days_since_birth=(today-birthday).days #.days extracts the integer day count from the timedelta
print(days_since_birth)
tdelta=datetime.timedelta(days=10) #adding 10 days to the current day
print(today+tdelta)
print(today.month)
print(today.day)
print(today.weekday()) #weekday() starts from Monday-0, Tue-1, Wed-2, ...
#e.g. output 5 means Saturday
hour_delta=datetime.timedelta(hours=4) #adding 4 hours to the current time
print(date_time+hour_delta)
#using pytz for timezone-aware datetimes
datetime_today=(datetime.datetime.now(tz=pytz.UTC))
print(datetime_today)
datetime_pacific=(datetime_today.astimezone(pytz.timezone('US/Pacific')))
# The Pacific Time Zone (PT) encompasses parts of western Canada, the
# western United States, and western Mexico. Standard time subtracts eight
# hours from Coordinated Universal Time (UTC-08:00); during daylight-saving
# time an offset of UTC-07:00 is used.
print(datetime_pacific)
for tz in pytz.all_timezones: #print every timezone pytz knows about
    print(tz)
#string formatting with dates:
#2020-03-14 -> March 14,2020
#strftime (f = formatting, datetime -> string)
today_date=datetime.date.today()
print(today_date.strftime('%B %d, %Y')) #%B month name, %d day, %Y 4-digit year
#out- March 14, 2020
#March 14,2020 -> 2020-03-14
#strptime (p = parsing, string -> datetime)
date_thing=datetime.datetime.strptime('March 09, 2020','%B %d, %Y')
print(date_thing)
#!/usr/bin/env python
########################################################
# Spyros Paparrizos
# spyridon.paparrizos@lsce.ipsl.fr
########################################################
# The Kobayashi approach using Python on comparing simulated and observed values using mean squared deviation and its components
# Kobayashi K, Salam MU, 2001. Comparing Simulated and Measured Values Using Mean Squared Deviation and its Components, Journal
# of Agronomy, 92, 345-352. https://dl.sciencesocieties.org/publications/aj/abstracts/92/2/345?access=0&view=pdf
import numpy as np

x = np.ma.array(np.random.random(100))  # 1st testing dataset (simulated)
y = np.ma.array(np.random.random(100))  # 2nd testing dataset (measured)


# Kobayashi formula -- ported from Python 2 (print statement / raw_input)
# to Python 3 syntax; the computation itself is unchanged.
def Kobayashi(x, y):
    """Decompose the mean squared deviation between x and y.

    Returns the tuple (SB, SDSD, LCS) where
      SB   -- squared bias between the two means,
      SDSD -- squared difference of the standard deviations,
      LCS  -- lack of correlation weighted by the standard deviations,
    so that MSD = SB + SDSD + LCS (Kobayashi & Salam, 2001).
    """
    xmean = x.mean(axis=0)
    ymean = y.mean(axis=0)
    SB = (xmean - ymean)**2
    SDs = ( ( (x - xmean)**2 ).mean(axis=0) )**0.5   # population std of x
    SDm = ( ( (y - ymean)**2 ).mean(axis=0) )**0.5   # population std of y
    r = ( (x - xmean)*(y - ymean) ).mean(axis=0) / (SDs*SDm)  # Pearson correlation
    SDSD = (SDs - SDm)**2
    LCS = 2*SDs*SDm*(1 - r)
    if (1-r) < 0:
        # |r| > 1 should be impossible; pause so the anomaly can be inspected
        print(r)
        input("NAN")
    return SB, SDSD, LCS


SB, SDSD, LCS = Kobayashi(x, y)
MSD = SB+SDSD+LCS


# Additional check whether the Kobayashi formula was correctly applied (MSD == RMSE**2)
def RMSE(x, y):  # x and y need to have the same dimensions
    """Root mean squared error between x and y."""
    return ( ( (x - y)**2 ).mean(axis=0) )**0.5


# Store the value under a new name so the RMSE function is not shadowed
# (the original did `RMSE = RMSE(x, y)`).
rmse = RMSE(x, y)
print("MSD (SB+SDSD+LCS) - RMSE**2 = 0, and here it is: ", (SB+SDSD+LCS - rmse**2).sum())
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 30 10:42:07 2015
@author: mjbeck
"""
# Import smtplib for the actual sending function
#import smtplib
import win32com.client as win32
# Import the email modules we'll need
from email.mime.text import MIMEText
# E-mail the contents of a text file to oneself via the local Outlook client
# (win32com COM automation); the direct-SMTP path is kept commented out.
textfile = 'H:\Code\Perl\perl_notes.pl'
you = "dstauffman@gmail.com"
me = you  # sender and recipient are the same address
# Open a plain text file for reading.  For this example, assume that
# the text file contains only ASCII characters.
with open(textfile) as fp:
    # Create a text/plain message
    msg = MIMEText(fp.read())
# me == the sender's email address
# you == the recipient's email address
msg['Subject'] = 'The contents of %s' % textfile
msg['From'] = me
msg['To'] = you
# Send the message via our own SMTP server.
#s = smtplib.SMTP('denmail.us.lmco.com')
#s.login("","")
#s.send_message(msg)
#s.quit()
# COM automation path: drive the installed Outlook application instead.
outlook = win32.Dispatch('outlook.application')
mail = outlook.CreateItem(0)  # 0 presumably selects a mail item -- confirm against OlItemType
mail.To = you
mail.Subject = 'Message sent via python'
mail.body = """I'm playing around with emails sent from python programs.
Since I don't know if LM has an SMTP server let alone what its name or address is I'm doing it
via the win32com library. There's the drawback that outlook doesn't like being manhandled so
it pops up a warning box about an automatic email and I have to click 'Accept', but then again,
all my outlook emails have the security popup so it will always require some human interaction.
Alternatively, I could set up an AutoIT script to watch for the warning pop-up, accept it, then
dismiss the security pop-up as well.
How was mexico?
-Matt """
# NOTE(review): COM late binding appears to invoke 'send' without
# parentheses here, but mail.Send() is the conventional spelling -- verify.
mail.send
print('done')
|
# Attempt at map colouring: read, for each province, its comma-separated
# neighbours, then assign each province a colour from `listac` while trying
# to avoid a neighbour's colour.  Contains debug prints; the algorithm looks
# incomplete (candidate colours are appended inside nested loops rather than
# chosen once per province).
p = int(input("quantas provincias são? "))
listac = ["azul", "branco", "preto", "vermelho"]  # available colours (blue, white, black, red)
listapc = []  # assigned colours, encoded as "<province index><colour>"
lista = []  # raw neighbour strings, one comma-separated entry per province
for i in range(0,p):
    v = input("digite as provincias vizinhas da " + str(i+1)+" ")
    lista.append(v)
for i in range(0,p):
    listaSeq = lista[i].split(",")  # neighbours of province i
    for n in range(len(listaSeq)):
        # first case: nothing coloured yet -> give the first colour
        if len(listapc) == 0:
            add = str(i) + str(listac[0])
            listapc.append(add)
        # remaining cases
        else:
            print("we")  # debug marker
            for k in range(len(listapc)):
                # NOTE(review): substring test on the encoded string compares
                # province digits, so this misbehaves once p > 9
                if str(listaSeq[n]) not in str(listapc[k]):
                    corQNpode = str(listapc[k][1:])  # colour that must be avoided
                    # walk the palette for the next different colour
                    for proxc in range(len(listac)):
                        print(listapc)  # debug
                        if corQNpode != listac[proxc]:
                            add = str(i) + str(listac[proxc])
                            listapc.append(add)
                        else:
                            break
print(listapc)
|
import math
from scipy import stats
from src.statistics.StatisticsAbstract import StatisticsAbstract
from src.statistics.GetSample import GetSample
def population_mean(number_list):
    """Return the arithmetic mean of all values in *number_list*.

    The population mean is the sum of the items divided by the number of
    items.  Raises ZeroDivisionError for an empty iterable (unchanged
    behaviour).

    Fix: the original recomputed sum(number_list) once per element inside a
    loop, making the function O(n^2) for no benefit.
    """
    number_list = list(number_list)
    return sum(number_list) / len(number_list)
def median(number_list):
    """Return the median of *number_list*.

    For an odd count this is the middle element of the sorted data; for an
    even count it is the mean of the two middle elements (always a float in
    that case, matching the original).

    Fix: the original tested ``len(...) % 2 is not 0`` -- identity
    comparison on ints is implementation-defined and raises a
    SyntaxWarning; use ``!=``.
    """
    number_list = sorted(number_list)
    if len(number_list) % 2 != 0:
        # odd count: single middle element
        return number_list[len(number_list) // 2]
    # even count: average the two middle elements
    upper = len(number_list) // 2
    return (float(number_list[upper - 1]) + float(number_list[upper])) / 2
def pop_standard_deviation(number_list):
    """Return the population standard deviation of *number_list*.

    Computed as sqrt(mean((x - mean)**2)), i.e. the square root of the
    population variance.  The mean is computed locally (same value as
    population_mean would give) so this function stands alone; the manual
    append loop is replaced with a comprehension.
    """
    values = list(number_list)
    # 1. mean of the data
    mean = sum(values) / len(values)
    # 2-3. variance = mean of the squared deviations from the mean
    variance = sum((x - mean) ** 2 for x in values) / len(values)
    # 4. standard deviation is the square root of the variance
    return math.sqrt(variance)
def mode(number_list):
    """Return the most frequent value in *number_list*.

    Ties are resolved in favour of the first-seen-last value in insertion
    order (the original iterated all keys and kept reassigning), and an
    empty list yields 0 -- both behaviours preserved.

    Fix: the original called list.count() inside a loop and recomputed
    max() per key, giving O(n^2); counting once is O(n).
    """
    number_list = list(number_list)
    counts = {}
    for val in number_list:
        counts[val] = counts.get(val, 0) + 1
    mode_val = 0
    if counts:
        best = max(counts.values())  # computed once, not per key
        for key, cnt in counts.items():
            if cnt == best:
                mode_val = key  # last matching key wins, as before
    return mode_val
def variance_pop_proportion(number_list):
    """Return the variance of a population proportion: P*Q / n.

    Assumes *number_list* holds 0/1 indicator values, so the proportion P
    is the mean of the list and Q = 1 - P is the complementary proportion.

    Bug fix: the original used p = sum(values) (a count, not a proportion)
    and q = p - 1 instead of 1 - p, contradicting its own comments.
    """
    number_list = list(number_list)
    n = len(number_list)
    # P is the population proportion of elements with the attribute
    p = sum(number_list) / n
    # Q is the proportion of population elements that do NOT have the
    # particular attribute: Q = 1 - P
    q = 1 - p
    return (p * q) / n
def p_value(number_list):
    """Run a one-sample t-test of *number_list* against a population mean
    of 0.05 and return scipy's full result (statistic and p-value)."""
    data = list(number_list)
    return stats.ttest_1samp(data, 0.05)
def proportion(number_list):
    """Return the sample proportion: the sum of the values divided by the
    number of values (i.e. the mean, for 0/1 indicator data)."""
    values = list(number_list)
    return sum(values) / len(values)
def sample_mean(number_list, sample_size):
    """Return the mean of a random sample of *sample_size* items drawn from
    *number_list* via GetSample.

    Fix: the original recomputed sum(sample) once per element of
    number_list inside a loop, making the function O(n*m) for no benefit.
    """
    number_list = list(number_list)
    sample = GetSample(number_list, sample_size)
    # sum of the sampled items divided by the sample size
    return sum(sample) / len(sample)
def sample_standard_deviation(number_list, sample_size):
    """Return sqrt(mean((x - sample_mean)**2)) where the mean comes from a
    random sample of *sample_size* items.

    NOTE(review): as in the original, the squared deviations are taken over
    the *full* number_list, not the sample -- possibly intentional;
    preserved as-is.  The manual append loop is replaced with a
    comprehension.
    """
    # 1. mean of the sampled data
    mean = sample_mean(number_list, sample_size)
    # 2-3. variance = mean of the squared deviations from the sample mean
    squared = [math.pow(x - mean, 2) for x in number_list]
    variance = population_mean(squared)
    # 4. standard deviation is the square root of the variance
    return math.sqrt(variance)
def variance_sample_proportion(number_list, sample_size):
    """Return the variance of a sample proportion: S*Q / n.

    S is the proportion of 1s in a random sample of *sample_size* items
    drawn via GetSample, and Q = 1 - S is the complementary proportion.

    Bug fix: the original summed the full number_list (a count, not a
    proportion, and not of the sample) and computed q = s - 1 instead of
    1 - s, contradicting its own comments.
    """
    number_list = list(number_list)
    sample = GetSample(number_list, sample_size)
    # n is the number of elements in the sample
    n = len(sample)
    # S is the sample proportion of elements with the attribute
    s = sum(sample) / n
    # Q is the proportion of sample elements that do NOT have it: Q = 1 - S
    q = 1 - s
    return (s * q) / n
class Statistics(StatisticsAbstract):
    """Thin OO facade over the module-level statistics helpers: each method
    delegates to the function of the same name, stores the value in
    ``result`` and returns it."""
    # most recently computed value (class attribute until first assignment)
    result = 0
    def population_mean(self, number_list):
        self.result = population_mean(number_list)
        return self.result
    def median(self, number_list):
        self.result = median(number_list)
        return self.result
    def mode(self, number_list):
        self.result = mode(number_list)
        return self.result
    def pop_standard_deviation(self, number_list):
        self.result = pop_standard_deviation(number_list)
        return self.result
    def variance_pop_proportion(self, number_list):
        self.result = variance_pop_proportion(number_list)
        return self.result
    def zscore(self, number_list):
        # NOTE(review): no module-level zscore is defined in this file;
        # calling this raises NameError.
        self.result = zscore(number_list)
        return self.result
    def standardized_score(self, number_list):
        # NOTE(review): standardized_score is not defined anywhere visible.
        self.result = standardized_score(number_list)
        return self.result
    def population_corre_coefficient(self, number_list):
        # NOTE(review): population_corre_coefficient is not defined anywhere visible.
        self.result = population_corre_coefficient(number_list)
        return self.result
    def confidence_interval(self, number_list):
        # NOTE(review): confidence_interval is not defined anywhere visible.
        self.result = confidence_interval(number_list)
        return self.result
    def population_variance(self, number_list):
        # NOTE(review): population_variance is not defined anywhere visible.
        self.result = population_variance(number_list)
        return self.result
    def p_value(self, number_list):
        self.result = p_value(number_list)
        return self.result
    def proportion(self, number_list):
        self.result = proportion(number_list)
        return self.result
    def sample_mean(self, sample_size):
        # NOTE(review): the module function is sample_mean(number_list,
        # sample_size); this one-argument call raises TypeError when invoked.
        self.result = sample_mean(sample_size)
        return self.result
    def sample_standard_deviation(self, sample_size):
        # NOTE(review): missing the number_list argument -- TypeError when invoked.
        self.result = sample_standard_deviation(sample_size)
        return self.result
    def variance_sample_proportion(self, sample_size):
        # NOTE(review): missing the number_list argument -- TypeError when invoked.
        self.result = variance_sample_proportion(sample_size)
        return self.result
|
# Split a restaurant bill (plus tip) evenly between several people.
print("Welcome to the tip calculator.")
raw_bill = input("What is the total bill? $")
raw_tip = input("What percentage tip would you like to give? ")
raw_people = input("How many people are splitting the bill? ")

# Convert the raw answers to numbers.
bill = float(raw_bill)
tip_percent = int(raw_tip)
people = int(raw_people)

# Each share = bill grown by the tip percentage, divided evenly.
share = bill * (1 + tip_percent / 100) / people
share_2dp = "{:.2f}".format(share)
print(f"Each person should pay: ${share_2dp}")
import os
from art import logo
def clear():
    """Clear the terminal via the POSIX 'clear' command."""
    os.system('clear')
# bidder name -> bid amount; a repeated name overwrites the earlier bid
bids = {}
def highest_bid():
    """Print the name and amount of the largest bid collected in *bids*."""
    highest_bid = 0  # NOTE: shadows the function name inside this scope
    bidder_name = ''
    for name, bid in bids.items():
        if bid > highest_bid:
            bidder_name = name
            highest_bid = bid
    print(logo)
    print(f"The highest bidder was {bidder_name.capitalize()} with ${highest_bid}!")
# Collect bids until the user answers 'no', then announce the winner.
again = 'yes'
while not again == 'no':
    print(logo)
    print("Welcome to the auction.")
    name = input("What is your name? ")
    bid = int(input("What is your bid? $"))
    bids[name] = bid
    again = input("Is there any other bidders? 'yes' or 'no' \n").lower()
    clear()
highest_bid()
|
'''
A number is said to be a Magic number if the sum of its digits is calculated repeatedly till a single digit obtained
and that single digit is 1.
WAP to check whether given no. is magic no. or not
For n = 199, Sum of digits = 1 + 9 + 9 = 19,
Sum of digits of 19 = 1 + 9 = 10
Sum of digits of 10 = 1 + 0 = 1
'''
def is_magic(n):
    """Return 1 if *n* is a Magic number, else 0.

    A Magic number reduces to 1 when its digits are summed repeatedly until
    a single digit remains, e.g. 199 -> 19 -> 10 -> 1.

    Fix: digit stripping now uses floor division (n //= 10) instead of
    float division followed by int(), which was both slower and fragile
    for large n (float precision).
    """
    add = 0
    while n > 0 or add > 9:
        if n == 0:
            # the digit sum still has several digits: reduce it again
            n = add
            add = 0
        digit = n % 10
        add = add + digit
        n //= 10
    return 1 if add == 1 else 0
def main():
    """Read an integer and report whether it is a Magic number (1/0)."""
    value = int(input("Enter n:"))
    print(is_magic(value))


if __name__ == '__main__':
    main()
|
# WAP to accept pattern and string from user and count occurances of the given pattern in the string using search method
import re
def main():
    """Count non-overlapping occurrences of a user-supplied regex pattern
    in a user-supplied string by searching repeatedly from the end of each
    match."""
    count = 0
    pattern = input("Enter pattern :")
    input_string = input("Enter string :")
    ans = re.search(pattern, input_string)
    while ans is not None:
        count = count + 1
        if not input_string:
            # an empty remainder can only yield one final (empty) match
            break
        # advance past this match, but always by at least one character so
        # a pattern matching the empty string cannot loop forever
        # (the original hung on e.g. pattern 'x*')
        index = max(ans.end(), 1)
        input_string = input_string[index:]
        ans = re.search(pattern, input_string)
    print("Count :", count)


if __name__ == '__main__':
    main()
|
#Write a program to display pattern
# A
# B A B
# C B A B C
# D C B A B C D
def print_pattern(n):
    """Print the mirrored-alphabet pyramid, e.g. for n = 4:
           A
          B A B
         C B A B C
        D C B A B C D
    """
    extra = 0  # row i prints i + extra letters; extra grows by 1 per row
    for row in range(1, n + 1):
        printed = 1
        code = ord('A') + row - 1  # the row starts at its own letter
        # leading padding so the rows centre on the 'A' column
        for _ in range(n - row):
            print(" ", end=" ")
        # descend towards 'A', then climb back up
        for _ in range(row + extra):
            print(chr(code), end=" ")
            if printed >= row:
                code = code + 1
            else:
                code = code - 1
            printed = printed + 1
        extra = extra + 1
        print("")
def main():
    """Ask for a row count and draw the pattern."""
    rows = int(input("Enter no. of rows:"))
    print_pattern(rows)


if __name__ == '__main__':
    main()
|
'''WAP to accept 2 strings from user. And check if second string is a right rotation of first string.
Hint:
India, iaInd -> This should be true
Jeetendra 1st right rotation "aJeetendr" '''
def is_rotation(input_str, validate_str):
    """Return True when *validate_str* is some rotation of *input_str*.

    Every rotation of a string s is a substring of s doubled, so a single
    containment check suffices -- provided the lengths match first.
    """
    if len(input_str) != len(validate_str):
        return False
    doubled = input_str + input_str
    return validate_str in doubled
def main():
    """Read two strings and report whether the second rotates the first."""
    base = input("Enter string:")
    candidate = input("Enter validate string:")
    if is_rotation(base, candidate):
        print("{} is right rotation of {}".format(candidate, base))
    else:
        print("{} is not right rotation of {}".format(candidate, base))


if __name__ == '__main__':
    main()
|
# WAP to accept two strings and without using count method,count the occurances of first string into second string
def count_occurance(input_str, sub_str):
    """Count case-insensitive occurrences of *sub_str* in *input_str*,
    including overlapping ones (e.g. 'aa' occurs twice in 'aaa')."""
    haystack = input_str.lower()
    needle = sub_str.lower()
    width = len(needle)
    total = 0
    # slide a window of the needle's width over every start position
    for start in range(len(haystack)):
        if haystack[start:start + width] == needle:
            total += 1
    return total
def main():
    """Read a string and a substring and report the occurrence count."""
    text = input("Enter string:")
    fragment = input("Enter sub string:")
    print("%s is occured %d times in %s" % (fragment, count_occurance(text, fragment), text))


if __name__ == '__main__':
    main()
|
# Write a program to accept a range from user and print all prime numbers from range
import math
def is_prime(num):
    """Return True when abs(*num*) is a prime number.

    Bug fixes versus the original: 2 is now reported prime (it was rejected
    by the blanket even test) and 0/1 are now rejected (the old divisor
    loop never ran for them, so they were reported prime).
    """
    if num < 0:
        num = -num  # primality of the absolute value, as before
    if num < 2:
        return False  # 0 and 1 are not prime
    if num == 2:
        return True  # the only even prime
    if num % 2 == 0:
        return False
    # test odd divisors up to sqrt(num)
    for x in range(3, int(math.sqrt(num)) + 1, 2):
        if num % x == 0:
            return False
    return True
def main():
    """Read a range [start, end) and print every prime inside it."""
    lo = int(input("Enter start value:"))
    hi = int(input("Enter end value:"))
    print("Prime numbers within range are:")
    if lo >= hi:
        print("Start value is greater than end value")
        return
    for candidate in range(lo, hi):
        if is_prime(candidate):
            print(candidate)


if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 29 11:28:53 2020
@author: Hp
"""
import math


def f(x):
    """Target cubic x**3 + 2*x**2 + 10*x - 20 (one root inside [1, 1.5])."""
    return x**3 + 2*x**2 + 10*x - 20


# Bisection of f on the bracket [a, b].
a = 1
b = 1.5
e = 0.000001   # stop once |f(c)| falls below this tolerance
N = 100        # hard cap on the number of iterations
iterasi = 0
print('==================================')
print(' c f(c)')
print('==================================')
while True:
    iterasi += 1
    # midpoint of the current bracket
    c = 0.5 * (a + b)
    # keep whichever half still brackets the sign change
    if f(a) * f(c) < 0:
        b = c
    else:
        a = c
    print('{:7.6f} \t {:+15.10f}'.format(c, f(c)))
    # stop on convergence or when the iteration budget is spent
    if abs(f(c)) < e or iterasi >= N:
        break
print('==================================')
#Maria Loren I. Ignacio
#https://github.com/lorenignacio
#lorenignacio00@gmail.com
def only_upper(s):
    """Return a string containing only the uppercase characters of *s*.

    Bug fixes: the accumulator originally started as the full sentence
    "Thank God for everything" instead of the empty string, so the result
    always contained that text; and the demo call below referenced an
    undefined variable ``s`` (NameError on import).
    """
    upper_chars = ""
    for char in s:
        if char.isupper():
            upper_chars += char
    return upper_chars


# Demo: prints "TG".
print(only_upper("Thank God for everything"))
|
"""
Tendo como dados de entrada a altura de uma pessoa, construa um algoritmo
que calcule seu peso ideal, usando a seguinte fórmula: (72.7*altura) - 58
"""
# Read a height in metres and report the ideal weight: (72.7 * height) - 58.
altura = float(input('Informe sua altura em metro: '))
peso_ideal = 72.7 * altura - 58
print(f'Se você tem {altura} m de altura, seu peso ideal são {peso_ideal:.2f} kg.')
|
"""
João Papo-de-Pescador, homem de bem, comprou um microcomputador para controlar o rendimento diário
de seu trabalho. Toda vez que ele traz um peso de peixes maior que o estabelecido pelo regulamento
de pesca do estado de São Paulo (50 quilos) deve pagar uma multa de R$ 4,00 por quilo excedente.
João precisa que você faça um programa que leia a variável 'peso' (peso de peixes) e calcule o excesso.
Gravar na variável 'excesso' a quantidade de quilos além do limite e na variável 'multa' o valor da multa
que João deverá pagar. Imprima os dados do programa com as mensagens adequadas.
"""
# Read the catch weight in kilos; anything above the 50 kg legal limit is
# fined at R$ 4.00 per excess kilo.
peso = float(input('Informe o peso total do pescado em quilos: '))
excesso = peso - 50
# Ideally the fine would only be computed when there actually is an excess,
# but conditionals are not covered by this exercise series (nor required by
# the statement), so the calculation runs unconditionally.
multa = 4 * excesso
print(f'Foram pescados {excesso:.2f} kg além do limite.\n'
      f'Multa devida: R$ {multa:.2f}')
|
from app.utils import is_valid_name
class HumanPlayer:
    """A human player identified by a board mark (e.g. 'X' or 'O')."""

    def __init__(self, mark: str) -> None:
        self.mark = mark

    def set_player_info(self, meta_str: str) -> bool:
        """
        Ask the name of the player and set it.

        Args:
            meta_str: Player's turn, either First player or Second player
        Returns:
            True once a valid name has been stored (the original was
            annotated -> None despite returning True).
        """
        while True:
            name = input(f'Enter a {meta_str} Name - {self.mark}: ')
            try:
                if is_valid_name(name):
                    self.name = name
                    return True
                else:
                    print("Invalid Name")
            except KeyboardInterrupt:
                # best-effort notice; the loop re-prompts (original behaviour)
                print("Keyboard Interrupted")

    def get_choice(self, game_object: object) -> int:
        """
        Get player's choice of cell.

        Args:
            game_object: Context of the game used to validate the move
        Returns:
            position: Player's validated choice for the move
        """
        while True:
            position = input(f"{self.name}'s turn ==> Enter your choice: ")
            try:
                position = int(position.strip())
                if not game_object.is_valid_cell_no(position):
                    print("Position is Invalid")
                    continue
                if not game_object.is_cell_available(position):
                    print("Cell is already occupied")
                    continue
                return position
            except ValueError:
                # only int() raises here; the original caught BaseException,
                # which also swallowed Ctrl-C (KeyboardInterrupt)
                print("Given input is invalid")
|
"""
Week 1 assignments for Deep-neural-network
This file contains sample codes for:
- different initialization
- L2 and dropout regularization
- Gradient check for back-propagation
"""
#
# --- Initialization ---
#
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec
# NOTE(review): '%matplotlib inline' is an IPython/Jupyter magic, not valid
# plain-Python syntax -- this file is a notebook export and runs only there.
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
    """
    Three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (0 for red dots, 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent
    num_iterations -- number of gradient-descent iterations
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- which initializer to use: "zeros", "random" or "he"

    Returns:
    parameters -- parameters learnt by the model
    """
    cost_history = []                      # loss recorded every 1000 steps
    layers_dims = [X.shape[0], 10, 5, 1]   # input -> 10 -> 5 -> 1

    # Build the starting parameters with the requested scheme.
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    # Gradient descent.
    for step in range(num_iterations):
        # Forward pass through the three layers.
        a3, cache = forward_propagation(X, parameters)
        # Loss over the whole training set.
        cost = compute_loss(a3, Y)
        # Backward pass and parameter update.
        grads = backward_propagation(X, Y, cache)
        parameters = update_parameters(parameters, grads, learning_rate)
        # Periodic progress report / loss sampling.
        if print_cost and step % 1000 == 0:
            print("Cost after iteration {}: {}".format(step, cost))
            cost_history.append(cost)

    # Show the training curve.
    plt.plot(cost_history)
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
# GRADED FUNCTION: initialize_parameters_zeros
def initialize_parameters_zeros(layers_dims):
    """
    Build an all-zero parameter set for the given layer sizes.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- dict with keys "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1), all zero-filled.
    """
    parameters = {}
    # pair each layer with its predecessor: (fan_in, fan_out) per layer
    for layer, (n_prev, n_curr) in enumerate(zip(layers_dims, layers_dims[1:]), start=1):
        parameters['W' + str(layer)] = np.zeros((n_curr, n_prev))
        parameters['b' + str(layer)] = np.zeros((n_curr, 1))
    return parameters
# Train with all-zero initialization, report train/test accuracy, and plot
# the resulting decision boundary.
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: initialize_parameters_random
def initialize_parameters_random(layers_dims):
    """
    Initialize weights from a standard normal scaled by 10 and biases to
    zero (the deliberately-too-large scheme used for comparison).

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- dict with keys "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1).
    """
    np.random.seed(3)  # fixed seed so the "random" numbers are reproducible
    parameters = {}
    n_layers = len(layers_dims)
    for l in range(1, n_layers):
        w_shape = (layers_dims[l], layers_dims[l - 1])
        parameters['W' + str(l)] = 10 * np.random.randn(*w_shape)
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters
# Train with the large (x10) random initialization, report accuracy, and
# plot the decision boundary.
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
print (predictions_train)
print (predictions_test)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: initialize_parameters_he
def initialize_parameters_he(layers_dims):
    """
    He initialization: weights drawn from a standard normal scaled by
    sqrt(2 / fan_in); biases start at zero.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- dict with keys "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1).
    """
    np.random.seed(3)  # reproducible draws
    parameters = {}
    num_layers = len(layers_dims) - 1
    for l in range(1, num_layers + 1):
        fan_in = layers_dims[l - 1]
        scale = np.sqrt(2 / fan_in)
        parameters['W' + str(l)] = scale * np.random.randn(layers_dims[l], fan_in)
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters
# Train with He initialization, report accuracy, and plot the boundary.
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
#
# --- Regularization ---
#
# import packages for the regularization section
import numpy as np
import matplotlib.pyplot as plt
from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec
from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters
import sklearn
import sklearn.datasets
import scipy.io
from testCases import *
# NOTE(review): notebook magic below -- valid only inside IPython/Jupyter.
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load the 2-D point dataset used throughout the regularization section
train_X, train_Y, test_X, test_Y = load_2D_dataset()
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
    """
    Three-layer network LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID, optionally
    trained with L2 regularization or inverted dropout (one at a time).

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
    learning_rate -- learning rate of the optimization
    num_iterations -- number of iterations of the optimization loop
    print_cost -- If True, print the cost every 10000 iterations
    lambd -- L2 regularization hyperparameter, scalar (0 disables it)
    keep_prob -- probability of keeping a neuron active during drop-out, scalar (1 disables it)

    Returns:
    parameters -- parameters learned by the model. They can then be used to predict.
    """
    grads = {}
    cost_history = []                     # cost sampled every 1000 steps
    layers_dims = [X.shape[0], 20, 3, 1]  # input -> 20 -> 3 -> 1
    parameters = initialize_parameters(layers_dims)

    # Gradient descent.
    for step in range(num_iterations):
        # Forward pass, with inverted dropout when keep_prob < 1.
        if keep_prob == 1:
            a3, cache = forward_propagation(X, parameters)
        elif keep_prob < 1:
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)

        # Cost, with the L2 penalty when lambd != 0.
        if lambd == 0:
            cost = compute_cost(a3, Y)
        else:
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)

        # Backward pass. Using both L2 and dropout is possible in general,
        # but this assignment explores only one at a time.
        assert(lambd==0 or keep_prob==1)
        if lambd == 0 and keep_prob == 1:
            grads = backward_propagation(X, Y, cache)
        elif lambd != 0:
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)

        parameters = update_parameters(parameters, grads, learning_rate)

        # Progress report / loss-curve sampling.
        if print_cost and step % 10000 == 0:
            print("Cost after iteration {}: {}".format(step, cost))
        if print_cost and step % 1000 == 0:
            cost_history.append(cost)

    # Plot the sampled cost curve.
    plt.plot(cost_history)
    plt.ylabel('cost')
    plt.xlabel('iterations (x1,000)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
# Baseline: train without any regularization, report accuracy, and plot the
# decision boundary.
parameters = model(train_X, train_Y)
print ("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model without regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: compute_cost_with_regularization
def compute_cost_with_regularization(A3, Y, parameters, lambd):
    """
    Cross-entropy cost plus the L2 penalty (lambd / 2m) * sum ||Wl||^2.

    Arguments:
    A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    parameters -- python dictionary containing parameters of the model
    lambd -- regularization hyperparameter, scalar

    Returns:
    cost -- value of the regularized loss function
    """
    m = Y.shape[1]
    cross_entropy_cost = compute_cost(A3, Y)  # unregularized part of the cost
    # sum of squared entries over the three weight matrices (biases are not penalized)
    weight_norms = sum(np.sum(np.square(parameters[name])) for name in ("W1", "W2", "W3"))
    L2_regularization_cost = weight_norms * lambd / 2 / m
    return cross_entropy_cost + L2_regularization_cost
# GRADED FUNCTION: backward_propagation_with_regularization
def backward_propagation_with_regularization(X, Y, cache, lambd):
    """
    Backward pass of the baseline model with the L2 penalty's gradient
    (lambd/m * Wl) added to each weight gradient.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation()
    lambd -- regularization hyperparameter, scalar

    Returns:
    gradients -- dictionary of gradients with respect to each parameter,
                 activation and pre-activation variable
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    # Output layer: sigmoid + cross-entropy collapses to A3 - Y.
    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T) + lambd * W3 / m  # data term + L2 term
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)

    # Hidden layer 2: ReLU passes gradient only where A2 > 0.
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = 1./m * np.dot(dZ2, A1.T) + lambd * W2 / m
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)

    # Hidden layer 1.
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1./m * np.dot(dZ1, X.T) + lambd * W1 / m
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
            "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
            "dZ1": dZ1, "dW1": dW1, "db1": db1}
# Train with L2 regularization (lambda = 0.7), report accuracy, and plot
# the decision boundary.
parameters = model(train_X, train_Y, lambd = 0.7)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with L2-regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: forward_propagation_with_dropout
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
    """
    Forward pass: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    parameters -- python dictionary containing "W1", "b1", "W2", "b2", "W3", "b3":
                  W1 (20, 2), b1 (20, 1), W2 (3, 20), b2 (3, 1), W3 (1, 3), b3 (1, 1)
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    A3 -- last activation value, output of the forward propagation, of shape (1, 1)
    cache -- tuple of intermediates (including the dropout masks) for backprop
    """
    np.random.seed(1)  # fixed seed so the dropout masks are reproducible

    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # Layer 1: linear -> ReLU -> inverted dropout.
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    D1 = np.random.rand(A1.shape[0], A1.shape[1]) < keep_prob  # keep-mask, one entry per unit
    A1 = A1 * D1 / keep_prob  # zero the dropped units, rescale the survivors

    # Layer 2: linear -> ReLU -> inverted dropout.
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    D2 = np.random.rand(A2.shape[0], A2.shape[1]) < keep_prob
    A2 = A2 * D2 / keep_prob

    # Output layer: linear -> sigmoid.
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
    return A3, cache
# GRADED FUNCTION: backward_propagation_with_dropout
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
    """
    Backward pass of the baseline model with dropout applied to A1 and A2.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation_with_dropout()
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    gradients -- dictionary with the gradients with respect to each parameter,
                 activation and pre-activation variable
    """
    m = X.shape[1]
    Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3 = cache

    dZ3 = A3 - Y
    dW3 = 1. / m * np.dot(dZ3, A2.T)
    db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)

    # Re-apply the forward mask and scaling so gradient flows only through
    # the units that were kept during the forward pass.
    dA2 = np.dot(W3.T, dZ3)
    dA2 = dA2 * D2 / keep_prob
    dZ2 = dA2 * (A2 > 0)  # ReLU derivative
    dW2 = 1. / m * np.dot(dZ2, A1.T)
    db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dA1 = dA1 * D1 / keep_prob
    dZ1 = dA1 * (A1 > 0)
    dW1 = 1. / m * np.dot(dZ1, X.T)
    db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
            "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
            "dZ1": dZ1, "dW1": dW1, "db1": db1}
# Train the 3-layer model with dropout (keep_prob = 0.86) and report accuracy
# on both splits; helpers and datasets come from elsewhere in this file.
parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
# Plot the learned decision boundary over the training data.
plt.title("Model with dropout")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
#
# --- Gradient check ---
#
# Packages
import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
def forward_propagation_n(X, Y, parameters):
    """
    Implements the forward propagation (and computes the cost) presented in Figure 3.

    Arguments:
    X -- training set for m examples
    Y -- labels for m examples
    parameters -- python dictionary with "W1", "b1", "W2", "b2", "W3", "b3":
                  W1 (5, 4), b1 (5, 1), W2 (3, 5), b2 (3, 1), W3 (1, 3), b3 (1, 1)

    Returns:
    cost -- the cost function (logistic cost for one example)
    cache -- intermediates consumed by backward_propagation_n
    """
    m = X.shape[1]
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Cross-entropy cost averaged over the m examples.
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1. / m * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    return cost, cache
def backward_propagation_n(X, Y, cache):
    """
    Implement the backward propagation presented in figure 2.

    Arguments:
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- dictionary with the gradients of the cost with respect to
                 each parameter, activation and pre-activation variable
    """
    m = X.shape[1]
    Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3 = cache

    # Output layer (sigmoid + cross-entropy).
    dZ3 = A3 - Y
    dW3 = 1. / m * np.dot(dZ3, A2.T)
    db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)

    # Hidden layer 2 (ReLU).
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = dA2 * (A2 > 0)
    dW2 = 1. / m * np.dot(dZ2, A1.T)
    db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)

    # Hidden layer 1 (ReLU).
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = dA1 * (A1 > 0)
    dW1 = 1. / m * np.dot(dZ1, X.T)
    db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3,
            "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
            "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
# GRADED FUNCTION: gradient_check_n
def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
    """
    Checks whether backward_propagation_n computes the gradient of the cost
    produced by forward_propagation_n, using two-sided finite differences.

    Arguments:
    parameters -- python dictionary with "W1", "b1", "W2", "b2", "W3", "b3"
    gradients -- output of backward_propagation_n
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    epsilon -- tiny shift used by the finite-difference formula

    Returns:
    difference -- relative difference between the numerical approximation and
                  the backward-propagation gradient
    """
    # Flatten the parameter and gradient dictionaries into column vectors.
    theta, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = theta.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    # Perturb one coordinate at a time and evaluate the cost on both sides.
    for i in range(num_parameters):
        theta_plus = np.copy(theta)
        theta_plus[i][0] = theta_plus[i][0] + epsilon
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(theta_plus))

        theta_minus = np.copy(theta)
        theta_minus[i][0] = theta_minus[i][0] - epsilon
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(theta_minus))

        # Centered-difference approximation of dJ/dtheta_i.
        gradapprox[i] = (J_plus[i] - J_minus[i]) / 2 / epsilon

    # Relative L2 distance between the analytic and numerical gradients.
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference > 2e-7:
        print ("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
    else:
        print ("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")
    return difference
|
from PIL import Image
import pytesseract as pt
import os
class ImageFeeder:
    """This class interfaces with the pytesseract OCR transcriber to convert image files into text."""

    @staticmethod  # the method never used an instance; make the class-level call explicit
    def OCR_transcription(folder_path, new_path, f):
        """Transcribe one image and save the text into the new path.

        Arguments:
        folder_path -- directory containing the input image
        new_path -- directory where the ``.txt`` transcript is written
        f -- file name of the image inside folder_path

        Returns the transcript's file name (``<stem>.txt``).
        """
        input_path = os.path.join(folder_path, f)
        # Context managers close the image and output handles deterministically
        # (the original leaked both the PIL image and the open text file).
        with Image.open(input_path) as image_file:
            image_text = pt.image_to_string(image_file, lang="eng")
        out_name = f.split(".")[0] + ".txt"
        final_path = os.path.join(new_path, out_name)
        with open(final_path, "w") as f_parsed:
            f_parsed.write(image_text)
        return out_name
|
# Professional Academy UCD- Data analysis and Visualization on real- world Dataset
# 1) Real World Scenario
# Country Covid Confirmed cases data imported
# from https://www.kaggle.com/harikrishna9/covid19-dataset-by-john-hopkins-university as CSV file
# Country GDP data imported from https://www.kaggle.com/londeen/world-happiness-report-2020 as CSV file
# 2) Importing Data
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# Load and inspect the two source datasets.
covid_dataset = pd.read_csv("RAW_global_confirmed_cases.csv")
print(covid_dataset.head())
print(covid_dataset.shape)
print(covid_dataset.info())
country_gdp = pd.read_csv("WHR20_DataForFigure2.1.csv")
print(country_gdp.head())
print(country_gdp.shape)
print(country_gdp.info())

# 3) Analyzing data
# Drop ‘Province/State’, Lat’ & ‘Long’ columns
covid_dataset.drop(['Province/State', 'Lat', 'Long'], axis=1, inplace=True)
print(covid_dataset.head())

# Group by - sum of people infected per country
# columns[0] is the country column and columns[-1] the most recent date
# column (cumulative confirmed cases); the result is a Series per country.
covid_country = covid_dataset.groupby(covid_dataset.columns[0])[covid_dataset.columns[-1]].sum()
print("after sum")
print(covid_country.head())
# NOTE(review): covid_country is a Series at this point, so this rename maps
# *index labels*; the key 'Country/Region ' carries a trailing space — verify
# it matches the actual label, otherwise this is a no-op.
covid_country_rename = covid_country.rename({'Country/Region ': 'Country name'}, axis=1, inplace=False)
print("covid_country_rename")
print(covid_country_rename.head())

# Slicing of Country GDP Data to exclude incomplete 2020 data
# gdp_data = country_gdp.loc[:, "Ladder score": "2020"]
# Subsetting country_gdp data to focus on logged GDP per capita
twentytwenty_gdp = country_gdp[["Country name", "Regional indicator", "Logged GDP per capita"]]
print(twentytwenty_gdp.head())

# Merging of GDP(GDP per capita per country)data with Covid infections per country
# NOTE(review): covid_country is a Series named after the last date column —
# confirm the merge key lines up with "Country name" in country_gdp.
covid_gdp = country_gdp.merge(covid_country, on="Country name")
print("covid_gdp")
print(covid_gdp.head())
# Show the GDP column labels in upper case WITHOUT rebinding country_gdp.
# The original assigned the unbound method `country_gdp.columns.str.upper`
# (missing call parentheses) to `country_gdp` itself, which broke every later
# use of the DataFrame (e.g. country_gdp["Logged GDP per capita"] below).
print(country_gdp.columns.str.upper())
# Create a list of EU Countries
# (actually a set literal, which is what the membership-style usage needs)
EU_Countries = {"Austria", "Belgium", "Bulgaria", "Croatia", "Cyprus","Denmark",
"Estonia", "Finland", "France", "Germany", "Greece", "Hungary", "Ireland", "Italy",
"Latvia", "Lithuania", "Luxembourg", "Malta", "Netherlands", "Poland", "Portugal",
"Romania", "Slovakia", "Slovenia", "Spain", "Sweden"}
# Create a list of EU countries with similar populations
Select_EU_Countries = {"Finland", "Ireland", "Slovakia"}
print("covid_country.loc[Select_EU_Countries].head()")
print(covid_country.loc[Select_EU_Countries].head())
# NOTE(review): x wraps the whole set in a one-element list and y is
# hard-coded; neither is plotted before the bare plt.show() below.
x = [Select_EU_Countries]
y = [10.4, 10.6,10.2]
plt.show()

# 5) Visualize Data
# Visualize the data for Finland, Ireland & Slovakia
covid_country.loc[Select_EU_Countries].plot(kind='bar', x='GDP(%)', y='EU Countries', color='red')
plt.show()
# NOTE(review): these labels are set *after* plt.show(), so they apply to the
# next figure rather than the bar chart above.
plt.title("Covid Infections and GDP")
plt.ylabel("Covid Cases")
plt.xlabel("EU Countries")
# NOTE(review): the value_counts() result is discarded; this line assumes
# country_gdp is still a DataFrame here — check the earlier reassignment of
# country_gdp in this script.
country_gdp["Logged GDP per capita"] .value_counts()
country_gdp.plot(kind="scatter" , x="Country name", y="Logged GDP per capita")
plt.show()

# Compare the number of people infected with covid and GDP per capita
# in 3 European Countries with similar populations Finland, Ireland & Slovakia
# sns.set_theme(style="whitegrid")
# sns.barplot(y='Country Name', x='Number_of_Pop_infected', data=covid_country.loc[Select_EU_Countries], color="blue")
# sns.barplot(y='Country Name', x='Logged GDP per capita', data=covid_country, color="blue")
# plt.bar(x_array, y_array, color='green')
# plt.title("Covid Infections and GDP")
# plt.xlabel("GDP(%)")
# plt.ylabel("EU Countries")
# plt.show()
# Scatter plot of logged GDP per capita read straight from the CSV.
import matplotlib.pyplot as plt  # NOTE(review): duplicate import (already imported above)
import csv

Names = []
Values = []
with open('WHR20_DataForFigure2.4.csv', 'r') as csvfile:
    lines = csv.reader(csvfile, delimiter=',')
    # NOTE(review): the header row is not skipped, so its text lands in
    # Names/Values too, and row[6] is kept as a string — the x-axis therefore
    # becomes categorical rather than numeric. Confirm this is intended.
    for row in lines:
        Names.append(row[0])
        Values.append(str(row[6]))

plt.scatter(Values, Names, color='g', s=100)
plt.xticks(rotation=25)
plt.xlabel('Names')
plt.ylabel('Values')
plt.title('Logged GDP per Capita', fontsize=20)
plt.show()

# 6) Data Insights
# 1 Ireland has 3 times the number of people infected compared to Finland
# 2 Slovakia has 4.5 times the number of people infected compared to Finland
# 3 Slovakia has the lowest GPD per capita of the 3 countries and the highest number of people infected with Covid
# 4 Finland has the 2nd highest GDP per capita amongst these 3 countries but the lowest Covid infection rates
# 5 There is a correlation between GDP per capita and life expectancy, Ireland has the highest Life Expectancy and the highest GDP per capita.
"""Школьный квест"""
from random import randrange
lesson_1 = False
canteen = False
lesson_2 = False
lesson_3 = False
def lesson_math():
    """Math classroom: finish the first lesson (only once)."""
    global lesson_1
    if lesson_1:
        print("Тут уже никого нет!")
    else:
        print("Урок окончен!")
        lesson_1 = True
def canteen():
    """Canteen (stub, not yet implemented).

    NOTE(review): this definition shadows the module-level ``canteen`` flag
    declared above.
    """
    ...
def lesson_cse():
    """Computer-science classroom (stub, not yet implemented)."""
    ...
def lesson_pe():
    """Gym (stub, not yet implemented)."""
    ...
def timetable():
    """Print the lesson timetable."""
    print("""
=== Расписание уроков ===
| Математика |
| Обед |
| Информатика |
| Физ-ра |
=========================
""")
def hallway():
    """Corridor: the quest's main menu loop."""
    while True:
        print("\n=== Коридор ===\n")
        print("Справа от тебя висит расписание уроков, а дальше идут "
              "классы и другие помещения.")
        print("Смотреть расписание")
        print("Пойти в столовую")
        print("Пойти в спортзал")
        print("Пойти на математику")
        print("Пойти на информатику")
        choice = input("> ")
        # Crude substring matching on the player's free-form input.
        if "смотр" in choice or "расп" in choice:
            timetable()
        elif "стол" in choice:
            canteen()
        elif "спорт" in choice:
            lesson_pe()
        elif "матем" in choice:
            lesson_math()
        elif "инфо" in choice:
            lesson_cse()
        else:
            print(f"Что значит «{choice}»? Реши, что делать дальше?")
        # NOTE(review): nothing in the visible code ever sets lesson_3 to True
        # (lesson_cse/lesson_pe are stubs), so this loop cannot terminate.
        if lesson_3:
            break
    print("Ура, все уроки закончились! Завтра каникулы!")
def start():
    """Entry point: the player enters the school and reaches the corridor."""
    print("Ты зашёл в школу и оказался в коридоре.")
    hallway()


if __name__ == "__main__":
    start()
|
# LPTHW-style exercise comparing counts of people, cars and trucks.
people = 30
cars = 40
trucks = 15

# More cars than people -> take a car.
if cars > people:
    print("Let's take a car")
elif cars < people:
    print("Do not take a car")
else:
    print("Can't choose")

if trucks > cars:
    # Typo fix: the original message read "Too mach trucks".
    print("Too many trucks")
elif trucks < cars:
    print("May be trucks?")
else:
    print("Still can't choose")

if people > trucks:
    print("OK, let's take a truck")
else:
    print("Well, let's be at home")
|
print("Let's play a game!")
print("You are in the dark room and you have 2 doors. Chose one")
door = input("> ")
if door == '1':
print("There are huge bear in this room. What will you do?")
print("1. Teese it")
print("2. Argue it")
bear = input("> ")
if bear == '1':
print("Well, bear bit your face")
elif bear == '2':
print("Well, bear bit your leg")
else:
print("Great! One and only right action. Bear ran away.")
elif door == '2':
print("You faced Ctulchu. What will you do?")
print("1. Told him about Siberia")
print("2. Touch my jacket buttons")
print("3. Sing him a song")
ctulchu = input("> ")
if ctulchu == '1' or ctulchu == '2':
print("You resqued!")
else:
print("You fall into Ctulchu pool.")
else:
print("You become insane")
|
"""
Module implements the Assignment class.
"""
from util.Color import *
class Assignment:
    """
    Class that models assignments as objects.
    Each assignment is identified by 4 fields:
        - studentID <int> - aka numar matricol
        - description <string>
        - deadline <string>
        - grade <float>
    """
    def __init__(self, studentID, description=None, deadline=None, grade=None):
        """
        studentID is the only mandatory field.
        Everything else is optional.
        """
        self.studentID = studentID
        self.description = description
        self.deadline = deadline
        self.grade = grade

    def __repr__(self):
        message = "Student with ID=%s has the following assignment:\n" % Color.strong(self.studentID)
        message += "\t%s: %s\n" % (Color.bold("Description"), self.description)
        message += "\t%s: %s\n" % (Color.bold("Deadline"), self.deadline)
        # Bug fix: grade defaults to None and "%.2f" % None raises TypeError,
        # so only use the float format when a grade has actually been set.
        if self.grade is None:
            message += "\t%s: %s\n" % (Color.bold("Grade"), self.grade)
        else:
            message += "\t%s: %.2f\n" % (Color.bold("Grade"), self.grade)
        return message

    # SET STUFF
    def setDescription(self, newDescription):
        """
        Method sets the description of the current assignment.
        """
        self.description = newDescription

    def setDeadline(self, newDeadline):
        """
        Method sets the deadline of the current assignment.
        """
        self.deadline = newDeadline

    def setGrade(self, newGrade):
        """
        Method sets the grade of the current assignment.
        """
        self.grade = newGrade

    # GET STUFF
    def getStudentID(self):
        """
        Method returns the studentID of the current assignment.
        """
        return self.studentID

    def getDescription(self):
        """
        Method returns the description of the current assignment.
        """
        return self.description

    def getDeadline(self):
        """
        Method returns the deadline of the current assignment.
        """
        return self.deadline

    def getGrade(self):
        """
        Method returns the grade of the current assignment.
        """
        return self.grade
|
"""
Module implements the Color class, which allows
for prettier output.
"""
class Color:
    """ANSI escape sequences for prettier terminal output."""

    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'

    @staticmethod
    def bold(s):
        """
        Method takes a string and
        makes it bold.
        """
        return '%s%s%s' % (Color.BOLD, str(s), Color.END)

    @staticmethod
    def error(s):
        """
        Method takes a string and
        makes it bold and red.
        """
        # Bug fix: the original called bare ``bold(s)``, which raises
        # NameError at run time — class attributes are not in method scope,
        # so the call must be qualified with the class name.
        return '%s%s%s' % (Color.RED, Color.bold(s), Color.END)
|
#!/bin/python3
from collections import deque
from math import sin, cos, sqrt, exp, pi
from random import random, uniform
import matplotlib.pyplot as plt
import logging
"""
https://en.wikipedia.org/wiki/Particle_swarm_optimization
Let S be the number of particles in the swarm, each having a position xi ∈ ℝn in the search-space and a velocity vi ∈ ℝn. Let pi be the best known position of particle i and let g be the best known position of the entire swarm. A basic PSO algorithm is then:
for each particle i = 1, ..., S do
Initialize the particle's position with a uniformly distributed random vector: xi ~ U(blo, bup)
Initialize the particle's best known position to its initial position: pi ← xi
if f(pi) < f(g) then
update the swarm's best known position: g ← pi
Initialize the particle's velocity: vi ~ U(-|bup-blo|, |bup-blo|)
while a termination criterion is not met do:
for each particle i = 1, ..., S do
for each dimension d = 1, ..., n do
Pick random numbers: rp, rg ~ U(0,1)
Update the particle's velocity: vi,d ← ω vi,d + φp rp (pi,d-xi,d) + φg rg (gd-xi,d)
Update the particle's position: xi ← xi + vi
if f(xi) < f(pi) then
Update the particle's best known position: pi ← xi
if f(pi) < f(g) then
Update the swarm's best known position: g ← pi
"""
class Problem:
    """Benchmark 2-D minimisation problems plus PSO/plot configuration."""
    # Per-function configuration:
    #   xMin/xMax/yMin/yMax -- search-space bounds
    #   xLabel/yLabel       -- where the on-plot info box is drawn
    #   population          -- swarm size
    #   maxIterations       -- iteration budget
    #   omega/phiP/phiG     -- PSO inertia, cognitive and social coefficients
    #   particleSize        -- scatter marker size
    #   f                   -- the objective itself; p is an (x, y) tuple
    functions = {
        'McCormick': {
            'xMin': -1.5,
            'xMax': 4,
            'xLabel': -1.4,
            'yMin': -3,
            'yMax': 4,
            'yLabel': 3.2,
            'population': 100,
            'maxIterations': 500,
            'omega': 0.01,
            'phiP': 0.06,
            'phiG': 0.03,
            'particleSize': 4,
            'f': lambda p: sin(p[0] + p[1]) + (p[0] - p[1])**2.0 - 1.5 * p[0] + 2.5 * p[1] + 1.0
        },
        'Cross-in-tray': {
            'xMin': -10,
            'xMax': 10,
            'xLabel': -9.7,
            'yMin': -10,
            'yMax': 10,
            'yLabel': 7.7,
            'population': 100,
            'maxIterations': 1000,
            'omega': 0.005,
            'phiP': 0.025,
            'phiG': 0.025,
            'particleSize': 3,
            # NOTE(review): the textbook Cross-in-tray uses abs(100 - sqrt(...)/pi)
            # inside exp(); this version omits the abs — confirm it is intended.
            'f': lambda p: -0.0001 * (abs(sin(p[0]) * sin(p[1]) * exp(100 - sqrt(p[0]**2 + p[1]**2) / pi)) + 1) ** 0.1
        },
        'Eggholder': {
            'xMin': -512,
            'xMax': 512,
            'xLabel': -500,
            'yMin': -512,
            'yMax': 512,
            'yLabel': 410,
            'population': 500,
            'maxIterations': 1000,
            'omega': 0.15,
            'phiP': 0.01,
            'phiG': 0.01,
            'particleSize': 3,
            'f': lambda p: -(p[1] + 47) * sin(sqrt(abs(p[0] / 2 + (p[1] + 47)))) - p[0] * sin(sqrt(abs(p[0] - (p[1] + 47))))
        },
        'Easom': {
            'xMin': -100,
            'xMax': 100,
            'xLabel': -96,
            'yMin': -100,
            'yMax': 100,
            'yLabel': 75,
            'population': 100,
            'maxIterations': 1000,
            'omega': 0.4,
            'phiP': 0.005,
            'phiG': 0.015,
            'particleSize': 4,
            'f': lambda p: -cos(p[0]) * cos(p[1]) * exp(-((p[0] - pi)**2 + (p[1] - pi)**2))
        },
        'Rastrigin': {
            'xMin': -5.12,
            'xMax': 5.12,
            'xLabel': -5,
            'yMin': -5.12,
            'yMax': 5.12,
            'yLabel': 4.1,
            'population': 100,
            'maxIterations': 1000,
            'omega': 0.4,
            'phiP': 0.005,
            'phiG': 0.015,
            'particleSize': 2,
            'f': lambda p: 10 * 2 + p[0]**2 - 10 * cos(2 * pi * p[0]) + p[1]**2 - 10 * cos(2 * pi * p[1])
        },
    }
    # Active problem selection.
    function = 'McCormick'
    config = functions[function]
    # Extent of the search space along each axis (used for velocity limits).
    xBound = abs(config['xMax'] - config['xMin'])
    yBound = abs(config['yMax'] - config['yMin'])
    # Plot/animation settings.
    iterationsShown = 10           # how many past iterations stay visible
    clearBetweenIterations = False
    saveFrames = False

    def fitness(position):
        # Evaluate the active objective. Called as Problem.fitness(pos); the
        # first parameter is the position tuple, not an instance.
        return Problem.config['f'](position)
class Particle:
    """A single PSO particle: 2-D position, velocity and personal best."""

    def __init__(self, swarm=None):
        self.setRandomPosition()
        self.setRandomVelocity()
        self.bestPosition = self.position  # personal best starts at the spawn point
        self.swarm = swarm

    def __repr__(self):
        return "Particle(%.2f, %.2f)" % self.position

    def setRandomPosition(self):
        """Place the particle uniformly at random inside the search box."""
        self.position = (uniform(Problem.config['xMin'], Problem.config['xMax']),
                         uniform(Problem.config['yMin'], Problem.config['yMax']))

    def setRandomVelocity(self):
        """Draw a random initial velocity bounded by the search-space extent."""
        self.velocity = (uniform(-Problem.xBound, Problem.xBound),
                         uniform(-Problem.yBound, Problem.yBound))

    def updateVelocity(self, i, p, g):
        """Update velocity component i with inertia, cognitive (p) and social (g) pulls."""
        cfg = Problem.config
        component = cfg['omega'] * self.velocity[i] \
            + cfg['phiP'] * p * (self.bestPosition[i] - self.position[i]) \
            + cfg['phiG'] * g * \
            (self.swarm.bestPosition[i] - self.position[i])
        updated = list(self.velocity)
        updated[i] = component
        self.velocity = tuple(updated)

    def updatePosition(self):
        """Move by the current velocity, clamped to the search box."""
        cfg = Problem.config
        clamped_x = min(max(self.position[0] + self.velocity[0], cfg['xMin']), cfg['xMax'])
        clamped_y = min(max(self.position[1] + self.velocity[1], cfg['yMin']), cfg['yMax'])
        self.position = (clamped_x, clamped_y)

    def updateBestPosition(self):
        """Record the current position as the personal best."""
        self.bestPosition = self.position
class SwarmPlot:
    """Live matplotlib view of the swarm: recent iterations plus an info box."""

    def __init__(self, swarm):
        self.swarm = swarm
        # Handles to the artists currently on the axes, kept so they can be
        # removed again as the animation advances.
        self.currentlyPlotted = {
            'iterations': deque([]),  # scatter handles, oldest first
            'text': None              # handle of the info text box
        }

    def clearOldPoints(self):
        # Drop the oldest scatter once more than iterationsShown are on screen.
        iterations = self.currentlyPlotted['iterations']
        if len(iterations) > Problem.iterationsShown:
            iterations[0].remove()
            iterations.popleft()

    def plotCurrentIteration(self):
        # Scatter every particle's current position inside the problem bounds.
        plt.axis([Problem.config['xMin'], Problem.config['xMax'],
                  Problem.config['yMin'], Problem.config['yMax']])
        particles = self.swarm.particles
        xs = list(map(lambda particle: particle.position[0], particles))
        ys = list(map(lambda particle: particle.position[1], particles))
        newIteration = plt.scatter(xs, ys, s=Problem.config['particleSize'])
        self.currentlyPlotted['iterations'].append(newIteration)

    def plotText(self, iteration):
        # Draw the iteration number / best position / best fitness box.
        bestPosition = self.swarm.bestPosition
        bestFitness = Problem.config['f'](bestPosition)
        figureText = 'Iteration %i\n' % iteration + \
            'Best particle: (%.5f, %.5f)\n' % bestPosition + \
            'Best fitness: %.6f' % bestFitness
        self.currentlyPlotted['text'] = plt.text(
            Problem.config['xLabel'],
            Problem.config['yLabel'],
            figureText,
            bbox=dict(facecolor='blue', alpha=0.4),
            fontsize=10,
            family="Ubuntu Mono")

    def clearOldText(self):
        # Remove the previous info box, if one was drawn.
        if self.currentlyPlotted['text'] is not None:
            self.currentlyPlotted['text'].remove()

    def plotEverything(self, iteration):
        """Render one animation frame. Order matters: plot, trim old artists,
        then redraw the label on top."""
        if Problem.clearBetweenIterations:
            plt.clf()
        self.plotCurrentIteration()
        self.clearOldPoints()
        self.clearOldText()
        self.plotText(iteration)
        if iteration == 1:
            plt.tight_layout()
        if Problem.saveFrames:
            plt.savefig("img/%s-%i.png" % (Problem.function, iteration), dpi=200,
                        orientation='landscape')
        plt.pause(0.0001)
class Swarm:
    """The particle population plus the global best position."""

    def __init__(self, population):
        self.particles = [Particle(swarm=self) for _ in range(population)]
        # Global best starts at the fittest initial position.
        self.bestPosition = min([x.position for x in self.particles],
                                key=lambda x: Problem.fitness(x))
        self.plot = SwarmPlot(self)

    def simulate(self):
        """Run the PSO main loop for the configured number of iterations."""
        for iteration in range(1, 1 + Problem.config['maxIterations']):
            logging.info("ITERATION #%i" % iteration)
            self.plot.plotEverything(iteration)
            for particle in self.particles:
                # Velocity first (per dimension with fresh random weights),
                # then the position update.
                for dimension in range(2):
                    p, g = random(), random()
                    particle.updateVelocity(dimension, p, g)
                particle.updatePosition()
                # Track the personal best, then promote it to global best
                # when it improves on the swarm-wide optimum.
                if Problem.fitness(particle.position) < Problem.fitness(particle.bestPosition):
                    particle.updateBestPosition()
                if Problem.fitness(particle.bestPosition) < Problem.fitness(self.bestPosition):
                    self.bestPosition = particle.bestPosition
            logging.info("CURRENT SOLUTION: (%.3f, %.3f)" % self.bestPosition)
def main():
    """Configure logging, build the swarm and run the optimisation."""
    logging.basicConfig(level=logging.INFO)
    swarm = Swarm(Problem.config['population'])
    swarm.simulate()
    logging.info("Found best position = (%.6f, %.6f) " % swarm.bestPosition +
                 "after %i iterations" % Problem.config['maxIterations'])


if __name__ == '__main__':
    main()
|
a=[{"maths":90,"science":40,"marathi":70},{"maths":40,"science":50,"marathi":60},
{"maths":50,"science":60,"marathi":70}]
subject=(input("enter a subject: "))
i=0
while i<len(a):
k=a[i]
if subject in k:
print(a[i][subject],k)
i=i+1 |
#!/usr/bin/python
"""
Starter code for exploring the Enron dataset (emails + finances);
loads up the dataset (pickled dict of dicts).
The dataset has the form:
enron_data["LASTNAME FIRSTNAME MIDDLEINITIAL"] = { features_dict }
{features_dict} is a dictionary of features associated with that person.
You should explore features_dict as part of the mini-project,
but here's an example to get you started:
enron_data["SKILLING JEFFREY K"]["bonus"] = 5600000
"""
import pickle

# NOTE(review): Python 2 script throughout (print statements, indexable
# dict.values()). The pickle is opened in text mode ("r") and the handle is
# never closed; "rb" inside a with-block would be safer.
enron_data = pickle.load(open("../final_project/final_project_dataset.pkl", "r"))

### My code starts here ###
# Basic shape of the dataset: a dict of person name -> feature dict.
print "Dataset length: ", len(enron_data)
print "Number of features: ", len(enron_data.values()[0])
print "First person data: ", enron_data.values()[0]

# Count the number of POI:
# person_name - key
num_of_poi = 0
for person_name in enron_data:
    if (enron_data[person_name]['poi'] == 1):
        num_of_poi += 1
print "Number of POI: ", num_of_poi

# QUERY THE DATASET - 1
# Total value of the stock belonging to James Prentice
person_name = 'PRENTICE JAMES'
parameter = 'total_stock_value'
print "James Prentice stock: ", enron_data[person_name][parameter]

# QUERY THE DATASET - 2
# How many email messages do we have from Wesley Colwell to person of interest?
person_name = 'COLWELL WESLEY'
parameter = 'from_this_person_to_poi'
print "Messages from Wesley Colwell to PoI: ", enron_data[person_name][parameter]

# QUERY THE DATASET - 3
# The value of stock options exercised by Jeffrey K Skilling?
person_name = 'SKILLING JEFFREY K'
parameter = 'exercised_stock_options'
print "Value of stock options exercised by Jeffrey K Skilling: ", enron_data[person_name][parameter]

# Who took home the most money? Compare total_payments for the three executives.
person_name = 'FASTOW ANDREW S'
parameter = 'total_payments'
print "Andy Fastow: ", enron_data[person_name][parameter]
person_name = 'SKILLING JEFFREY K'
print "Jeffrey Skilling: ", enron_data[person_name][parameter]
person_name = 'LAY KENNETH L'
print "Ken Lay: ", enron_data[person_name][parameter]

# Count valid emails ('NaN' — the string — is this dataset's missing marker)
valid_emails = 0
for p in enron_data:
    if enron_data[p]['email_address'] != 'NaN':
        valid_emails += 1
print "Valid emails: ", valid_emails

# Count quantified salaries
quantified_salaries = 0
for p in enron_data:
    if enron_data[p]['salary'] != 'NaN':
        quantified_salaries += 1
print "Quantified salaries: ", quantified_salaries

# 1. Persons who has a NaN value for their total payments
# 2. Count percentage of people from dataset as a whole
q_total_payments = 0
q_total = 0
for p in enron_data:
    q_total += 1
    if enron_data[p]['total_payments'] == 'NaN':
        q_total_payments += 1
print "NaN for total payments have: ", q_total_payments
print "NaN for total payments have (%): ", (float(q_total_payments)/float(q_total))*100

# 1. POI who has a NaN value for their total payments
# 2. Count percentage of POI from dataset as a whole
# NOTE(review): num_of_poi is reused here, so after this loop it holds
# "POI with NaN total_payments", not the earlier overall POI count.
num_of_poi = 0
for p in enron_data:
    if enron_data[p]['poi'] == 1:
        if enron_data[p]['total_payments'] == 'NaN':
            num_of_poi += 1
print "Number of POI: ", num_of_poi
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Python basics demo: variables and list operations.

counter = 100      # integer variable
miles = 1000.0     # float
name = "John"      # string

print(counter)
print(miles)
print(name)

# The original named this variable `list`, shadowing the builtin of the same
# name; use a descriptive name instead.
sample_list = ['runoob', 786, 2.23, 'john', 70.2]
tinylist = [123, 'john']

print(sample_list)             # print the whole list
print(sample_list[0])          # print the first element
print(sample_list[1:3])        # print the second and third elements
print(sample_list[2:])         # print everything from the third element onward
print(tinylist * 2)            # print the small list twice
print(sample_list + tinylist)  # print the concatenation of both lists
print(int(123.5))              # int() truncates toward zero
print("*************İleri Seviye Kümeler*************")
liste=[1,2,3,4,5,6,7,8,9,1,2,3,5,6,1,2,3]
küme={1,2,3,4,5,6,7}
kümedönüsüm=set(liste)
print(küme)
pythonküme=set("Python Programlama Dili")
print(pythonküme)
#kümeler her karakteri bir tane olacak şekilde oluşur ve kümelerin elemanlarına direkt erişilemez
# print(küme[1]) bu kullanım küme indeksine direkt ulaşmak istediği için hata verecektir
#kümelerin elemanları arasında döngüler ile gezilebilir veya
listeküme=list(küme)
print(listeküme[1])
#böye ulaşabiliriz burada kümeyi tip dönüşümü ile bir liste veri tipine dönüştürüdük
#ve sonra oluşturduğumuz liste üzerinden elemana eriştik
print("*************add Metodu*************")
pythonküme.add("Yeni Eklenen Eleman")
print(pythonküme)
pythonküme.add("Ahmet")
print(pythonküme)
#the printed order differs between runs: sets are unordered (no indexing),
#so there is no fixed element order in the output
print("*************differance Metodu*************")
print(kümedönüsüm.difference(küme))#elements of kümedönüsüm that are not in küme
#i.e. difference() returns the elements of the receiver set that are not
#present in the set passed as the argument
print("*************differance-update Metodu*************")
kümea={1,2,3,4,5,6,7,8,9}
kümeb={1,2,5,6,9,7,"Ahmet",11,56}
kümeb.difference_update(kümea)
kümea.difference_update(kümeb)
print(kümea)
print(kümeb)
#difference_update() computes the difference of the first set from the
#second and updates the first set in place with that difference
print("*************intersection Metodu*************")
küme1={1,2,3,4,5,6,7,8,9}
küme2={1,2,5,6,9,7,"Ahmet",11,56}
print(küme1.intersection(küme2))
#intersection() returns the intersection of the two sets
print("*************intersection-update Metodu*************")
küme1.intersection_update(küme2)
print(küme1)#intersection_update() computes the intersection of the two sets
#and replaces the first set with it
print("*************isdisjoint Metodu*************")
kümeisim={"Esat","Sait","Yusuf","Belgin","Ahmet"}
print(küme1.isdisjoint(kümeisim))
print(küme2.isdisjoint(kümeisim))
#isdisjoint() checks whether the two sets have an empty intersection,
#i.e. whether they are disjoint: it returns True when the intersection is
#empty (the sets are disjoint) and False when they share any element
print("*************issubset Metodu*************")
kümealt={"Sait","Yusuf","Ahmet"}
kümealt1={"Yusuf","Sevde","Merve"}
kümealt2={1,2,3,4,5,6,7,8,9}
print(kümealt.issubset(kümeisim))
print(kümealt.issubset(kümeisim))
print(kümealt.issubset(kümealt2))
#NOTE(review): the first two issubset calls are identical — the second was
#probably meant to use kümealt1; confirm intent before changing output
#issubset() checks whether the first set is a subset of the second:
#True when it is a subset, False otherwise
print("*************Union Metodu*************")
kümebirlesim=kümealt.union(kümealt1.union(kümealt2))
print(kümebirlesim)
#union() returns the union of two sets; since it accepts another union as
#its argument, several sets can be merged in one expression, as done here
|
# -*- coding: utf-8 -*-
# Reading input file: dev.in
import sys
# Path to the input file comes from the first command-line argument.
file_location = sys.argv[1]
# Accumulators: per-sentence word/label lists for the whole file.
words = list()
labels = list()
word_seq = list()
label_seq = list()
"""The following read file method allow us to get file input and store each sentence into
a sublist"""
def read(file, word_seq, label_seq):
    """Read a `word label` file into per-sentence lists.

    Sentences are separated by blank lines.  Each finished sentence's words
    are appended to *word_seq* and its labels to *label_seq* (both mutated
    in place).

    Fixes: removed the leftover debug print of every word, and split on the
    LAST space (rsplit) so tokens containing spaces keep their label.
    """
    words = list()
    labels = list()
    with open(file) as infile:
        for line in infile:
            if line == '\n':
                # a blank line closes the current sentence
                word_seq.append(words)
                label_seq.append(labels)
                words = list()
                labels = list()
            else:
                word, label = line.strip().rsplit(' ', 1)
                words.append(word)
                labels.append(label)
# Parse the whole input file into per-sentence word/label lists.
read(file_location, word_seq, label_seq)
#print(word_seq)
#print(label_seq)
#####################################################################
"""write file function"""
|
def is_even_number(number):
    """Return True when *number* is divisible by two."""
    remainder = number % 2
    return remainder == 0
def do_next_step(number):
    """Apply one Collatz step: n/2 when even, 3n+1 when odd.

    Uses floor division so the sequence stays in exact integers — the
    original's true division (``/``) promoted the value to float, which
    loses precision for intermediate values above 2**53.
    """
    if is_even_number(number):
        return number // 2
    else:
        return number * 3 + 1
def count_steps(number):
    """Return how many Collatz iterations it takes *number* to reach 1."""
    steps = 0
    current = number
    while current != 1:
        current = do_next_step(current)
        steps += 1
    return steps
def generate_range(start, end):
    """Return a dict mapping each integer in [start, end] to 0.

    dict.fromkeys builds the zero-filled mapping in one call instead of a
    manual loop; behavior (including an empty dict when start > end) is
    unchanged.
    """
    return dict.fromkeys(range(start, end + 1), 0)
def get_count_of_steps_for_every_number(start, end):
    """Map every integer in [start, end] to its Collatz step count.

    The original pre-built a zero-filled dict via generate_range() and then
    immediately overwrote every entry; a single comprehension does the same
    work once.
    """
    return {x: count_steps(x) for x in range(start, end + 1)}
def find_the_highest_count_of_steps(actual_range, start, end):
    """Return "<max_steps> <number>" for the number in [start, end] whose
    step count in *actual_range* is highest (first maximum wins on ties)."""
    best_steps = 0
    best_number = 0
    for candidate in range(start, end + 1):
        steps = actual_range[candidate]
        if steps > best_steps:
            best_steps = steps
            best_number = candidate
    return str(best_steps) + " " + str(best_number)
def read_line_from_file(path_to_input_file, number_of_line):
    """Return line *number_of_line* (1-based) of the file, or '' past EOF.

    Fix: the original never closed the file handle; a context manager
    guarantees it is closed even on error.
    """
    with open(path_to_input_file) as file:
        line = ""
        for _ in range(number_of_line):
            line = file.readline()
        return line
def parse_range(line):
    """Parse 'a b' into [int(a), int(b)], splitting at the first space."""
    first, second = line.split(' ', 1)
    return [int(first), int(second)]
def generate_output_file(path_to_input_file):
    """Process every range in the input file and write results to output.txt.

    Returns the written text.

    Fixes: the per-line reads used a hard-coded "test_input.txt" instead of
    *path_to_input_file*, and the output file handle was never closed.
    """
    first_line = read_line_from_file(path_to_input_file, 1)
    count_of_inputs = int(first_line[:first_line.index("\n")])
    result = ""
    for x in range(2, count_of_inputs + 2):
        int_range = parse_range(read_line_from_file(path_to_input_file, x))
        counted_range = get_count_of_steps_for_every_number(int_range[0], int_range[1])
        result += find_the_highest_count_of_steps(counted_range, int_range[0], int_range[1])
        # no trailing newline after the last result line
        if not x + 1 == count_of_inputs + 2:
            result += "\n"
    with open('output.txt', 'w') as output_file:
        output_file.write(result)
    return result
|
from torch import nn
import torch.nn.functional as F
def _reduce(x, reduction='elementwise_mean'):
if reduction is 'none':
return x
elif reduction is 'elementwise_mean':
return x.mean()
elif reduction is 'sum':
return x.sum()
else:
raise ValueError('No such reduction {} defined'.format(reduction))
class MSELoss(nn.Module):
    """Weighted mean squared error loss.

    Each observation is weighted by ``w = 1 + confidence * (target > 0)``,
    so positive targets can count more heavily, and the loss is
    ``w * (target - input) ** 2``.

    Args:
        confidence (float, optional): extra weight applied to positive
            observations. Default: 0 (plain MSE).
        reduction (string, optional): 'none' | 'elementwise_mean' | 'sum';
            how the per-element losses are combined.
            Default: 'elementwise_mean'.
    """
    def __init__(self, confidence=0, reduction='elementwise_mean'):
        super().__init__()
        self.confidence = confidence
        self.reduction = reduction

    def forward(self, input, target):
        # weight positives up-front, then reduce the weighted squared error
        positive_mask = (target > 0).float()
        scale = 1 + self.confidence * positive_mask
        squared_error = F.mse_loss(input, target, reduction='none')
        return _reduce(scale * squared_error, reduction=self.reduction)
class MultinomialNLLLoss(nn.Module):
    """Negative log-likelihood of the multinomial distribution.

    .. math::
        \\ell(x, y) = - y \\cdot \\log(softmax(x))

    Args:
        reduction (string, optional): 'none' | 'elementwise_mean' | 'sum';
            how the per-element losses are combined.
            Default: 'elementwise_mean'.
    """
    def __init__(self, reduction='elementwise_mean'):
        super().__init__()
        self.reduction = reduction

    def forward(self, input, target):
        # log-softmax over the class dimension, weighted by the targets
        log_probs = F.log_softmax(input, dim=1)
        return _reduce(- target * log_probs, reduction=self.reduction)
|
import scipy.stats as stats
# value of interest (number of successes)
# change this
x = 3
# sample size (number of trials)
# change this
n = 10
# P(exactly x heads in n fair coin flips), binomial pmf with p = 0.5
prob_1 = stats.binom.pmf(x, n, 0.5)
print(prob_1)
## Question 2
## seven heads out of 20 fair coin flips
prob_2 = stats.binom.pmf(7, 20, 0.5)
print(prob_2)
|
import re

# Interactive questionnaire: re-asks for the name until it contains no digits.
print("INFORMACION PERSONAL")
while True:
    print("Como se llama?")
    nom = input()
    # Fix: the original joined the found digit groups with commas and tested
    # str.isalnum(); a name with TWO digit groups produced "12,3", which is
    # not alnum, so the invalid name was accepted.  Simply re-ask while any
    # digit is present.
    if not re.search(r'[0-9]', nom):
        break
print("Cual es su fecha de nacimiento?")
date = input()
print("Cual es su direccion?")
add = input()
print("cuales son sus metas personales?")
goal = input()
print(f"-nombre: {nom}")
print(f"-Fecha de Nacimiento: {date}")
print(f"-Direccion: {add}")
print(f"-Metas Personales: {goal}")
|
# -*- coding: UTF-8 -*-
import sys
import copy
from random import choice
class Jungle(object):
    """State and rules for a game of Jungle (Dou Shou Qi).

    Board glyphs: '.' plain field, '~' water, '#' trap, '*' den.
    Piece letters (weakest to strongest): r c d w j t l e, lower case for
    player 0 and upper case for player 1.
    """
    def __init__(self, player = 0, pieces = None):
        self.board = self.Initial_board()
        if pieces is None:
            self.pieces = self.Initial_pieces()
        else:
            self.pieces = pieces
        self.player = player
        # moves since the last capture; 30 quiet moves force a draw check
        self.last_beat = 0
        self.players_pieces = {0: ['r', 'c', 'd', 'w', 'j', 't', 'l', 'e'],
                               1: ['R', 'C', 'D', 'W', 'J', 'T', 'L', 'E']}
        # hierarchy maps each piece letter to its rank (0 = rat ... 7 = elephant)
        self.hierarchy = {}
        i = 0
        for e in self.players_pieces[0]:
            self.hierarchy[e] = i
            i += 1
        i = 0
        for e in self.players_pieces[1]:
            self.hierarchy[e] = i
            i += 1
    def Initial_board(self):
        # Static terrain layout; split() ignores the indentation whitespace.
        board = '''
        ..#*#..
        ...#...
        .......
        .~~.~~.
        .~~.~~.
        .~~.~~.
        .......
        ...#...
        ..#*#..
        '''
        return [list(e.strip()) for e in board.split()]
    def Initial_pieces(self):
        # Starting piece placement; '.' means an empty square.
        pieces = '''
        L.....T
        .D...C.
        R.J.W.E
        .......
        .......
        .......
        e.w.j.r
        .c...d.
        t.....l
        '''
        return [list(e.strip()) for e in pieces.split()]
    def find_piece(self, tpl):
        # Piece letter at board coordinates (row, col).
        x, y = tpl
        return self.pieces[x][y]
    def find_place(self, piece):
        # Coordinates of the (unique) piece letter, or None if captured.
        for i in range(len(self.pieces)):
            for j in range(len(self.pieces[i])):
                if self.pieces[i][j] == piece:
                    return (i, j)
    def Check_draw(self):
        # Draw after 30 consecutive moves without a capture.
        if self.last_beat == 30:
            return True
        else:
            return False
    def Check_victory(self):
        # A den occupied by any piece ends the game; otherwise a player with
        # no pieces left loses.
        # NOTE(review): the return types are inconsistent (0/1 for den wins,
        # '0 won'/'1 won' strings for eliminations, False otherwise) — the
        # callers appear to rely on this, so it is left as is.
        if self.pieces[0][3] != '.':
            return 0
        elif self.pieces[8][3] != '.':
            return 1
        else:
            pieces_left = set()
            for i in range(len(self.pieces)):
                for j in range(len(self.pieces[i])):
                    if self.pieces[i][j] != '.':
                        pieces_left.add(self.pieces[i][j])
            if all(e not in pieces_left for e in self.players_pieces[0]):
                return '1 won'
            elif all(e not in pieces_left for e in self.players_pieces[1]):
                return '0 won'
            else:
                return False
    def Win_when_draw(self):
        # Tie-break on a draw: compare surviving pieces from strongest down;
        # the first player to uniquely hold the stronger piece wins.
        # NOTE(review): reverse() mutates self.players_pieces in place, so a
        # second call compares in the opposite order — confirm intent.
        player_0 = self.players_pieces[0]
        player_1 = self.players_pieces[1]
        player_1_have = set()
        player_0_have = set()
        for i in range(len(self.pieces)):
            for j in range(len(self.pieces[i])):
                if self.pieces[i][j] in player_0:
                    player_0_have.add(self.pieces[i][j])
                if self.pieces[i][j] in player_1:
                    player_1_have.add(self.pieces[i][j])
        player_0.reverse()
        player_1.reverse()
        i = 0
        while i < 8:
            if player_1[i] not in player_1_have and player_0[i] in player_0_have:
                return '0 won'
            if player_0[i] not in player_0_have and player_1[i] in player_1_have:
                return '1 won'
            i += 1
        return '1 won'
    def Where_I_land(self, start, move):
        # Destination square of applying *move* to *start*, or False when it
        # would leave the 9x7 board.
        x, y = start
        x1, y1 = move
        x += x1
        y += y1
        if x >= 9 or y >= 7 or x < 0 or y < 0:
            return False
        return (x,y)
    def Move_LorT(self, start, move):
        # Lion/tiger jump: keep sliding across water; blocked by a rat in
        # the water (returns None).
        # NOTE(review): Where_I_land can return False here, which would fail
        # to unpack — presumably jumps from the river edge never leave the
        # board, but this is unguarded.
        x, y = self.Where_I_land(start, move)
        if self.board[x][y] == '~':
            if self.pieces[x][y] != 'r' and self.pieces[x][y] != 'R':
                return self.Move_LorT((x,y), move)
            else: return None
        else:
            return (x,y)
    def moves(self, player):
        # All legal moves for *player* as {piece: set of destinations},
        # or None when the player has no legal move at all.
        posible_moves = [(i, j) for i in [-1, 1, 0] for j in [-1, 1, 0] if
                         (i == 0 or j == 0) and not (i == 0 and j == 0)]
        pieces_to_move = self.players_pieces[player]
        res = {}
        for e in pieces_to_move:
            res[e] = set()
        for i in range(len(self.board)):
            for j in range(len(self.board[i])):
                if self.pieces[i][j] in pieces_to_move:
                    for e in posible_moves:
                        new_poz = self.Where_I_land((i,j), e)
                        if new_poz:
                            # print(new_poz)
                            x, y = new_poz
                            whereIland = self.board[x][y]
                            piece = self.pieces[i][j]
                            # plain field or trap: always enterable, except a
                            # rat leaving water cannot capture on landing
                            if whereIland == '.' or whereIland == '#':
                                if (piece == 'r' or piece == 'R') and self.board[i][j] == '~' and self.pieces[x][y] != '.':
                                    pass
                                else:
                                    res[piece].add(new_poz)
                            # den: only the opponent's den may be entered
                            elif whereIland == '*':
                                if player == 0 and i < 5:
                                    res[piece].add(new_poz)
                                elif player == 1 and i > 5:
                                    res[piece].add(new_poz)
                            # water: only rats swim; no capturing while
                            # entering the water from land
                            elif whereIland == '~':
                                if piece == 'r' or piece == 'R':
                                    if self.board[i][j] == '.' and self.board[x][y] == '~' and self.pieces[x][y] != '.':
                                        pass
                                    else:
                                        res[piece].add(new_poz)
                            '''if piece == 't' or piece == 'l':
                                new_poz = self.Move_LorT((i,j), e, 'R')
                                if new_poz is not None:
                                    res[piece].add(new_poz)
                            if piece == 'T' or piece == 'L':
                                new_poz = self.Move_LorT((i, j), e, 'r')
                                if new_poz is not None:
                                    res[piece].add(new_poz)'''
                            # lions and tigers additionally jump the river
                            if piece == 't' or piece == 'T' or piece == 'l' or piece == 'L':
                                new_poz = self.Move_LorT((i, j), e)
                                if new_poz is not None:
                                    res[piece].add(new_poz)
        # second pass: drop destinations occupied by own pieces, and (outside
        # traps) by stronger enemies — except rat (rank 0) beats elephant (7)
        true_res = copy.deepcopy(res)
        for e in res:
            for f in res[e]:
                x1, y1 = f
                if self.pieces[x1][y1] != '.':
                    if self.pieces[x1][y1] in pieces_to_move:
                        true_res[e] -= set([f])
                    elif self.board[x1][y1] != '#':
                        if self.hierarchy[e] == 0 and self.hierarchy[self.pieces[x1][y1]] == 7:
                            pass
                        elif self.hierarchy[e] < self.hierarchy[self.pieces[x1][y1]]:
                            true_res[e] -= set([f])
        if all(true_res[e] == set() for e in true_res):
            return None
        return true_res
    def do_move(self, what, where):
        # Apply the move of piece *what* to square *where*; (-1,-1)/(-1,-1)
        # encodes a pass.  Returns a result string when the game ends.
        self.player = (self.player + 1) % 2
        if what == (-1, -1) and where == (-1,-1):
            return
        x,y = where
        x1, y1 = self.find_place(what)
        self.pieces[x1][y1] = '.'
        # a capture resets the no-capture counter used for draws
        if self.pieces[x][y] != '.':
            self.last_beat = 0
        else:
            self.last_beat += 1
        self.pieces[x][y] = what
        if self.Check_victory() != False:
            return 'won ' + self.Check_victory().__str__()
        elif self.Check_draw():
            return self.Win_when_draw()
        else:
            return
class Player:
    """Protocol driver: reads judge commands on stdin, answers on stdout,
    and plays uniformly random legal moves in a Jungle game.

    Fix: removed the leftover debug ``print(..., file=open('test.txt','w'))``
    that re-created and overwrote test.txt on every single move.
    """
    def __init__(self):
        self.reset()
    def reset(self):
        """Start a fresh game and announce readiness to the judge."""
        self.game = Jungle()
        self.player = 1  # set properly once we learn which side we play
        self.say('RDY')
    def say(self, what):
        """Write one protocol line and flush so the judge sees it at once."""
        sys.stdout.write(what)
        sys.stdout.write('\n')
        sys.stdout.flush()
    def hear(self):
        """Read one protocol line; return (command, argument list)."""
        line = sys.stdin.readline().split()
        return line[0], line[1:]
    def loop(self):
        """Main command loop: apply the opponent's move (HEDID) or take the
        first turn (UGO), then answer with a random legal move (IDO)."""
        while True:
            cmd, args = self.hear()
            if cmd == 'HEDID':
                # judge sends (y, x) pairs; the game works in (x, y)
                start = tuple(int(m) for m in args[2:4])
                ys, xs = start
                move = tuple(int(m) for m in args[4:])
                yd, xd = move
                self.game.do_move(self.game.find_piece((xs, ys)), (xd,yd))
            elif cmd == 'ONEMORE':
                self.reset()
                continue
            elif cmd == 'BYE':
                break
            else: # 'UGO'
                self.player = 0
            my_moves = self.game.moves(self.player)
            if my_moves is None:
                # no legal move: pass, encoded as all -1 coordinates
                xs, ys, xd, yd = -1,-1,-1,-1
            else:
                # pick a piece that has at least one destination, then a move
                with_what = choice([e for e in my_moves])
                while my_moves[with_what] == set():
                    with_what = choice([e for e in my_moves])
                xs, ys = self.game.find_place(with_what)
                xd, yd = choice(list(my_moves[with_what]))
                self.game.do_move(with_what, (xd, yd))
            self.say('IDO %d %d %d %d' % (ys,xs,yd,xd))
if __name__ == '__main__':
player = Player()
player.loop() |
"""
Input consists of movement commands. Calculate your final position after responding
to each command. Start from zero. Report depth multiplied by horizontal position.
Part 1: Commands are:
    - forward x: increase horizontal position by x units
    - down x: increase depth by x units
    - up x: decrease depth by x units
Part 2: Commands are:
    - down X: increases your aim by X units.
    - up X decreases your aim by X units.
    - forward X does two things:
        - It increases your horizontal position by X units.
        - It increases your depth by your aim multiplied by X.
"""
# Part 1: 'down'/'up' change depth directly, 'forward' moves horizontally.
pos = 0
depth = 0
with open('2021/data/day02') as f:
    for line in f:
        cmd, ns = line.split()
        n = int(ns)
        if cmd == 'forward':
            pos+= n
        elif cmd == 'down':
            depth+= n
        elif cmd == 'up':
            depth-= n
print('Part 1: %d' % (depth * pos))
# Part 2: 'down'/'up' steer the aim; 'forward' moves and dives by aim * n.
pos = 0
depth = 0
aim = 0
with open('2021/data/day02') as f:
    for line in f:
        cmd, ns = line.split()
        n = int(ns)
        if cmd == 'forward':
            pos+= n
            depth+= (aim * n)
        elif cmd == 'down':
            aim+= n
        elif cmd == 'up':
            aim-= n
print('Part 2: %d' % (depth * pos))
# Part 1: 2147104
# PPart 2: 2044620088 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import math
# f=abs
# print f(-20)
# def add(x,y,f):
# return f(x)+f(y)
# print add(-3,-9,abs)
# print add(25,9,math.sqrt)
# def f(x):
# return x*x
# print map(f,[1,2,3,4,5,6,7,8,9])
#假设用户输入的英文名字不规范,没有按照首字母大写,后续字母小写的规则,请利用map()函数,
#把一个list(包含若干不规范的英文名字)变成一个包含规范英文名字的list
# def f(L):
# return L[0].upper()+L[1:].lower()
# print map(f,['adam', 'LISA', 'barT'])
# #请利用reduce()来求积
# def prod(x, y):
# return x*y
# print reduce(prod, [2, 4, 5, 7, 12])
# #取出一个list中的奇数
# def is_odd(x):
# if x%2==1:
# return x
# print filter(is_odd,[1,4,6,7,9,12,17])
# #利用filter()过滤出1~100中平方根是整数的数,即结果应该是:[1,4,9,16,25,36,49,64,81,100]
# def is_sqr(x):
# r = int(math.sqrt(x)) #取出x的平方根的整数
# return r*r== x #返回这个整数的平方!!!
# print filter(is_sqr,range(1, 101)) #整数的平方的范围是1~101,不包含101
# def t(s1,s2):
# u1 = s1.upper()
# u2 = s2.upper()
# return u1,u2
# print t('bob','about')
# #对字符串排序时,有时候忽略大小写排序更符合习惯.请利用sorted()高阶函数,实现忽略大小写排序的算法.
# #分析:对于比较函数cmp_ignore_case(s1,s2),要忽略大小写比较,就是先把两个字符串都变成大写(或者都变成小写),再进行比较
# def cmp_ignore_case(s1,s2):
# u1 = s1.upper()
# u2 = s2.upper()
# if u1 >u2:
# return 1
# if u1<u2:
# return -1
# return 0
# print sorted(['bob','about','Zoo','Credit'],cmp_ignore_case)
# def calc_prod(lst):
# def lazy_prod():
# def f(x,y):
# return x*y
# return reduce(f,lst,1)
# return lazy_prod()
#
# f = calc_prod([1, 2, 3, 4])
# print f
# myabs = lambda x:-x if x<0 else x
# print myabs(-1)
#
#
#匿名函数
# def is_not_empty(s):
# return s and len(s.strip()) > 0
# print filter(is_not_empty, ['test', None, '', 'str', ' ', 'END'])
#
#
# print filter(lambda s:s and len(s.strip()) >0,['test', None, '', 'str', ' ', 'END'])
#
# print sorted([1,3,9,5,0],lambda x,y:-cmp(x,y))
#装饰器
# def f1(x):
# print 'call f(1)'
# return x*2
# print f1(5)
# def f1(x):
# return x*2
# def new_fn(f): #new_fn()就是一个装饰器函数,它既包含了原函数的调用,又有一行日志来增强原函数的功能
# def fn(x):
# print 'call ' +f.__name__+'()'
# return f(x)
# return fn
# #调用new_fn()装饰器函数
# g1= new_fn(f1)
# print g1(6)
# import time
# def performance(f):
# def fn(*args,**kw):
# print time.strftime('%Y-%m-%d,%H:%M:%S',time.localtime(time.time()))
# return f(*args,**kw)
# return fn
#
# @performance
# def factorial(n):
# return reduce(lambda x,y: x*y, range(1, n+1))
#
# print factorial(10)
# L=[75,98,59,81,66,43,69,85]
# sum=0.0
# n=0
# for x in L:
# if x<60:
# continue
# sum=sum+x
# n=n+1
# print sum/n
#
#
#
# Average of the scores that are >= 60 (passing grades only).
# NOTE: this is Python 2 (print statement syntax); `sum` shadows the builtin.
L=[75,98,59,81,66,43,69,85]
sum=0.0
n=0
for x in L:
    if x>=60:
        sum=sum+x
        n=n+1
print sum/n
# Print every two-digit number whose tens digit is less than its units digit.
for x in [1,2,3,4,5,6,7,8,9]:
    for y in [0,1,2,3,4,5,6,7,8,9]:
        if x<y:
            print (x*10+y)
|
'''
Generates dataframe for multiple tickers
input -- ticker.csv
output -- dataframe with each output column representing one ticker
'''
import pandas as pd
import matplotlib.pyplot as plt
def test_run():
    '''Build one DataFrame of Adj Close prices, one column per ticker.'''
    start_date='2010-01-22'
    end_date='2015-02-10'
    # calendar range that anchors the joins
    trading_dates = pd.date_range(start_date, end_date)
    df = pd.DataFrame(index=trading_dates)
    # SPY first (it defines the trading calendar), then the other tickers;
    # each inner join drops dates where that symbol has no data
    for symbol in ['SPY', 'GOOG', 'AAPL', 'GLD', 'A']:
        prices = pd.read_csv("data/{}.csv".format(symbol),
                             index_col="Date", parse_dates=True,
                             usecols=['Date', 'Adj Close'],
                             na_values=['nan'])
        prices = prices.rename(columns={'Adj Close': symbol})
        df = df.join(prices, how='inner')
    return df
def plot_stocks(df,title='Stock Prices'):
    '''Plot every column of *df* as a labelled price series.'''
    axes = df.plot(title=title, fontsize=2)
    axes.set_xlabel('Dates')
    axes.set_ylabel('Prices')
    plt.show()
def normalize_data(df):
    ''' Scale all prices relative to the first row so columns are comparable.

    Fix: DataFrame.ix was deprecated and removed from pandas; .iloc is the
    positional equivalent (divides every row by the first row).
    '''
    return df / df.iloc[0]
if __name__=="__main__":
df=test_run()
df=normalize_data(df)
plot_stocks(df) |
# Tiny vocabulary lookup demo.
d = {"immutable":"cannot change","mutable":"can change","ascertain":"make sure of something","fugitive":"foreigner"}
print(d)
i = input("Enter any word from dict:\n")
# Fix: d[i] raised a KeyError for any word not in the dict; .get degrades
# gracefully with a message instead of crashing.
print(d.get(i, "Word not found in dict"))
|
### A professor with two assistants, Jamie and Drew, wants an attendance list of the students, in the order that they arrived in the classroom.
# Drew was the first one to note which students arrived, and then Jamie took over.
# After the class, they each entered their lists into the computer and emailed them to the professor, who needs to combine them into one, in the order of each student's arrival.
# Jamie emailed a follow-up, saying that her list is in reverse order.
# Complete the steps to combine them into one list as follows:
# the contents of Drew's list, followed by Jamie's list in reverse order, to get an accurate list of the students as they arrived.
# Function defined. This function takes lists as arguments.
def combine_lists(list1, list2):
    """Return list2 followed by list1 in reverse order.

    Drew's list (*list2*) is in arrival order; Jamie's list (*list1*) was
    recorded in reverse, so it is appended back-to-front.

    Fix: the original called list1.reverse(), permanently mutating the
    caller's list as a side effect; reversed slicing leaves both inputs
    untouched.
    """
    new_list = []
    new_list.extend(list2)
    new_list.extend(list1[::-1])
    return new_list
# Test data: Jamie's list, recorded in reverse arrival order.
Jamies_list = ["Alice", "Cindy", "Bobby", "Jan", "Peter"]
# Test data: Drew's list, recorded in arrival order.
Drews_list = ["Mike", "Carol", "Greg", "Marcia"]
print(combine_lists(Jamies_list, Drews_list))
|
### Use a dictionary to count the frequency of letters in the input string.
# Only letters should be counted, not blank spaces, numbers, or punctuation.
# Upper case should be considered the same as lower case.
# For example, count_letters("This is a sentence.") should return {'t': 2, 'h': 1, 'i': 2, 's': 3, 'a': 1, 'e': 3, 'n': 2, 'c': 1}.
# Function defined
def count_letters(text):
    """Count letter frequencies in *text*, case-insensitively.

    Only alphabetic characters are counted; spaces, digits and punctuation
    are ignored.  Returns a dict mapping lower-case letter -> count.
    """
    result = {}
    # lower-case once up front instead of per character
    for letter in text.lower():
        # str.isalpha() is already a bool; comparing == True was redundant
        if letter.isalpha():
            # dict.get supplies 0 the first time a letter is seen
            result[letter] = result.get(letter, 0) + 1
    return result
# Demonstration calls with their expected outputs:
print(count_letters("AaBbCc"))
# Should be {'a': 2, 'b': 2, 'c': 2}
print(count_letters("Math is fun! 2+2=4"))
# Should be {'m': 1, 'a': 1, 't': 1, 'h': 1, 'i': 1, 's': 1, 'f': 1, 'u': 1, 'n': 1}
print(count_letters("This is a sentence."))
# Should be {'t': 2, 'h': 1, 'i': 2, 's': 3, 'a': 1, 'e': 3, 'n': 2, 'c': 1}
|
# Noah Martinez
# 11/5/21
# Search on the internet for a way to calculate an approximation for pi. There are many that use simple arithmetic.
# Write a program to compute the approximation and then print that value as well as the value of math.pi
# from the math module.
import math
# Leibniz series: pi = 4/1 - 4/3 + 4/5 - 4/7 + ...
# Fix: the original used floor division (//), which truncated every term
# after the second to 0 and printed exactly 3.0 instead of ~3.017.
pi = float((4/1) - (4/3) + (4/5) - (4/7) + (4/9) - (4/11) + (4/13) - (4/15))
print(pi)
print(math.pi)
|
def print(*args, sep: str = ' ', end: str = '\n'):
    """Shadow the builtin print and forward the text to the JS console.

    NOTE(review): relies on a global ``console`` object, so this only works
    in a browser Python runtime (Brython/Skulpt/Pyodide-style) — confirm
    against the surrounding project.  ``end`` is accepted for signature
    compatibility with the builtin but is ignored here; console.log always
    terminates the line itself.
    """
    string_args = []
    for arg in args:
        string_args.append(str(arg))
    console.log(sep.join(string_args))
    # console.log(sep.join([str(arg) for arg in args]))
|
# Split the string on '@'; the leading empty piece (before the first '@')
# is replaced with a greeting header so every friend name prints under it.
word = '@大学生精英班@扎克伯格@俞敏洪'
a = word.split('@')
a[0] = '您@的好友有:'
for name in a :
    print(name)
|
# We use an interface (abstract base class) when a feature must be
# implemented differently for each concrete object.
from abc import ABC,abstractmethod
# A single abstract method: every concrete subclass must override disp().
class Father(ABC):
    @abstractmethod
    def disp(self):
        pass
class Child(Father):
    def disp(self):
        print("child class")
        print("defining abstract method")
c=Child()
c.disp()
print("-----------------------------------------------------------------------------")
# Two abstract methods: they may be satisfied at different inheritance
# levels; only a class implementing BOTH can be instantiated.
class Father(ABC):
    @abstractmethod
    def disp1(self):
        pass
    @abstractmethod
    def disp2(self):
        pass
class Child(Father):
    def disp1(self):
        print("child class")
        print("disp 1 abstract method")
class Grandchild(Child):
    def disp2(self):
        print("Grandchild class")
        print("disp2 abstract method")
gc=Grandchild()
gc.disp1()
gc.disp2()
print("------------------------------------------------------------------------------")
# Abstract Properties :
# Abstract classes include attributes in addition to methods; required
# attributes in concrete classes can be declared abstract as well.
# One interface, many implementations: each Animal moves its own way.
class Animal(ABC):
    @abstractmethod
    def move(self):
        pass
class Human(Animal):
    def move(self):
        print("I can walk and run")
class Snake(Animal):
    def move(self):
        print("I can crawl")
class Dog(Animal):
    def move(self):
        print("I can bark")
class Lion(Animal):
    def move(self):
        print("I can roar")
c=Human()
c.move()
|
import random

# Alphabet the generated passwords are drawn from.
chars = "1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM!@$%^&*()_-+"

def gen():
    """Prompt for a length and print a random password of that length;
    on invalid input, report the error and prompt again."""
    try:
        length = input("Введите длину пароля >> ")
        res = ""
        for i in range(int(length)):
            # random.choice is the idiomatic way to pick one element
            res += random.choice(chars)
        print("Пароль: " + res + "\n")
    except ValueError:
        # Fix: only int() is expected to fail here; the original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit, making the
        # program impossible to interrupt cleanly.
        print("Ошибка.\n")
        gen()
gen() |
# cluster_utils.py
"""
utility file with clustering & plotting methods
"""
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def scree_plot(df, title_in="", drop_index=False):
    """
    Draw a scree plot of PCA singular values so the caller can judge how
    many components are worth keeping.
    """
    features = df.drop(df.columns[0], axis=1) if drop_index else df
    standardized = StandardScaler().fit_transform(features)
    fitted = PCA().fit(standardized)
    plt.plot(fitted.singular_values_)
    plt.xlabel("num components")
    plt.ylabel("singular values")
    plt.title(title_in + " " + "scree plot")
    plt.show()
def kmeans(df, num_clusters, title_in="", drop_index=False):
    """
    2d kmeans clustering and plotting function.
    Plots results along 2 principal components and returns the feature
    frame with an added "cluster" column.

    Fix: operate on a copy so the caller's dataframe is not mutated — the
    original assigned the "cluster" column straight into *df* whenever
    drop_index was False.
    """
    if drop_index:
        X = df.drop(df.columns[0], axis=1)
    else:
        X = df.copy()
    cluster_km = KMeans(n_clusters=num_clusters)
    cluster_y = cluster_km.fit_predict(X)
    # plotting: project onto the first two principal components
    reduced_data = PCA(n_components=2).fit_transform(X)
    results = pd.DataFrame(reduced_data, columns=["pc1", "pc2"])
    cmap = sns.color_palette(n_colors=num_clusters)
    sns.scatterplot(x="pc1", y="pc2", hue=cluster_y, palette=cmap, data=results)
    title_str = title_in + " " + str(num_clusters) + " Clusters"
    plt.title(title_str)
    plt.show()
    X["cluster"] = cluster_y
    return X
def kmeans3d(df, num_clusters, title_in="", drop_index=False):
    """
    3d kmeans clustering and plotting function.
    Plots results along 3 principal components and returns the feature
    frame with an added "cluster" column.

    Fix: operate on a copy so the caller's dataframe is not mutated — the
    original assigned the "cluster" column straight into *df* whenever
    drop_index was False.
    """
    if drop_index:
        X = df.drop(df.columns[0], axis=1)
    else:
        X = df.copy()
    cluster_km = KMeans(n_clusters=num_clusters)
    cluster_y = cluster_km.fit_predict(X)
    # plotting: project onto the first three principal components
    reduced_data = PCA(n_components=3).fit_transform(X)
    results = pd.DataFrame(reduced_data, columns=["pc1", "pc2", "pc3"])
    fig = plt.figure()
    ax = fig.add_subplot(111, projection="3d")
    ax.scatter(results["pc1"].values, results["pc2"].values,
               results["pc3"].values, c=cluster_y)
    title_str = title_in + " " + str(num_clusters) + " Clusters"
    plt.title(title_str)
    plt.show()
    X["cluster"] = cluster_y
    return X
|
"""
NodeCount counts the number of nodes in a node
"""
from pythran.passmanager import NodeAnalysis
class NodeCount(NodeAnalysis):
    """
    Count the number of nodes included in a node

    This has nothing to do with execution time or whatever,
    its mainly use is to prevent the AST from growing too much when unrolling

    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse("if 1: return 3")
    >>> pm = passmanager.PassManager("test")
    >>> print(pm.gather(NodeCount, node))
    5
    """

    def __init__(self):
        # the tally starts at zero before any traversal runs
        self.result = 0
        super().__init__()

    def generic_visit(self, node):
        # every visited node adds one, then the walk continues as usual
        self.result += 1
        super().generic_visit(node)
|
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
def nested_update(this, that):
    """Merge two nested dictionaries.
    Effectively a recursive ``dict.update``: *that* overrides *this* in
    place, recursing wherever both sides hold a dict, and returns *this*.
    Examples
    --------
    Merge two flat dictionaries:
    >>> nested_update(
    ...     {'a': 1, 'b': 2},
    ...     {'b': 3, 'c': 4}
    ... )
    {'a': 1, 'b': 3, 'c': 4}
    Merge two nested dictionaries:
    >>> nested_update(
    ...     {'x': {'a': 1, 'b': 2}, 'y': 5, 'z': 6},
    ...     {'x': {'b': 3, 'c': 4}, 'z': 7, '0': 8},
    ... )
    {'x': {'a': 1, 'b': 3, 'c': 4}, 'y': 5, 'z': 7, '0': 8}
    """
    for key, value in this.items():
        if isinstance(value, dict):
            # recurse only when BOTH sides are dicts; a non-dict override
            # of a dict value is deliberately ignored
            if key in that and isinstance(that[key], dict):
                nested_update(value, that[key])
        elif key in that:
            this[key] = that[key]
    # carry over keys that exist only in `that`
    for key, value in that.items():
        this.setdefault(key, value)
    return this
|
from ._ufuncs import _lambertw
def lambertw(z, k=0, tol=1e-8):
    r"""
    lambertw(z, k=0, tol=1e-8)
    Lambert W function.
    The Lambert W function `W(z)` is defined as the inverse function
    of ``w * exp(w)``. In other words, the value of ``W(z)`` is
    such that ``z = W(z) * exp(W(z))`` for any complex number
    ``z``.
    The Lambert W function is a multivalued function with infinitely
    many branches. Each branch gives a separate solution of the
    equation ``z = w exp(w)``. Here, the branches are indexed by the
    integer `k`.
    Parameters
    ----------
    z : array_like
        Input argument.
    k : int, optional
        Branch index.
    tol : float, optional
        Evaluation tolerance.
    Returns
    -------
    w : array
        `w` will have the same shape as `z`.
    Notes
    -----
    All branches are supported by `lambertw`:
    * ``lambertw(z)`` gives the principal solution (branch 0)
    * ``lambertw(z, k)`` gives the solution on branch `k`
    The Lambert W function has two partially real branches: the
    principal branch (`k = 0`) is real for real ``z > -1/e``, and the
    ``k = -1`` branch is real for ``-1/e < z < 0``. All branches except
    ``k = 0`` have a logarithmic singularity at ``z = 0``.
    **Possible issues**
    The evaluation can become inaccurate very close to the branch point
    at ``-1/e``. In some corner cases, `lambertw` might currently
    fail to converge, or can end up on the wrong branch.
    **Algorithm**
    Halley's iteration is used to invert ``w * exp(w)``, using a first-order
    asymptotic approximation (O(log(w)) or `O(w)`) as the initial estimate.
    The definition, implementation and choice of branches is based on [2]_.
    See Also
    --------
    wrightomega : the Wright Omega function
    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Lambert_W_function
    .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5
       (1996) 329-359.
       https://cs.uwaterloo.ca/research/tr/1993/03/W.pdf
    Examples
    --------
    The Lambert W function is the inverse of ``w exp(w)``:
    >>> from scipy.special import lambertw
    >>> w = lambertw(1)
    >>> w
    (0.56714329040978384+0j)
    >>> w * np.exp(w)
    (1.0+0j)
    Any branch gives a valid inverse:
    >>> w = lambertw(1, k=3)
    >>> w
    (-2.8535817554090377+17.113535539412148j)
    >>> w*np.exp(w)
    (1.0000000000000002+1.609823385706477e-15j)
    **Applications to equation-solving**
    The Lambert W function may be used to solve various kinds of
    equations, such as finding the value of the infinite power
    tower :math:`z^{z^{z^{\ldots}}}`:
    >>> def tower(z, n):
    ...     if n == 0:
    ...         return z
    ...     return z ** tower(z, n-1)
    ...
    >>> tower(0.5, 100)
    0.641185744504986
    >>> -lambertw(-np.log(0.5)) / np.log(0.5)
    (0.64118574450498589+0j)
    """
    # Thin Python wrapper: all numerics happen in the compiled _lambertw ufunc.
    return _lambertw(z, k, tol)
|
# -*- coding: utf-8 -*-
"""
This module offers a generic Easter computing method for any given year, using
Western, Orthodox or Julian algorithms.
"""
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
    """
    This method was ported from the work done by GM Arts,
    on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Ouding (1940), as
    quoted in "Explanatory Supplement to the Astronomical
    Almanac", P. Kenneth Seidelmann, editor.
    This algorithm implements three different Easter
    calculation methods:
    1. Original calculation in Julian calendar, valid in
       dates after 326 AD
    2. Original method, with date converted to Gregorian
       calendar, valid in years 1583 to 4099
    3. Revised method, in Gregorian calendar, valid in
       years 1583 to 4099 as well
    These methods are represented by the constants:
    * ``EASTER_JULIAN   = 1``
    * ``EASTER_ORTHODOX = 2``
    * ``EASTER_WESTERN  = 3``
    The default method is method 3.
    More about the algorithm may be found at:
    `GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_
    and
    `The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_

    :param year: the year for which Easter is computed
    :param method: one of the three constants above (default: western)
    :return: a :class:`datetime.date` for Easter Sunday of that year
    :raises ValueError: if *method* is not 1, 2 or 3
    """
    if not (1 <= method <= 3):
        raise ValueError("invalid method")
    # g - Golden year - 1
    # c - Century
    # h - (23 - Epact) mod 30
    # i - Number of days from March 21 to Paschal Full Moon
    # j - Weekday for PFM (0=Sunday, etc)
    # p - Number of days from March 21 to Sunday on or before PFM
    #     (-6 to 28 methods 1 & 3, to 56 for method 2)
    # e - Extra days to add for method 2 (converting Julian
    #     date to Gregorian date)
    y = year
    g = y % 19
    e = 0
    if method < 3:
        # Old method
        i = (19*g + 15) % 30
        j = (y + y//4 + i) % 7
        if method == 2:
            # Extra dates to convert Julian to Gregorian date
            e = 10
            if y > 1600:
                e = e + y//100 - 16 - (y//100 - 16)//4
    else:
        # New method
        c = y//100
        h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
        i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
        j = (y + y//4 + i + 2 - c + c//4) % 7
    # p can be from -6 to 56 corresponding to dates 22 March to 23 May
    # (later dates apply to method 2, although 23 May never actually occurs)
    p = i - j + e
    d = 1 + (p + 27 + (p + 6)//40) % 31
    m = 3 + (p + 26)//30
    return datetime.date(int(y), int(m), int(d))
|
"""General floating point formatting functions.
Functions:
fix(x, digits_behind)
sci(x, digits_behind)
Each takes a number or a string and a number of digits as arguments.
Parameters:
x: number to be formatted; or a string resembling a number
digits_behind: number of digits behind the decimal point
"""
from warnings import warnpy3k
warnpy3k("the fpformat module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import re
__all__ = ["fix","sci","NotANumber"]
# Compiled regular expression to "decode" a number
decoder = re.compile(r'^([-+]?)(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
# \0 the whole thing
# \1 leading sign or empty
# \2 digits left of decimal point
# \3 fraction (empty or begins with point)
# \4 exponent part (empty or begins with 'e' or 'E')
# In very old Python versions classes could not subclass exceptions; fall
# back to a string exception (the historical spelling) in that case.
try:
    class NotANumber(ValueError):
        pass
except TypeError:
    NotANumber = 'fpformat.NotANumber'
def extract(s):
    """Return (sign, intpart, fraction, expo) or raise an exception:
    sign is '+' or '-'
    intpart is 0 or more digits beginning with a nonzero
    fraction is 0 or more digits
    expo is an integer"""
    # NOTE: Python 2 raise syntax — this module predates Python 3.
    res = decoder.match(s)
    if res is None: raise NotANumber, s
    sign, intpart, fraction, exppart = res.group(1,2,3,4)
    # drop leading zeros so intpart starts with a nonzero digit (or is '')
    intpart = intpart.lstrip('0');
    if sign == '+': sign = ''
    # fraction still carries its leading '.'; strip it
    if fraction: fraction = fraction[1:]
    # exppart begins with 'e'/'E' when present
    if exppart: expo = int(exppart[1:])
    else: expo = 0
    return sign, intpart, fraction, expo
def unexpo(intpart, fraction, expo):
    """Fold a power-of-ten exponent into the digit strings.

    Shifts the decimal point of (intpart, fraction) by expo places and
    returns the adjusted pair, so that the exponent becomes zero.
    """
    if expo > 0:
        # Point moves right: pull digits out of the fraction,
        # zero-padding when the fraction is too short.
        avail = len(fraction)
        intpart, fraction = intpart + fraction[:expo], fraction[expo:]
        if expo > avail:
            intpart = intpart + '0' * (expo - avail)
    elif expo < 0:
        # Point moves left: push digits into the fraction,
        # zero-padding when the integer part is too short.
        avail = len(intpart)
        intpart, fraction = intpart[:expo], intpart[expo:] + fraction
        if -expo > avail:
            fraction = '0' * (-expo - avail) + fraction
    return intpart, fraction
def roundfrac(intpart, fraction, digs):
    """Round or extend the fraction so it has exactly digs digits.

    digs may be negative, in which case digits of the integer part are
    rounded away and replaced by zeros.
    """
    frac_len = len(fraction)
    if frac_len <= digs:
        # Nothing to round off: just zero-extend the fraction.
        return intpart, fraction + '0' * (digs - frac_len)
    int_len = len(intpart)
    if int_len + digs < 0:
        # Every significant digit is rounded away.
        return '0' * -digs, ''
    digits = intpart + fraction
    if digits[int_len + digs] >= '5':
        # Round up: increment the last kept digit, propagating the carry
        # leftwards over any run of 9s.
        pos = int_len + digs - 1
        while pos >= 0 and digits[pos] == '9':
            pos -= 1
        if pos < 0:
            # Carry ran off the front (e.g. 9.96 -> 10.0): grow by a digit.
            digits = '0' + digits
            int_len += 1
            pos = 0
        digits = digits[:pos] + chr(ord(digits[pos]) + 1) + '0' * (len(digits) - pos - 1)
    intpart, fraction = digits[:int_len], digits[int_len:]
    if digs >= 0:
        return intpart, fraction[:digs]
    return intpart[:digs] + '0' * -digs, ''
def fix(x, digs):
    """Format x as [-]ddd.ddd with 'digs' digits after the point
    and at least one digit before.
    If digs <= 0, the point is suppressed."""
    if type(x) is not str:
        x = repr(x)
    try:
        sign, intpart, fraction, expo = extract(x)
    except NotANumber:
        # Not parseable as a number: hand the input back untouched.
        return x
    intpart, fraction = unexpo(intpart, fraction, expo)
    intpart, fraction = roundfrac(intpart, fraction, digs)
    # Strip leading zeros, but keep a single '0' before the point.
    intpart = intpart.lstrip('0') or '0'
    if digs > 0:
        return sign + intpart + '.' + fraction
    return sign + intpart
def sci(x, digs):
    """Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
    and exactly one digit before.
    If digs is <= 0, one digit is kept and the point is suppressed."""
    # Accept any object: fall back to its repr (py2-era duck typing).
    if type(x) != type(''): x = repr(x)
    sign, intpart, fraction, expo = extract(x)
    if not intpart:
        # No digit before the point (e.g. '.00123'): shift leading zeros
        # of the fraction into a (more negative) exponent.
        while fraction and fraction[0] == '0':
            fraction = fraction[1:]
            expo = expo - 1
        if fraction:
            intpart, fraction = fraction[0], fraction[1:]
            expo = expo - 1
        else:
            intpart = '0'
    else:
        # Several digits before the point: keep one, push the rest into
        # the fraction and bump the exponent accordingly.
        expo = expo + len(intpart) - 1
        intpart, fraction = intpart[0], intpart[1:] + fraction
    digs = max(0, digs)
    intpart, fraction = roundfrac(intpart, fraction, digs)
    if len(intpart) > 1:
        # Rounding carried into a second leading digit (e.g. 9.99 -> 10.0):
        # renormalize to one digit, dropping the now-surplus last digit.
        intpart, fraction, expo = \
            intpart[0], intpart[1:] + fraction[:-1], \
            expo + len(intpart) - 1
    s = sign + intpart
    if digs > 0: s = s + '.' + fraction
    # Exponent is always signed and zero-padded to three digits.
    e = repr(abs(expo))
    e = '0'*(3-len(e)) + e
    if expo < 0: e = '-' + e
    else: e = '+' + e
    return s + 'e' + e
def test():
    """Interactive test run."""
    # NOTE: Python 2 code -- py2 input() eval()s the typed expression to
    # produce the (x, digs) tuple, and this uses the print statement.
    try:
        while 1:
            x, digs = input('Enter (x, digs): ')
            print x, fix(x, digs), sci(x, digs)
    except (EOFError, KeyboardInterrupt):
        pass
|
"""
The function zetazero(n) computes the n-th nontrivial zero of zeta(s).
The general strategy is to locate a block of Gram intervals B where we
know exactly the number of zeros contained and which of those zeros
is that which we search.
If n <= 400 000 000 we know exactly the Rosser exceptions, contained
in a list in this file. Hence for n<=400 000 000 we simply
look at these list of exceptions. If our zero is implicated in one of
these exceptions we have our block B. Otherwise we simply locate
the good Rosser block containing our zero.
For n > 400 000 000 we apply the method of Turing, as complemented by
Lehman, Brent and Trudgian to find a suitable B.
"""
from .functions import defun, defun_wrapped
def find_rosser_block_zero(ctx, n):
    """For n < 400 000 000, determine a block in which the n-th zero lies.

    Returns (my_zero_number, [a, b], T, V): [a, b] are the Gram-point
    indices delimiting the block, T the corresponding Gram points, V the
    Z-function values there, and my_zero_number the index of the wanted
    zero inside the block.
    """
    # Known Rosser-rule exceptions are tabulated; check them first.
    for k in range(len(_ROSSER_EXCEPTIONS)//2):
        a = _ROSSER_EXCEPTIONS[2*k][0]
        b = _ROSSER_EXCEPTIONS[2*k][1]
        if ((a <= n-2) and (n-1 <= b)):
            t0 = ctx.grampoint(a)
            t1 = ctx.grampoint(b)
            v0 = ctx._fp.siegelz(t0)
            v1 = ctx._fp.siegelz(t1)
            my_zero_number = n-a-1
            # (dropped two unused locals the original computed here:
            # zero_number_block and the exception's pattern string)
            return (my_zero_number, [a, b], [t0, t1], [v0, v1])
    # Not an exception: walk outward from Gram points n-2 and n-1 until
    # both endpoints are "good" Gram points (b >= 0 in compute_triple_tvb).
    k = n-2
    t, v, b = compute_triple_tvb(ctx, k)
    T = [t]
    V = [v]
    while b < 0:
        k -= 1
        t, v, b = compute_triple_tvb(ctx, k)
        T.insert(0, t)
        V.insert(0, v)
    my_zero_number = n-k-1
    m = n-1
    t, v, b = compute_triple_tvb(ctx, m)
    T.append(t)
    V.append(v)
    while b < 0:
        m += 1
        t, v, b = compute_triple_tvb(ctx, m)
        T.append(t)
        V.append(v)
    return (my_zero_number, [k, m], T, V)
def wpzeros(t):
    """Return the working precision (bits) needed to compute zeros near height t."""
    # Thresholds from largest to smallest; first match wins.
    for bound, prec in ((10**14, 83), (10**11, 70), (3*10**8, 63)):
        if t > bound:
            return prec
    return 53
def separate_zeros_in_block(ctx, zero_number_block, T, V, limitloop=None,
    fp_tolerance=None):
    """Separate the zeros contained in the block T, limitloop
    determines how long one must search.

    Repeatedly bisects/interpolates new evaluation points between the
    entries of T until the number of sign changes of V equals
    zero_number_block (all zeros separated) or limitloop passes elapse.
    Returns (T, V, separated).
    """
    if limitloop is None:
        limitloop = ctx.inf
    loopnumber = 0
    variations = count_variations(V)
    while ((variations < zero_number_block) and (loopnumber <limitloop)):
        a = T[0]
        v = V[0]
        newT = [a]
        newV = [v]
        variations = 0
        for n in range(1,len(T)):
            b2 = T[n]
            u = V[n]
            if (u*v>0):
                # Same sign at both ends: place the new point weighted by
                # sqrt(u/v) rather than at the plain midpoint.
                alpha = ctx.sqrt(u/v)
                b= (alpha*a+b2)/(alpha+1)
            else:
                b = (a+b2)/2
            # Small fp_tolerance: try the fast float Z first and fall back
            # to the multiprecision value only when |Z| is suspiciously small.
            if fp_tolerance < 10:
                w = ctx._fp.siegelz(b)
                if abs(w)<fp_tolerance:
                    w = ctx.siegelz(b)
            else:
                w=ctx.siegelz(b)
            if v*w<0:
                variations += 1
            newT.append(b)
            newV.append(w)
            u = V[n]
            if u*w <0:
                variations += 1
            newT.append(b2)
            newV.append(u)
            a = b2
            v = u
        T = newT
        V = newV
        loopnumber +=1
        # Heuristic for a stubborn pair of close zeros: when exactly two
        # sign changes are missing and one gap is much wider than the
        # runner-up, look for a zero of Z' inside the widest gap.
        if (limitloop>ITERATION_LIMIT)and(loopnumber>2)and(variations+2==zero_number_block):
            dtMax=0
            dtSec=0
            kMax = 0
            for k1 in range(1,len(T)):
                dt = T[k1]-T[k1-1]
                if dt > dtMax:
                    kMax=k1
                    dtSec = dtMax
                    dtMax = dt
                elif  (dt<dtMax) and(dt >dtSec):
                    dtSec = dt
            if  dtMax>3*dtSec:
                f = lambda x: ctx.rs_z(x,derivative=1)
                t0=T[kMax-1]
                t1 = T[kMax]
                t=ctx.findroot(f,  (t0,t1), solver ='illinois',verify=False, verbose=False)
                v = ctx.siegelz(t)
                if (t0<t) and (t<t1) and (v*V[kMax]<0):
                    T.insert(kMax,t)
                    V.insert(kMax,v)
        variations = count_variations(V)
    if variations == zero_number_block:
        separated = True
    else:
        separated = False
    return (T,V, separated)
def separate_my_zero(ctx, my_zero_number, zero_number_block, T, V, prec):
    """If we know which zero of this block is mine,
    the function separates the zero.

    Locates the my_zero_number-th sign change of V, then refines the
    bracketed root: first with findroot at low precision, then by
    Newton steps on zeta along a ladder of increasing precisions.
    Returns the imaginary part of the zero.  Mutates ctx.prec.
    """
    variations = 0
    v0 = V[0]
    # Find the bracketing interval [T[k0-1], T[k0]] of our sign change.
    for k in range(1,len(V)):
        v1 = V[k]
        if v0*v1 < 0:
            variations +=1
            if variations == my_zero_number:
                k0 = k
                leftv = v0
                rightv = v1
        v0 = v1
    t1 = T[k0]
    t0 = T[k0-1]
    ctx.prec = prec
    wpz = wpzeros(my_zero_number*ctx.log(my_zero_number))
    guard = 4*ctx.mag(my_zero_number)
    # Build a ladder of precisions, halving down towards ~2*wpz; the root
    # is then refined from the coarsest rung up to full precision.
    precs = [ctx.prec+4]
    index=0
    while precs[0] > 2*wpz:
        index +=1
        precs = [precs[0] // 2 +3+2*index] + precs
    ctx.prec = precs[0] + guard
    r = ctx.findroot(lambda x:ctx.siegelz(x), (t0,t1), solver ='illinois', verbose=False)
    #print "first step at", ctx.dps, "digits"
    z=ctx.mpc(0.5,r)
    for prec in precs[1:]:
        ctx.prec = prec + guard
        #print "refining to", ctx.dps, "digits"
        # One Newton step on zeta(s); then snap back onto the critical line.
        znew = z - ctx.zeta(z) / ctx.zeta(z, derivative=1)
        #print "difference", ctx.nstr(abs(z-znew))
        z=ctx.mpc(0.5,ctx.im(znew))
    return ctx.im(z)
def sure_number_block(ctx, n):
    """The number of good Rosser blocks needed to apply
    Turing method
    References:
    R. P. Brent, On the Zeros of the Riemann Zeta Function
    in the Critical Strip, Math. Comp. 33 (1979) 1361--1372
    T. Trudgian, Improvements to Turing Method, Math. Comp."""
    if n < 9*10**5:
        return 2
    g = ctx.grampoint(n - 100)
    lg = ctx._fp.ln(g)
    # Take the better (smaller) of the two published bounds.
    brent_bound = 0.0061 * lg**2 + 0.08 * lg
    trudgian_bound = 0.0031 * lg**2 + 0.11 * lg
    return int(ctx.ceil(min(brent_bound, trudgian_bound)))
def compute_triple_tvb(ctx, n):
    """Return (t, v, b) for Gram point number n: t = g(n), v = Z(t),
    b = v*(-1)**n (the Gram point is 'good' when b >= 0)."""
    t = ctx.grampoint(n)
    v = ctx._fp.siegelz(t)
    # The fast float value can be unreliable when |Z(t)| is tiny relative
    # to t; recompute at working precision in that case.
    if ctx.mag(abs(v)) < ctx.mag(t) - 45:
        v = ctx.siegelz(t)
    return t, v, v*(-1)**n
# Maximum number of refinement passes when trying to separate the zeros of
# a candidate block (used as limitloop in separate_zeros_in_block).
ITERATION_LIMIT = 4
def search_supergood_block(ctx, n, fp_tolerance):
    """To use for n>400 000 000.

    Turing/Lehman/Brent/Trudgian approach: starting from Gram point n-1,
    collect Rosser blocks to the right and then to the left until 2*sb
    consecutive blocks on each side are fully separated ("good"), where
    sb = sure_number_block(ctx, n).  Returns a tuple of the same shape as
    find_rosser_block_zero: (my_zero_number, [r, s], T, V).
    """
    sb = sure_number_block(ctx, n)
    number_goodblocks = 0
    m2 = n-1
    t, v, b = compute_triple_tvb(ctx, m2)
    Tf = [t]
    Vf = [v]
    # Extend right until the first good Gram point (end of current block).
    while b < 0:
        m2 += 1
        t,v,b = compute_triple_tvb(ctx, m2)
        Tf.append(t)
        Vf.append(v)
    goodpoints = [m2]
    T = [t]
    V = [v]
    # Scan rightwards: accumulate blocks until 2*sb consecutive ones separate.
    while number_goodblocks < 2*sb:
        m2 += 1
        t, v, b = compute_triple_tvb(ctx, m2)
        T.append(t)
        V.append(v)
        while b < 0:
            m2 += 1
            t,v,b = compute_triple_tvb(ctx, m2)
            T.append(t)
            V.append(v)
        goodpoints.append(m2)
        zn = len(T)-1
        A, B, separated =\
           separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT,
                                   fp_tolerance=fp_tolerance)
        # Splice refined points into the full lists (endpoint is shared).
        Tf.pop()
        Tf.extend(A)
        Vf.pop()
        Vf.extend(B)
        if separated:
            number_goodblocks += 1
        else:
            # A failed block resets the run of consecutive good blocks.
            number_goodblocks = 0
        T = [t]
        V = [v]
    # Now the same procedure to the left
    number_goodblocks = 0
    m2 = n-2
    t, v, b = compute_triple_tvb(ctx, m2)
    Tf.insert(0,t)
    Vf.insert(0,v)
    while b < 0:
        m2 -= 1
        t,v,b = compute_triple_tvb(ctx, m2)
        Tf.insert(0,t)
        Vf.insert(0,v)
    goodpoints.insert(0,m2)
    T = [t]
    V = [v]
    while number_goodblocks < 2*sb:
        m2 -= 1
        t, v, b = compute_triple_tvb(ctx, m2)
        T.insert(0,t)
        V.insert(0,v)
        while b < 0:
            m2 -= 1
            t,v,b = compute_triple_tvb(ctx, m2)
            T.insert(0,t)
            V.insert(0,v)
        goodpoints.insert(0,m2)
        zn = len(T)-1
        A, B, separated =\
           separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance)
        A.pop()
        Tf = A+Tf
        B.pop()
        Vf = B+Vf
        if separated:
            number_goodblocks += 1
        else:
            number_goodblocks = 0
        T = [t]
        V = [v]
    # Prefer the inner interval delimited by the 2*sb-th good points; fall
    # back to the sb-th ones when the inner interval does not separate.
    r = goodpoints[2*sb]
    lg = len(goodpoints)
    s = goodpoints[lg-2*sb-1]
    tr, vr, br = compute_triple_tvb(ctx, r)
    ar = Tf.index(tr)
    ts, vs, bs = compute_triple_tvb(ctx, s)
    as1 = Tf.index(ts)
    T = Tf[ar:as1+1]
    V = Vf[ar:as1+1]
    zn = s-r
    A, B, separated =\
       separate_zeros_in_block(ctx, zn,T,V,limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance)
    if separated:
        return (n-r-1,[r,s],A,B)
    q = goodpoints[sb]
    lg = len(goodpoints)
    t = goodpoints[lg-sb-1]
    tq, vq, bq = compute_triple_tvb(ctx, q)
    aq = Tf.index(tq)
    tt, vt, bt = compute_triple_tvb(ctx, t)
    at = Tf.index(tt)
    T = Tf[aq:at+1]
    V = Vf[aq:at+1]
    return (n-q-1,[q,t],T,V)
def count_variations(V):
    """Count the sign changes between consecutive entries of V."""
    changes = 0
    for prev, cur in zip(V, V[1:]):
        if prev * cur < 0:
            changes += 1
    return changes
def pattern_construct(ctx, block, T, V):
    """Build the zero-count pattern string for a block, e.g. '(010)(3)'.

    Each digit is the number of sign changes of V (zeros of Z) inside one
    Gram interval of block = [a, b]; parentheses group Rosser blocks,
    which close at each good Gram point (b1 > 0).
    """
    pattern = '('
    a = block[0]
    b = block[1]
    t0,v0,b0 = compute_triple_tvb(ctx, a)
    k = 0
    k0 = 0
    for n in range(a+1,b+1):
        t1,v1,b1 = compute_triple_tvb(ctx, n)
        lgT =len(T)
        # Advance k past all refined points that fall inside this interval.
        while (k < lgT) and (T[k] <= t1):
            k += 1
        # Count sign changes over [t0, t1] including both endpoints.
        L = V[k0:k]
        L.append(v1)
        L.insert(0,v0)
        count = count_variations(L)
        pattern = pattern + ("%s" % count)
        if b1 > 0:
            # Good Gram point: a Rosser block ends here.
            pattern = pattern + ')('
        k0 = k
        t0,v0,b0 = t1,v1,b1
    # Drop the trailing '(' opened after the last good point.
    pattern = pattern[:-1]
    return pattern
@defun
def zetazero(ctx, n, info=False, round=True):
    r"""
    Computes the `n`-th nontrivial zero of `\zeta(s)` on the critical line,
    i.e. returns an approximation of the `n`-th largest complex number
    `s = \frac{1}{2} + ti` for which `\zeta(s) = 0`. Equivalently, the
    imaginary part `t` is a zero of the Z-function (:func:`~mpmath.siegelz`).
    **Examples**
    The first few zeros::
    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> zetazero(1)
    (0.5 + 14.13472514173469379045725j)
    >>> zetazero(2)
    (0.5 + 21.02203963877155499262848j)
    >>> zetazero(20)
    (0.5 + 77.14484006887480537268266j)
    Verifying that the values are zeros::
    >>> for n in range(1,5):
    ...     s = zetazero(n)
    ...     chop(zeta(s)), chop(siegelz(s.imag))
    ...
    (0.0, 0.0)
    (0.0, 0.0)
    (0.0, 0.0)
    (0.0, 0.0)
    Negative indices give the conjugate zeros (`n = 0` is undefined)::
    >>> zetazero(-1)
    (0.5 - 14.13472514173469379045725j)
    :func:`~mpmath.zetazero` supports arbitrarily large `n` and arbitrary precision::
    >>> mp.dps = 15
    >>> zetazero(1234567)
    (0.5 + 727690.906948208j)
    >>> mp.dps = 50
    >>> zetazero(1234567)
    (0.5 + 727690.9069482075392389420041147142092708393819935j)
    >>> chop(zeta(_)/_)
    0.0
    with *info=True*, :func:`~mpmath.zetazero` gives additional information::
    >>> mp.dps = 15
    >>> zetazero(542964976,info=True)
    ((0.5 + 209039046.578535j), [542964969, 542964978], 6, '(013111110)')
    This means that the zero is between Gram points 542964969 and 542964978;
    it is the 6-th zero between them. Finally (013111110) is the pattern
    of zeros in this interval. The numbers indicate the number of zeros
    in each Gram interval (Rosser blocks between parenthesis). In this case
    there is only one Rosser block of length nine.
    """
    n = int(n)
    if n < 0:
        # Zeros come in conjugate pairs; negative n mirrors positive n.
        return ctx.zetazero(-n).conjugate()
    if n == 0:
        raise ValueError("n must be nonzero")
    wpinitial = ctx.prec
    # Work at raised precision; the caller's precision is always restored.
    try:
        wpz, fp_tolerance = comp_fp_tolerance(ctx, n)
        ctx.prec = wpz
        # Up to 400 000 000 the Rosser exceptions are tabulated; beyond
        # that the Turing-method block search is used.
        if n < 400000000:
            my_zero_number, block, T, V =\
                find_rosser_block_zero(ctx, n)
        else:
            my_zero_number, block, T, V =\
                search_supergood_block(ctx, n, fp_tolerance)
        zero_number_block = block[1]-block[0]
        T, V, separated = separate_zeros_in_block(ctx, zero_number_block, T, V,
            limitloop=ctx.inf, fp_tolerance=fp_tolerance)
        if info:
            pattern = pattern_construct(ctx,block,T,V)
        prec = max(wpinitial, wpz)
        t = separate_my_zero(ctx, my_zero_number, zero_number_block,T,V,prec)
        v = ctx.mpc(0.5,t)
    finally:
        ctx.prec = wpinitial
    if round:
        # Unary plus rounds v to the (restored) working precision.
        v =+v
    if info:
        return (v,block,my_zero_number,pattern)
    else:
        return v
def gram_index(ctx, t):
    """Return an integer n with t near Gram point g(n), via theta(t)/pi."""
    # Very large t needs extra working precision for the theta evaluation.
    wp = 3*ctx.log(t, 10) if t > 10**13 else 0
    prec = ctx.prec
    try:
        ctx.prec += wp
        h = int(ctx.siegeltheta(t)/ctx.pi)
    finally:
        # Always restore the caller's precision.
        ctx.prec = prec
    return h
def count_to(ctx, t, T, V):
    """Count sign changes of Z (zeros) with ordinate below t, given the
    separating points T and their Z-values V."""
    changes = 0
    prev = V[0]
    k = 1
    while T[k] < t:
        cur = V[k]
        if prev * cur < 0:
            changes += 1
        prev = cur
        k += 1
    # One more zero if Z changes sign between the last point below t and t.
    if ctx.siegelz(t) * prev < 0:
        changes += 1
    return changes
def comp_fp_tolerance(ctx, n):
    """Return (working precision, float tolerance) suitable for the n-th zero."""
    wpz = wpzeros(n*ctx.log(n))
    # Larger n tolerates a looser float Z-value before falling back to
    # multiprecision evaluation.
    if n < 15*10**8:
        tolerance = 0.0005
    elif n <= 10**14:
        tolerance = 0.1
    else:
        tolerance = 100
    return wpz, tolerance
@defun
def nzeros(ctx, t):
    r"""
    Computes the number of zeros of the Riemann zeta function in
    `(0,1) \times (0,t]`, usually denoted by `N(t)`.
    **Examples**
    The first zero has imaginary part between 14 and 15::
    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> nzeros(14)
    0
    >>> nzeros(15)
    1
    >>> zetazero(1)
    (0.5 + 14.1347251417347j)
    Some closely spaced zeros::
    >>> nzeros(10**7)
    21136125
    >>> zetazero(21136125)
    (0.5 + 9999999.32718175j)
    >>> zetazero(21136126)
    (0.5 + 10000000.2400236j)
    >>> nzeros(545439823.215)
    1500000001
    >>> zetazero(1500000001)
    (0.5 + 545439823.201985j)
    >>> zetazero(1500000002)
    (0.5 + 545439823.325697j)
    This confirms the data given by J. van de Lune,
    H. J. J. te Riele and D. T. Winter in 1986.
    """
    # Below the ordinate of the first zero there is nothing to count.
    if t < 14.1347251417347:
        return 0
    x = gram_index(ctx, t)
    k = int(ctx.floor(x))
    wpinitial = ctx.prec
    wpz, fp_tolerance = comp_fp_tolerance(ctx, k)
    ctx.prec = wpz
    a = ctx.siegelz(t)
    # t below Gram point g(0): at most one zero can lie below t.
    if k == -1 and a < 0:
        return 0
    elif k == -1 and a > 0:
        return 1
    # Find a block of Gram intervals around t with a known zero count.
    if k+2 < 400000000:
        Rblock = find_rosser_block_zero(ctx, k+2)
    else:
        Rblock = search_supergood_block(ctx, k+2, fp_tolerance)
    n1, n2 = Rblock[1]
    if n2-n1 == 1:
        # Single Gram interval: the sign of Z at t relative to the block's
        # left endpoint decides whether the interval's zero is below t.
        b = Rblock[3][0]
        if a*b > 0:
            ctx.prec = wpinitial
            return k+1
        else:
            ctx.prec = wpinitial
            return k+2
    my_zero_number,block, T, V = Rblock
    zero_number_block = n2-n1
    T, V, separated = separate_zeros_in_block(ctx,\
        zero_number_block, T, V,\
        limitloop=ctx.inf,\
        fp_tolerance=fp_tolerance)
    # Count how many of the separated zeros lie below t.
    n = count_to(ctx, t, T, V)
    ctx.prec = wpinitial
    return n+n1+1
@defun_wrapped
def backlunds(ctx, t):
    r"""
    Computes the function
    `S(t) = \operatorname{arg} \zeta(\frac{1}{2} + it) / \pi`.
    See Titchmarsh Section 9.3 for details of the definition.
    **Examples**
    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> backlunds(217.3)
    0.16302205431184
    Generally, the value is a small number. At Gram points it is an integer,
    frequently equal to 0::
    >>> chop(backlunds(grampoint(200)))
    0.0
    >>> backlunds(extraprec(10)(grampoint)(211))
    1.0
    >>> backlunds(extraprec(10)(grampoint)(232))
    -1.0
    The number of zeros of the Riemann zeta function up to height `t`
    satisfies `N(t) = \theta(t)/\pi + 1 + S(t)` (see :func:`nzeros` and
    :func:`siegeltheta`)::
    >>> t = 1234.55
    >>> nzeros(t)
    842
    >>> siegeltheta(t)/pi+1+backlunds(t)
    842.0
    """
    # Rearranged from N(t) = theta(t)/pi + 1 + S(t).
    theta_term = ctx.siegeltheta(t)/ctx.pi
    return ctx.nzeros(t) - 1 - theta_term
"""
_ROSSER_EXCEPTIONS is a list of all exceptions to
Rosser's rule for n <= 400 000 000.
Alternately the entry is of type [n,m], or a string.
The string is the zero pattern of the Block and the relevant
adjacent. For example (010)3 corresponds to a block
composed of three Gram intervals, the first and third without
a zero and the intermediate with a zero. The next Gram interval
contains three zeros. So that in total we have 4 zeros in 4 Gram
blocks. n and m are the indices of the Gram points of this
interval of four Gram intervals. The Rosser exception is therefore
formed by the three Gram intervals that are signaled between
parenthesis.
We have included also some Rosser's exceptions beyond n=400 000 000
that are noted in the literature by some reason.
The list is composed from the data published in the references:
R. P. Brent, J. van de Lune, H. J. J. te Riele, D. T. Winter,
'On the Zeros of the Riemann Zeta Function in the Critical Strip. II',
Math. Comp. 39 (1982) 681--688.
See also Corrigenda in Math. Comp. 46 (1986) 771.
J. van de Lune, H. J. J. te Riele,
'On the Zeros of the Riemann Zeta Function in the Critical Strip. III',
Math. Comp. 41 (1983) 759--767.
See also Corrigenda in Math. Comp. 46 (1986) 771.
J. van de Lune,
'Sums of Equal Powers of Positive Integers',
Dissertation,
Vrije Universiteit te Amsterdam, Centrum voor Wiskunde en Informatica,
Amsterdam, 1984.
Thanks to the authors of all these papers and to the others who have
contributed to make this possible.
"""
_ROSSER_EXCEPTIONS = \
[[13999525, 13999528], '(00)3',
[30783329, 30783332], '(00)3',
[30930926, 30930929], '3(00)',
[37592215, 37592218], '(00)3',
[40870156, 40870159], '(00)3',
[43628107, 43628110], '(00)3',
[46082042, 46082045], '(00)3',
[46875667, 46875670], '(00)3',
[49624540, 49624543], '3(00)',
[50799238, 50799241], '(00)3',
[55221453, 55221456], '3(00)',
[56948779, 56948782], '3(00)',
[60515663, 60515666], '(00)3',
[61331766, 61331770], '(00)40',
[69784843, 69784846], '3(00)',
[75052114, 75052117], '(00)3',
[79545240, 79545243], '3(00)',
[79652247, 79652250], '3(00)',
[83088043, 83088046], '(00)3',
[83689522, 83689525], '3(00)',
[85348958, 85348961], '(00)3',
[86513820, 86513823], '(00)3',
[87947596, 87947599], '3(00)',
[88600095, 88600098], '(00)3',
[93681183, 93681186], '(00)3',
[100316551, 100316554], '3(00)',
[100788444, 100788447], '(00)3',
[106236172, 106236175], '(00)3',
[106941327, 106941330], '3(00)',
[107287955, 107287958], '(00)3',
[107532016, 107532019], '3(00)',
[110571044, 110571047], '(00)3',
[111885253, 111885256], '3(00)',
[113239783, 113239786], '(00)3',
[120159903, 120159906], '(00)3',
[121424391, 121424394], '3(00)',
[121692931, 121692934], '3(00)',
[121934170, 121934173], '3(00)',
[122612848, 122612851], '3(00)',
[126116567, 126116570], '(00)3',
[127936513, 127936516], '(00)3',
[128710277, 128710280], '3(00)',
[129398902, 129398905], '3(00)',
[130461096, 130461099], '3(00)',
[131331947, 131331950], '3(00)',
[137334071, 137334074], '3(00)',
[137832603, 137832606], '(00)3',
[138799471, 138799474], '3(00)',
[139027791, 139027794], '(00)3',
[141617806, 141617809], '(00)3',
[144454931, 144454934], '(00)3',
[145402379, 145402382], '3(00)',
[146130245, 146130248], '3(00)',
[147059770, 147059773], '(00)3',
[147896099, 147896102], '3(00)',
[151097113, 151097116], '(00)3',
[152539438, 152539441], '(00)3',
[152863168, 152863171], '3(00)',
[153522726, 153522729], '3(00)',
[155171524, 155171527], '3(00)',
[155366607, 155366610], '(00)3',
[157260686, 157260689], '3(00)',
[157269224, 157269227], '(00)3',
[157755123, 157755126], '(00)3',
[158298484, 158298487], '3(00)',
[160369050, 160369053], '3(00)',
[162962787, 162962790], '(00)3',
[163724709, 163724712], '(00)3',
[164198113, 164198116], '3(00)',
[164689301, 164689305], '(00)40',
[164880228, 164880231], '3(00)',
[166201932, 166201935], '(00)3',
[168573836, 168573839], '(00)3',
[169750763, 169750766], '(00)3',
[170375507, 170375510], '(00)3',
[170704879, 170704882], '3(00)',
[172000992, 172000995], '3(00)',
[173289941, 173289944], '(00)3',
[173737613, 173737616], '3(00)',
[174102513, 174102516], '(00)3',
[174284990, 174284993], '(00)3',
[174500513, 174500516], '(00)3',
[175710609, 175710612], '(00)3',
[176870843, 176870846], '3(00)',
[177332732, 177332735], '3(00)',
[177902861, 177902864], '3(00)',
[179979095, 179979098], '(00)3',
[181233726, 181233729], '3(00)',
[181625435, 181625438], '(00)3',
[182105255, 182105259], '22(00)',
[182223559, 182223562], '3(00)',
[191116404, 191116407], '3(00)',
[191165599, 191165602], '3(00)',
[191297535, 191297539], '(00)22',
[192485616, 192485619], '(00)3',
[193264634, 193264638], '22(00)',
[194696968, 194696971], '(00)3',
[195876805, 195876808], '(00)3',
[195916548, 195916551], '3(00)',
[196395160, 196395163], '3(00)',
[196676303, 196676306], '(00)3',
[197889882, 197889885], '3(00)',
[198014122, 198014125], '(00)3',
[199235289, 199235292], '(00)3',
[201007375, 201007378], '(00)3',
[201030605, 201030608], '3(00)',
[201184290, 201184293], '3(00)',
[201685414, 201685418], '(00)22',
[202762875, 202762878], '3(00)',
[202860957, 202860960], '3(00)',
[203832577, 203832580], '3(00)',
[205880544, 205880547], '(00)3',
[206357111, 206357114], '(00)3',
[207159767, 207159770], '3(00)',
[207167343, 207167346], '3(00)',
[207482539, 207482543], '3(010)',
[207669540, 207669543], '3(00)',
[208053426, 208053429], '(00)3',
[208110027, 208110030], '3(00)',
[209513826, 209513829], '3(00)',
[212623522, 212623525], '(00)3',
[213841715, 213841718], '(00)3',
[214012333, 214012336], '(00)3',
[214073567, 214073570], '(00)3',
[215170600, 215170603], '3(00)',
[215881039, 215881042], '3(00)',
[216274604, 216274607], '3(00)',
[216957120, 216957123], '3(00)',
[217323208, 217323211], '(00)3',
[218799264, 218799267], '(00)3',
[218803557, 218803560], '3(00)',
[219735146, 219735149], '(00)3',
[219830062, 219830065], '3(00)',
[219897904, 219897907], '(00)3',
[221205545, 221205548], '(00)3',
[223601929, 223601932], '(00)3',
[223907076, 223907079], '3(00)',
[223970397, 223970400], '(00)3',
[224874044, 224874048], '22(00)',
[225291157, 225291160], '(00)3',
[227481734, 227481737], '(00)3',
[228006442, 228006445], '3(00)',
[228357900, 228357903], '(00)3',
[228386399, 228386402], '(00)3',
[228907446, 228907449], '(00)3',
[228984552, 228984555], '3(00)',
[229140285, 229140288], '3(00)',
[231810024, 231810027], '(00)3',
[232838062, 232838065], '3(00)',
[234389088, 234389091], '3(00)',
[235588194, 235588197], '(00)3',
[236645695, 236645698], '(00)3',
[236962876, 236962879], '3(00)',
[237516723, 237516727], '04(00)',
[240004911, 240004914], '(00)3',
[240221306, 240221309], '3(00)',
[241389213, 241389217], '(010)3',
[241549003, 241549006], '(00)3',
[241729717, 241729720], '(00)3',
[241743684, 241743687], '3(00)',
[243780200, 243780203], '3(00)',
[243801317, 243801320], '(00)3',
[244122072, 244122075], '(00)3',
[244691224, 244691227], '3(00)',
[244841577, 244841580], '(00)3',
[245813461, 245813464], '(00)3',
[246299475, 246299478], '(00)3',
[246450176, 246450179], '3(00)',
[249069349, 249069352], '(00)3',
[250076378, 250076381], '(00)3',
[252442157, 252442160], '3(00)',
[252904231, 252904234], '3(00)',
[255145220, 255145223], '(00)3',
[255285971, 255285974], '3(00)',
[256713230, 256713233], '(00)3',
[257992082, 257992085], '(00)3',
[258447955, 258447959], '22(00)',
[259298045, 259298048], '3(00)',
[262141503, 262141506], '(00)3',
[263681743, 263681746], '3(00)',
[266527881, 266527885], '(010)3',
[266617122, 266617125], '(00)3',
[266628044, 266628047], '3(00)',
[267305763, 267305766], '(00)3',
[267388404, 267388407], '3(00)',
[267441672, 267441675], '3(00)',
[267464886, 267464889], '(00)3',
[267554907, 267554910], '3(00)',
[269787480, 269787483], '(00)3',
[270881434, 270881437], '(00)3',
[270997583, 270997586], '3(00)',
[272096378, 272096381], '3(00)',
[272583009, 272583012], '(00)3',
[274190881, 274190884], '3(00)',
[274268747, 274268750], '(00)3',
[275297429, 275297432], '3(00)',
[275545476, 275545479], '3(00)',
[275898479, 275898482], '3(00)',
[275953000, 275953003], '(00)3',
[277117197, 277117201], '(00)22',
[277447310, 277447313], '3(00)',
[279059657, 279059660], '3(00)',
[279259144, 279259147], '3(00)',
[279513636, 279513639], '3(00)',
[279849069, 279849072], '3(00)',
[280291419, 280291422], '(00)3',
[281449425, 281449428], '3(00)',
[281507953, 281507956], '3(00)',
[281825600, 281825603], '(00)3',
[282547093, 282547096], '3(00)',
[283120963, 283120966], '3(00)',
[283323493, 283323496], '(00)3',
[284764535, 284764538], '3(00)',
[286172639, 286172642], '3(00)',
[286688824, 286688827], '(00)3',
[287222172, 287222175], '3(00)',
[287235534, 287235537], '3(00)',
[287304861, 287304864], '3(00)',
[287433571, 287433574], '(00)3',
[287823551, 287823554], '(00)3',
[287872422, 287872425], '3(00)',
[288766615, 288766618], '3(00)',
[290122963, 290122966], '3(00)',
[290450849, 290450853], '(00)22',
[291426141, 291426144], '3(00)',
[292810353, 292810356], '3(00)',
[293109861, 293109864], '3(00)',
[293398054, 293398057], '3(00)',
[294134426, 294134429], '3(00)',
[294216438, 294216441], '(00)3',
[295367141, 295367144], '3(00)',
[297834111, 297834114], '3(00)',
[299099969, 299099972], '3(00)',
[300746958, 300746961], '3(00)',
[301097423, 301097426], '(00)3',
[301834209, 301834212], '(00)3',
[302554791, 302554794], '(00)3',
[303497445, 303497448], '3(00)',
[304165344, 304165347], '3(00)',
[304790218, 304790222], '3(010)',
[305302352, 305302355], '(00)3',
[306785996, 306785999], '3(00)',
[307051443, 307051446], '3(00)',
[307481539, 307481542], '3(00)',
[308605569, 308605572], '3(00)',
[309237610, 309237613], '3(00)',
[310509287, 310509290], '(00)3',
[310554057, 310554060], '3(00)',
[310646345, 310646348], '3(00)',
[311274896, 311274899], '(00)3',
[311894272, 311894275], '3(00)',
[312269470, 312269473], '(00)3',
[312306601, 312306605], '(00)40',
[312683193, 312683196], '3(00)',
[314499804, 314499807], '3(00)',
[314636802, 314636805], '(00)3',
[314689897, 314689900], '3(00)',
[314721319, 314721322], '3(00)',
[316132890, 316132893], '3(00)',
[316217470, 316217474], '(010)3',
[316465705, 316465708], '3(00)',
[316542790, 316542793], '(00)3',
[320822347, 320822350], '3(00)',
[321733242, 321733245], '3(00)',
[324413970, 324413973], '(00)3',
[325950140, 325950143], '(00)3',
[326675884, 326675887], '(00)3',
[326704208, 326704211], '3(00)',
[327596247, 327596250], '3(00)',
[328123172, 328123175], '3(00)',
[328182212, 328182215], '(00)3',
[328257498, 328257501], '3(00)',
[328315836, 328315839], '(00)3',
[328800974, 328800977], '(00)3',
[328998509, 328998512], '3(00)',
[329725370, 329725373], '(00)3',
[332080601, 332080604], '(00)3',
[332221246, 332221249], '(00)3',
[332299899, 332299902], '(00)3',
[332532822, 332532825], '(00)3',
[333334544, 333334548], '(00)22',
[333881266, 333881269], '3(00)',
[334703267, 334703270], '3(00)',
[334875138, 334875141], '3(00)',
[336531451, 336531454], '3(00)',
[336825907, 336825910], '(00)3',
[336993167, 336993170], '(00)3',
[337493998, 337494001], '3(00)',
[337861034, 337861037], '3(00)',
[337899191, 337899194], '(00)3',
[337958123, 337958126], '(00)3',
[342331982, 342331985], '3(00)',
[342676068, 342676071], '3(00)',
[347063781, 347063784], '3(00)',
[347697348, 347697351], '3(00)',
[347954319, 347954322], '3(00)',
[348162775, 348162778], '3(00)',
[349210702, 349210705], '(00)3',
[349212913, 349212916], '3(00)',
[349248650, 349248653], '(00)3',
[349913500, 349913503], '3(00)',
[350891529, 350891532], '3(00)',
[351089323, 351089326], '3(00)',
[351826158, 351826161], '3(00)',
[352228580, 352228583], '(00)3',
[352376244, 352376247], '3(00)',
[352853758, 352853761], '(00)3',
[355110439, 355110442], '(00)3',
[355808090, 355808094], '(00)40',
[355941556, 355941559], '3(00)',
[356360231, 356360234], '(00)3',
[356586657, 356586660], '3(00)',
[356892926, 356892929], '(00)3',
[356908232, 356908235], '3(00)',
[357912730, 357912733], '3(00)',
[358120344, 358120347], '3(00)',
[359044096, 359044099], '(00)3',
[360819357, 360819360], '3(00)',
[361399662, 361399666], '(010)3',
[362361315, 362361318], '(00)3',
[363610112, 363610115], '(00)3',
[363964804, 363964807], '3(00)',
[364527375, 364527378], '(00)3',
[365090327, 365090330], '(00)3',
[365414539, 365414542], '3(00)',
[366738474, 366738477], '3(00)',
[368714778, 368714783], '04(010)',
[368831545, 368831548], '(00)3',
[368902387, 368902390], '(00)3',
[370109769, 370109772], '3(00)',
[370963333, 370963336], '3(00)',
[372541136, 372541140], '3(010)',
[372681562, 372681565], '(00)3',
[373009410, 373009413], '(00)3',
[373458970, 373458973], '3(00)',
[375648658, 375648661], '3(00)',
[376834728, 376834731], '3(00)',
[377119945, 377119948], '(00)3',
[377335703, 377335706], '(00)3',
[378091745, 378091748], '3(00)',
[379139522, 379139525], '3(00)',
[380279160, 380279163], '(00)3',
[380619442, 380619445], '3(00)',
[381244231, 381244234], '3(00)',
[382327446, 382327450], '(010)3',
[382357073, 382357076], '3(00)',
[383545479, 383545482], '3(00)',
[384363766, 384363769], '(00)3',
[384401786, 384401790], '22(00)',
[385198212, 385198215], '3(00)',
[385824476, 385824479], '(00)3',
[385908194, 385908197], '3(00)',
[386946806, 386946809], '3(00)',
[387592175, 387592179], '22(00)',
[388329293, 388329296], '(00)3',
[388679566, 388679569], '3(00)',
[388832142, 388832145], '3(00)',
[390087103, 390087106], '(00)3',
[390190926, 390190930], '(00)22',
[390331207, 390331210], '3(00)',
[391674495, 391674498], '3(00)',
[391937831, 391937834], '3(00)',
[391951632, 391951636], '(00)22',
[392963986, 392963989], '(00)3',
[393007921, 393007924], '3(00)',
[393373210, 393373213], '3(00)',
[393759572, 393759575], '(00)3',
[394036662, 394036665], '(00)3',
[395813866, 395813869], '(00)3',
[395956690, 395956693], '3(00)',
[396031670, 396031673], '3(00)',
[397076433, 397076436], '3(00)',
[397470601, 397470604], '3(00)',
[398289458, 398289461], '3(00)',
#
[368714778, 368714783], '04(010)',
[437953499, 437953504], '04(010)',
[526196233, 526196238], '032(00)',
[744719566, 744719571], '(010)40',
[750375857, 750375862], '032(00)',
[958241932, 958241937], '04(010)',
[983377342, 983377347], '(00)410',
[1003780080, 1003780085], '04(010)',
[1070232754, 1070232759], '(00)230',
[1209834865, 1209834870], '032(00)',
[1257209100, 1257209105], '(00)410',
[1368002233, 1368002238], '(00)230'
]
|
# coding: utf-8
"""Compatibility tricks for Python 3. Mainly to do with unicode."""
import functools
import os
import sys
import re
import shutil
import types
from .encoding import DEFAULT_ENCODING
def no_code(x, encoding=None):
    """Identity converter: return x unchanged (encoding is ignored)."""
    return x
def decode(s, encoding=None):
    """Decode bytes *s* to text, substituting U+FFFD for undecodable bytes."""
    if not encoding:
        encoding = DEFAULT_ENCODING
    return s.decode(encoding, "replace")
def encode(u, encoding=None):
    """Encode text *u* to bytes, replacing unencodable characters."""
    if not encoding:
        encoding = DEFAULT_ENCODING
    return u.encode(encoding, "replace")
def cast_unicode(s, encoding=None):
    """Coerce *s* to text: decode if it is bytes, otherwise return unchanged."""
    return decode(s, encoding) if isinstance(s, bytes) else s
def cast_bytes(s, encoding=None):
    """Coerce *s* to bytes: encode unless it is already bytes."""
    return s if isinstance(s, bytes) else encode(s, encoding)
def buffer_to_bytes(buf):
    """Cast a buffer or memoryview object to bytes."""
    if isinstance(buf, memoryview):
        buf = buf.tobytes()
    if isinstance(buf, bytes):
        return buf
    return bytes(buf)
def _modify_str_or_docstring(str_change_func):
    """Decorator factory: lift a string transform so it can be applied
    either to a plain string or to a function's docstring."""
    @functools.wraps(str_change_func)
    def wrapper(func_or_str):
        if isinstance(func_or_str, string_types):
            func, doc = None, func_or_str
        else:
            func, doc = func_or_str, func_or_str.__doc__
        doc = str_change_func(doc)
        if func:
            # Function input: rewrite its docstring and hand the function back.
            func.__doc__ = doc
            return func
        return doc
    return wrapper
def safe_unicode(e):
    """unicode(e) with various fallbacks. Used for exceptions, which may not be
    safe to call unicode() on.
    """
    # Try progressively blunter conversions, swallowing only UnicodeError.
    for to_text in (lambda obj: unicode_type(obj),
                    lambda obj: str_to_unicode(str(obj)),
                    lambda obj: str_to_unicode(repr(obj))):
        try:
            return to_text(e)
        except UnicodeError:
            pass
    return u'Unrecoverably corrupt evalue'
# shutil.which from Python 3.4
def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):
    """Given a command, mode, and a PATH string, return the path which
    conforms to the given mode on the PATH, or None if there is no such
    file.

    `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
    of os.environ.get("PATH"), or can be overridden with a custom search
    path.

    This is a backport of shutil.which from Python 3.4
    """
    # Check that a given file can be accessed with the correct mode.
    # Additionally check that `file` is not a directory, as on Windows
    # directories pass the os.access check.
    def _access_check(fn, mode):
        return (os.path.exists(fn) and os.access(fn, mode)
                and not os.path.isdir(fn))
    # If we're given a path with a directory part, look it up directly rather
    # than referring to PATH directories. This includes checking relative to the
    # current directory, e.g. ./script
    if os.path.dirname(cmd):
        if _access_check(cmd, mode):
            return cmd
        return None
    if path is None:
        path = os.environ.get("PATH", os.defpath)
    if not path:
        return None
    path = path.split(os.pathsep)
    if sys.platform == "win32":
        # The current directory takes precedence on Windows.
        if not os.curdir in path:
            path.insert(0, os.curdir)
        # PATHEXT is necessary to check on Windows.
        pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
        # See if the given file matches any of the expected path extensions.
        # This will allow us to short circuit when given "python.exe".
        # If it does match, only test that one, otherwise we have to try
        # others.
        if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
            files = [cmd]
        else:
            files = [cmd + ext for ext in pathext]
    else:
        # On other platforms you don't have things like PATHEXT to tell you
        # what file suffixes are executable, so just pass on cmd as-is.
        files = [cmd]
    # Track normalized directories already searched, so duplicate entries in
    # PATH are only scanned once.
    seen = set()
    for dir in path:
        normdir = os.path.normcase(dir)
        if not normdir in seen:
            seen.add(normdir)
            for thefile in files:
                name = os.path.join(dir, thefile)
                if _access_check(name, mode):
                    return name
    return None
import platform
# Python 2/3 (and IronPython) compatibility aliases. On Python 3 (and
# IronPython) str is already unicode, so most str<->unicode conversions are
# no-ops; on Python 2 the `unicode` builtin exists and real conversions are
# needed.
if sys.version_info[0] >= 3 or platform.python_implementation() == 'IronPython':
    str_to_unicode = no_code
    unicode_to_str = no_code
    str_to_bytes = encode
    bytes_to_str = decode
    cast_bytes_py2 = no_code
    cast_unicode_py2 = no_code
    buffer_to_bytes_py2 = no_code
    string_types = (str,)
    unicode_type = str
else:
    # Python 2 branch: `unicode` is the py2 builtin text type.
    str_to_unicode = decode
    unicode_to_str = encode
    str_to_bytes = no_code
    bytes_to_str = no_code
    cast_bytes_py2 = cast_bytes
    cast_unicode_py2 = cast_unicode
    buffer_to_bytes_py2 = buffer_to_bytes
    string_types = (str, unicode)
    unicode_type = unicode
if sys.version_info[0] >= 3:
    PY3 = True
    # keep reference to builtin_mod because the kernel overrides that value
    # to forward requests to a frontend.
    def input(prompt=''):
        return builtin_mod.input(prompt)
    builtin_mod_name = "builtins"
    import builtins as builtin_mod
    which = shutil.which
    def isidentifier(s, dotted=False):
        # A dotted name is valid only if every component is an identifier.
        if dotted:
            return all(isidentifier(a) for a in s.split("."))
        return s.isidentifier()
    xrange = range
    def iteritems(d): return iter(d.items())
    def itervalues(d): return iter(d.values())
    getcwd = os.getcwd
    MethodType = types.MethodType
    def execfile(fname, glob, loc=None, compiler=None):
        # Python 3 replacement for the removed execfile() builtin.
        loc = loc if (loc is not None) else glob
        with open(fname, 'rb') as f:
            compiler = compiler or compile
            exec(compiler(f.read(), fname, 'exec'), glob, loc)
    # Refactor print statements in doctests.
    _print_statement_re = re.compile(r"\bprint (?P<expr>.*)$", re.MULTILINE)
    def _print_statement_sub(match):
        # NOTE(review): match.groups('expr') returns a 1-tuple, which the "%"
        # operator below unpacks, so the output is correct; match.group('expr')
        # was probably intended -- confirm before changing.
        expr = match.groups('expr')
        return "print(%s)" % expr
    @_modify_str_or_docstring
    def doctest_refactor_print(doc):
        """Refactor 'print x' statements in a doctest to print(x) style. 2to3
        unfortunately doesn't pick up on our doctests.
        Can accept a string or a function, so it can be used as a decorator."""
        return _print_statement_re.sub(_print_statement_sub, doc)
    # Abstract u'abc' syntax:
    @_modify_str_or_docstring
    def u_format(s):
        """"{u}'abc'" --> "'abc'" (Python 3)
        Accepts a string or a function, so it can be used as a decorator."""
        return s.format(u='')
    def get_closure(f):
        """Get a function's closure attribute"""
        return f.__closure__
else:
    PY3 = False
    # keep reference to builtin_mod because the kernel overrides that value
    # to forward requests to a frontend.
    def input(prompt=''):
        return builtin_mod.raw_input(prompt)
    builtin_mod_name = "__builtin__"
    import __builtin__ as builtin_mod
    import re
    _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
    def isidentifier(s, dotted=False):
        # Python 2 has no str.isidentifier(); emulate it with a regex.
        if dotted:
            return all(isidentifier(a) for a in s.split("."))
        return bool(_name_re.match(s))
    xrange = xrange
    def iteritems(d): return d.iteritems()
    def itervalues(d): return d.itervalues()
    getcwd = os.getcwdu
    def MethodType(func, instance):
        # Python 2 needs the instance's class as the third argument to bind.
        return types.MethodType(func, instance, type(instance))
    def doctest_refactor_print(func_or_str):
        # Nothing to refactor on Python 2: print statements are valid there.
        return func_or_str
    def get_closure(f):
        """Get a function's closure attribute"""
        return f.func_closure
    which = _shutil_which
    # Abstract u'abc' syntax:
    @_modify_str_or_docstring
    def u_format(s):
        """"{u}'abc'" --> "u'abc'" (Python 2)
        Accepts a string or a function, so it can be used as a decorator."""
        return s.format(u='u')
    if sys.platform == 'win32':
        def execfile(fname, glob=None, loc=None, compiler=None):
            loc = loc if (loc is not None) else glob
            scripttext = builtin_mod.open(fname).read()+ '\n'
            # compile converts unicode filename to str assuming
            # ascii. Let's do the conversion before calling compile
            if isinstance(fname, unicode):
                filename = unicode_to_str(fname)
            else:
                filename = fname
            compiler = compiler or compile
            exec(compiler(scripttext, filename, 'exec'), glob, loc)
    else:
        def execfile(fname, glob=None, loc=None, compiler=None):
            if isinstance(fname, unicode):
                filename = fname.encode(sys.getfilesystemencoding())
            else:
                filename = fname
            where = [ns for ns in [glob, loc] if ns is not None]
            if compiler is None:
                builtin_mod.execfile(filename, *where)
            else:
                scripttext = builtin_mod.open(fname).read().rstrip() + '\n'
                exec(compiler(scripttext, filename, 'exec'), glob, loc)
def annotate(**kwargs):
    """Python 3 compatible function annotation for Python 2."""
    if not kwargs:
        raise ValueError('annotations must be provided as keyword arguments')
    def dec(f):
        # Merge into an existing __annotations__ mapping, or install the
        # keyword dict wholesale when the function has none.
        if not hasattr(f, '__annotations__'):
            f.__annotations__ = kwargs
        else:
            for key, value in kwargs.items():
                f.__annotations__[key] = value
        return f
    return dec
# Parts below taken from six:
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # Build a throwaway base through *meta* so that subclasses of the
    # returned class are themselves constructed by *meta*.
    namespace = {}
    return meta("_NewBase", bases, namespace)
|
"""
Enums representing sets of strings that Matplotlib uses as input parameters.
Matplotlib often uses simple data types like strings or tuples to define a
concept; e.g. the line capstyle can be specified as one of 'butt', 'round',
or 'projecting'. The classes in this module are used internally and serve to
document these concepts formally.
As an end-user you will not use these classes directly, but only the values
they define.
"""
from enum import Enum, auto
from matplotlib import _docstring
class _AutoStringNameEnum(Enum):
    """Automate the ``name = 'name'`` part of making a (str, Enum)."""
    def _generate_next_value_(name, start, count, last_values):
        # Hook called by enum.auto(): make each member's value its own name.
        return name
    def __hash__(self):
        # Hash the string form of the member so hashing stays consistent with
        # the str mixin used by subclasses.
        return str(self).__hash__()
class JoinStyle(str, _AutoStringNameEnum):
    """
    Define how the connection between two line segments is drawn.

    For a visual impression of each *JoinStyle*, `view these docs online
    <JoinStyle>`, or run `JoinStyle.demo`.

    Lines in Matplotlib are typically defined by a 1D `~.path.Path` and a
    finite ``linewidth``, where the underlying 1D `~.path.Path` represents the
    center of the stroked line.

    By default, `~.backend_bases.GraphicsContextBase` defines the boundaries of
    a stroked line to simply be every point within some radius,
    ``linewidth/2``, away from any point of the center line. However, this
    results in corners appearing "rounded", which may not be the desired
    behavior if you are drawing, for example, a polygon or pointed star.

    **Supported values:**

    .. rst-class:: value-list

        'miter'
            the "arrow-tip" style. Each boundary of the filled-in area will
            extend in a straight line parallel to the tangent vector of the
            centerline at the point it meets the corner, until they meet in a
            sharp point.
        'round'
            strokes every point within a radius of ``linewidth/2`` of the center
            lines.
        'bevel'
            the "squared-off" style. It can be thought of as a rounded corner
            where the "circular" part of the corner has been cut off.

    .. note::

        Very long miter tips are cut off (to form a *bevel*) after a
        backend-dependent limit called the "miter limit", which specifies the
        maximum allowed ratio of miter length to line width. For example, the
        PDF backend uses the default value of 10 specified by the PDF standard,
        while the SVG backend does not even specify the miter limit, resulting
        in a default value of 4 per the SVG specification. Matplotlib does not
        currently allow the user to adjust this parameter.

        A more detailed description of the effect of a miter limit can be found
        in the `Mozilla Developer Docs
        <https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/stroke-miterlimit>`_

    .. plot::
        :alt: Demo of possible JoinStyle's

        from matplotlib._enums import JoinStyle
        JoinStyle.demo()
    """
    miter = auto()
    round = auto()
    bevel = auto()

    @staticmethod
    def demo():
        """Demonstrate how each JoinStyle looks for various join angles."""
        import numpy as np
        import matplotlib.pyplot as plt

        def plot_angle(ax, x, y, angle, style):
            # Draw a thick elbow at (x, y) opening by *angle* degrees, plus a
            # thin centerline and a marker at the corner for reference.
            phi = np.radians(angle)
            xx = [x + .5, x, x + .5*np.cos(phi)]
            yy = [y, y, y + .5*np.sin(phi)]
            ax.plot(xx, yy, lw=12, color='tab:blue', solid_joinstyle=style)
            ax.plot(xx, yy, lw=1, color='black')
            ax.plot(xx[1], yy[1], 'o', color='tab:red', markersize=3)

        fig, ax = plt.subplots(figsize=(5, 4), constrained_layout=True)
        ax.set_title('Join style')
        for x, style in enumerate(['miter', 'round', 'bevel']):
            ax.text(x, 5, style)
            for y, angle in enumerate([20, 45, 60, 90, 120]):
                plot_angle(ax, x, y, angle, style)
                if x == 0:
                    ax.text(-1.3, y, f'{angle} degrees')
        ax.set_xlim(-1.5, 2.75)
        ax.set_ylim(-.5, 5.5)
        ax.set_axis_off()
        fig.show()
# Human-readable list of the valid names, e.g. "{'miter', 'round', 'bevel'}",
# used when documenting joinstyle parameters.
JoinStyle.input_description = (
    "{" + ", ".join(f"'{member.name}'" for member in JoinStyle) + "}"
)
class CapStyle(str, _AutoStringNameEnum):
    r"""
    Define how the two endpoints (caps) of an unclosed line are drawn.

    How to draw the start and end points of lines that represent a closed curve
    (i.e. that end in a `~.path.Path.CLOSEPOLY`) is controlled by the line's
    `JoinStyle`. For all other lines, how the start and end points are drawn is
    controlled by the *CapStyle*.

    For a visual impression of each *CapStyle*, `view these docs online
    <CapStyle>` or run `CapStyle.demo`.

    By default, `~.backend_bases.GraphicsContextBase` draws a stroked line as
    squared off at its endpoints.

    **Supported values:**

    .. rst-class:: value-list

        'butt'
            the line is squared off at its endpoint.
        'projecting'
            the line is squared off as in *butt*, but the filled in area
            extends beyond the endpoint a distance of ``linewidth/2``.
        'round'
            like *butt*, but a semicircular cap is added to the end of the
            line, of radius ``linewidth/2``.

    .. plot::
        :alt: Demo of possible CapStyle's

        from matplotlib._enums import CapStyle
        CapStyle.demo()
    """
    butt = auto()
    projecting = auto()
    round = auto()

    @staticmethod
    def demo():
        """Demonstrate how each CapStyle looks for a thick line segment."""
        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=(4, 1.2))
        ax = fig.add_axes([0, 0, 1, 0.8])
        ax.set_title('Cap style')
        for x, style in enumerate(['butt', 'round', 'projecting']):
            # Label each segment with its style name, centered above it.
            ax.text(x+0.25, 0.85, style, ha='center')
            xx = [x, x+0.5]
            yy = [0, 0]
            ax.plot(xx, yy, lw=12, color='tab:blue', solid_capstyle=style)
            ax.plot(xx, yy, lw=1, color='black')
            ax.plot(xx, yy, 'o', color='tab:red', markersize=3)
        ax.set_ylim(-.5, 1.5)
        ax.set_axis_off()
        fig.show()
# Human-readable list of the valid names, e.g. "{'butt', 'projecting', 'round'}",
# used when documenting capstyle parameters.
CapStyle.input_description = (
    "{" + ", ".join(f"'{member.name}'" for member in CapStyle) + "}"
)
# Register the supported-value strings so other docstrings can interpolate
# them via %(JoinStyle)s / %(CapStyle)s placeholders.
_docstring.interpd.update({'JoinStyle': JoinStyle.input_description,
                           'CapStyle': CapStyle.input_description})
|
"""
A module for converting numbers or color arguments to *RGB* or *RGBA*.
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification conversions,
and for mapping numbers to colors in a 1-D array of colors called a colormap.
Mapping data onto colors using a colormap typically involves two steps: a data
array is first mapped onto the range 0-1 using a subclass of `Normalize`,
then this number is mapped to a color using a subclass of `Colormap`. Two
subclasses of `Colormap` provided here: `LinearSegmentedColormap`, which uses
piecewise-linear interpolation to define colormaps, and `ListedColormap`, which
makes a colormap from a list of colors.
.. seealso::
:doc:`/tutorials/colors/colormap-manipulation` for examples of how to
make colormaps and
:doc:`/tutorials/colors/colormaps` for a list of built-in colormaps.
:doc:`/tutorials/colors/colormapnorms` for more details about data
normalization
More colormaps are available at palettable_.
The module also provides functions for checking whether an object can be
interpreted as a color (`is_color_like`), for converting such an object
to an RGBA tuple (`to_rgba`) or to an HTML-like hex string in the
"#rrggbb" format (`to_hex`), and a sequence of colors to an (n, 4)
RGBA array (`to_rgba_array`). Caching is used for efficiency.
Colors that Matplotlib recognizes are listed at
:doc:`/tutorials/colors/colors`.
.. _palettable: https://jiffyclub.github.io/palettable/
.. _xkcd color survey: https://xkcd.com/color/rgb/
"""
import base64
from collections.abc import Sized, Sequence, Mapping
import functools
import importlib
import inspect
import io
import itertools
from numbers import Number
import re
from PIL import Image
from PIL.PngImagePlugin import PngInfo
import matplotlib as mpl
import numpy as np
from matplotlib import _api, _cm, cbook, scale
from ._color_data import BASE_COLORS, TABLEAU_COLORS, CSS4_COLORS, XKCD_COLORS
class _ColorMapping(dict):
    """A name->color dict that invalidates an attached RGBA cache whenever
    the mapping is mutated.
    """
    def __init__(self, mapping):
        super().__init__(mapping)
        # Cache of parsed (color, alpha) -> RGBA results; cleared on every
        # mutation so stale conversions never survive a redefinition.
        self.cache = {}

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        self.cache.clear()

    def __delitem__(self, key):
        super().__delitem__(key)
        self.cache.clear()
_colors_full_map = {}
# Set by reverse priority order.
_colors_full_map.update(XKCD_COLORS)
# Also accept 'gray' spellings of xkcd 'grey' names.
_colors_full_map.update({k.replace('grey', 'gray'): v
                         for k, v in XKCD_COLORS.items()
                         if 'grey' in k})
_colors_full_map.update(CSS4_COLORS)
_colors_full_map.update(TABLEAU_COLORS)
# Also accept 'grey' spellings of tableau 'gray' names.
_colors_full_map.update({k.replace('gray', 'grey'): v
                         for k, v in TABLEAU_COLORS.items()
                         if 'gray' in k})
_colors_full_map.update(BASE_COLORS)
# Wrap in the cache-invalidating mapping so later registrations stay coherent.
_colors_full_map = _ColorMapping(_colors_full_map)
# (width, height) of the inline PNG representation; presumably consumed by a
# _repr_png_ later in the module -- not visible here, confirm before relying.
_REPR_PNG_SIZE = (512, 64)
def get_named_colors_mapping():
    """Return the global mapping of names to named colors."""
    # This is the module-level _ColorMapping instance; mutating it clears the
    # parsed-color cache automatically.
    return _colors_full_map
class ColorSequenceRegistry(Mapping):
    r"""
    Container for sequences of colors that are known to Matplotlib by name.

    The universal registry instance is `matplotlib.color_sequences`. There
    should be no need for users to instantiate `.ColorSequenceRegistry`
    themselves.

    Read access uses a dict-like interface mapping names to lists of colors::

        import matplotlib as mpl
        cmap = mpl.color_sequences['tab10']

    The returned lists are copies, so that their modification does not change
    the global definition of the color sequence.

    Additional color sequences can be added via
    `.ColorSequenceRegistry.register`::

        mpl.color_sequences.register('rgb', ['r', 'g', 'b'])
    """

    _BUILTIN_COLOR_SEQUENCES = {
        'tab10': _cm._tab10_data,
        'tab20': _cm._tab20_data,
        'tab20b': _cm._tab20b_data,
        'tab20c': _cm._tab20c_data,
        'Pastel1': _cm._Pastel1_data,
        'Pastel2': _cm._Pastel2_data,
        'Paired': _cm._Paired_data,
        'Accent': _cm._Accent_data,
        'Dark2': _cm._Dark2_data,
        'Set1': _cm._Set1_data,
        # Bug fix: 'Set2' and 'Set3' previously pointed at _cm._Set1_data,
        # so all three Set* sequences silently returned the Set1 palette.
        'Set2': _cm._Set2_data,
        'Set3': _cm._Set3_data,
    }

    def __init__(self):
        # Work on a copy so built-in definitions can never be mutated.
        self._color_sequences = {**self._BUILTIN_COLOR_SEQUENCES}

    def __getitem__(self, item):
        try:
            # Return a copy so callers cannot mutate the stored sequence.
            return list(self._color_sequences[item])
        except KeyError:
            raise KeyError(f"{item!r} is not a known color sequence name")

    def __iter__(self):
        return iter(self._color_sequences)

    def __len__(self):
        return len(self._color_sequences)

    def __str__(self):
        return ('ColorSequenceRegistry; available colormaps:\n' +
                ', '.join(f"'{name}'" for name in self))

    def register(self, name, color_list):
        """
        Register a new color sequence.

        The color sequence registry stores a copy of the given *color_list*, so
        that future changes to the original list do not affect the registered
        color sequence. Think of this as the registry taking a snapshot
        of *color_list* at registration.

        Parameters
        ----------
        name : str
            The name for the color sequence.
        color_list : list of colors
            An iterable returning valid Matplotlib colors when iterating over.
            Note however that the returned color sequence will always be a
            list regardless of the input type.

        Raises
        ------
        ValueError
            If *name* is a builtin sequence name, or any entry of
            *color_list* is not a valid color.
        """
        if name in self._BUILTIN_COLOR_SEQUENCES:
            raise ValueError(f"{name!r} is a reserved name for a builtin "
                             "color sequence")
        color_list = list(color_list)  # force copy and coerce type to list
        for color in color_list:
            try:
                to_rgba(color)
            except ValueError:
                raise ValueError(
                    f"{color!r} is not a valid color specification")
        self._color_sequences[name] = color_list

    def unregister(self, name):
        """
        Remove a sequence from the registry.

        You cannot remove built-in color sequences.

        If the name is not registered, returns with no error.
        """
        if name in self._BUILTIN_COLOR_SEQUENCES:
            raise ValueError(
                f"Cannot unregister builtin color sequence {name!r}")
        self._color_sequences.pop(name, None)
# Singleton registry instance (exposed as ``matplotlib.color_sequences``
# per the class docstring above).
_color_sequences = ColorSequenceRegistry()
def _sanitize_extrema(ex):
    """Coerce a limit value to a plain Python scalar, passing None through."""
    if ex is None:
        return None
    try:
        # numpy scalars (and 0-d arrays) unwrap via .item().
        return ex.item()
    except AttributeError:
        return float(ex)
def _is_nth_color(c):
    """Return whether *c* can be interpreted as an item in the color cycle."""
    if not isinstance(c, str):
        return False
    # "C" followed by one or more digits, e.g. "C0", "C12".
    return re.match(r"\AC[0-9]+\Z", c)
def is_color_like(c):
    """Return whether *c* can be interpreted as an RGB(A) color."""
    # Special-case nth color syntax because it cannot be parsed during setup.
    if _is_nth_color(c):
        return True
    try:
        to_rgba(c)
    except ValueError:
        return False
    return True
def _has_alpha_channel(c):
    """Return whether *c* is a color with an alpha channel."""
    # Strings never carry an explicit alpha; 4-element sequences are r, g, b, a.
    if isinstance(c, str):
        return False
    return len(c) == 4
def _check_color_like(**kwargs):
    """
    For each *key, value* pair in *kwargs*, check that *value* is color-like.

    Raises ValueError on the first value that is not a valid color.
    """
    for name, value in kwargs.items():
        if not is_color_like(value):
            raise ValueError(f"{value!r} is not a valid value for {name}")
def same_color(c1, c2):
    """
    Return whether the colors *c1* and *c2* are the same.

    *c1*, *c2* can be single colors or lists/arrays of colors.
    """
    rgba1 = to_rgba_array(c1)
    rgba2 = to_rgba_array(c2)
    # 'none' converts to a (0, 4) array but still stands for one element.
    if max(rgba1.shape[0], 1) != max(rgba2.shape[0], 1):
        raise ValueError('Different number of elements passed.')
    # Compare shapes first: 'none' yields a (0, 4) array whose values cannot
    # be compared element-wise against a non-empty array.
    return rgba1.shape == rgba2.shape and (rgba1 == rgba2).all()
def to_rgba(c, alpha=None):
    """
    Convert *c* to an RGBA color.

    Parameters
    ----------
    c : Matplotlib color or ``np.ma.masked``
    alpha : float, optional
        If *alpha* is given, force the alpha value of the returned RGBA tuple
        to *alpha*.

        If None, the alpha value from *c* is used. If *c* does not have an
        alpha channel, then alpha defaults to 1.

        *alpha* is ignored for the color value ``"none"`` (case-insensitive),
        which always maps to ``(0, 0, 0, 0)``.

    Returns
    -------
    tuple
        Tuple of floats ``(r, g, b, a)``, where each channel (red, green, blue,
        alpha) can assume values between 0 and 1.
    """
    # Special-case nth color syntax because it should not be cached.
    if _is_nth_color(c):
        prop_cycler = mpl.rcParams['axes.prop_cycle']
        colors = prop_cycler.by_key().get('color', ['k'])
        # "C7" -> 8th cycle color, wrapping around if the cycle is shorter.
        c = colors[int(c[1:]) % len(colors)]
    try:
        rgba = _colors_full_map.cache[c, alpha]
    except (KeyError, TypeError):  # Not in cache, or unhashable.
        rgba = None
    if rgba is None:  # Suppress exception chaining of cache lookup failure.
        rgba = _to_rgba_no_colorcycle(c, alpha)
        try:
            _colors_full_map.cache[c, alpha] = rgba
        except TypeError:
            # Unhashable inputs (e.g. lists) simply go uncached.
            pass
    return rgba
def _to_rgba_no_colorcycle(c, alpha=None):
    """
    Convert *c* to an RGBA color, with no support for color-cycle syntax.

    If *alpha* is given, force the alpha value of the returned RGBA tuple
    to *alpha*. Otherwise, the alpha value from *c* is used, if it has alpha
    information, or defaults to 1.

    *alpha* is ignored for the color value ``"none"`` (case-insensitive),
    which always maps to ``(0, 0, 0, 0)``.

    Raises
    ------
    ValueError
        If *c* cannot be interpreted as a color.
    """
    orig_c = c
    if c is np.ma.masked:
        return (0., 0., 0., 0.)
    if isinstance(c, str):
        if c.lower() == "none":
            return (0., 0., 0., 0.)
        # Named color.
        try:
            # This may turn c into a non-string, so we check again below.
            c = _colors_full_map[c]
        except KeyError:
            # Only multi-character names get a case-insensitive retry;
            # single-letter colors stay case-sensitive.
            if len(orig_c) != 1:
                try:
                    c = _colors_full_map[c.lower()]
                except KeyError:
                    pass
    if isinstance(c, str):
        # hex color in #rrggbb format.
        match = re.match(r"\A#[a-fA-F0-9]{6}\Z", c)
        if match:
            return (tuple(int(n, 16) / 255
                          for n in [c[1:3], c[3:5], c[5:7]])
                    + (alpha if alpha is not None else 1.,))
        # hex color in #rgb format, shorthand for #rrggbb.
        match = re.match(r"\A#[a-fA-F0-9]{3}\Z", c)
        if match:
            return (tuple(int(n, 16) / 255
                          for n in [c[1]*2, c[2]*2, c[3]*2])
                    + (alpha if alpha is not None else 1.,))
        # hex color with alpha in #rrggbbaa format.
        match = re.match(r"\A#[a-fA-F0-9]{8}\Z", c)
        if match:
            color = [int(n, 16) / 255
                     for n in [c[1:3], c[3:5], c[5:7], c[7:9]]]
            if alpha is not None:
                color[-1] = alpha
            return tuple(color)
        # hex color with alpha in #rgba format, shorthand for #rrggbbaa.
        match = re.match(r"\A#[a-fA-F0-9]{4}\Z", c)
        if match:
            color = [int(n, 16) / 255
                     for n in [c[1]*2, c[2]*2, c[3]*2, c[4]*2]]
            if alpha is not None:
                color[-1] = alpha
            return tuple(color)
        # string gray.
        try:
            c = float(c)
        except ValueError:
            pass
        else:
            if not (0 <= c <= 1):
                raise ValueError(
                    f"Invalid string grayscale value {orig_c!r}. "
                    f"Value must be within 0-1 range")
            return c, c, c, alpha if alpha is not None else 1.
        raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
    # turn 2-D array into 1-D array
    if isinstance(c, np.ndarray):
        if c.ndim == 2 and c.shape[0] == 1:
            c = c.reshape(-1)
    # tuple color.
    if not np.iterable(c):
        raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
    if len(c) not in [3, 4]:
        raise ValueError("RGBA sequence should have length 3 or 4")
    if not all(isinstance(x, Number) for x in c):
        # Checks that don't work: `map(float, ...)`, `np.array(..., float)` and
        # `np.array(...).astype(float)` would all convert "0.5" to 0.5.
        raise ValueError(f"Invalid RGBA argument: {orig_c!r}")
    # Return a tuple to prevent the cached value from being modified.
    c = tuple(map(float, c))
    if len(c) == 3 and alpha is None:
        alpha = 1
    if alpha is not None:
        c = c[:3] + (alpha,)
    if any(elem < 0 or elem > 1 for elem in c):
        raise ValueError("RGBA values should be within 0-1 range")
    return c
def to_rgba_array(c, alpha=None):
    """
    Convert *c* to a (n, 4) array of RGBA colors.

    Parameters
    ----------
    c : Matplotlib color or array of colors
        If *c* is a masked array, an `~numpy.ndarray` is returned with a
        (0, 0, 0, 0) row for each masked value or row in *c*.
    alpha : float or sequence of floats, optional
        If *alpha* is given, force the alpha value of the returned RGBA tuple
        to *alpha*.

        If None, the alpha value from *c* is used. If *c* does not have an
        alpha channel, then alpha defaults to 1.

        *alpha* is ignored for the color value ``"none"`` (case-insensitive),
        which always maps to ``(0, 0, 0, 0)``.

        If *alpha* is a sequence and *c* is a single color, *c* will be
        repeated to match the length of *alpha*.

    Returns
    -------
    array
        (n, 4) array of RGBA colors, where each channel (red, green, blue,
        alpha) can assume values between 0 and 1.
    """
    # Special-case inputs that are already arrays, for performance. (If the
    # array has the wrong kind or shape, raise the error during one-at-a-time
    # conversion.)
    if np.iterable(alpha):
        alpha = np.asarray(alpha).ravel()
    if (isinstance(c, np.ndarray) and c.dtype.kind in "if"
            and c.ndim == 2 and c.shape[1] in [3, 4]):
        mask = c.mask.any(axis=1) if np.ma.is_masked(c) else None
        c = np.ma.getdata(c)
        if np.iterable(alpha):
            if c.shape[0] == 1 and alpha.shape[0] > 1:
                # Broadcast a single color against a sequence of alphas.
                c = np.tile(c, (alpha.shape[0], 1))
            elif c.shape[0] != alpha.shape[0]:
                raise ValueError("The number of colors must match the number"
                                 " of alpha values if there are more than one"
                                 " of each.")
        if c.shape[1] == 3:
            result = np.column_stack([c, np.zeros(len(c))])
            result[:, -1] = alpha if alpha is not None else 1.
        elif c.shape[1] == 4:
            result = c.copy()
            if alpha is not None:
                result[:, -1] = alpha
        if mask is not None:
            # Masked rows map to fully transparent black.
            result[mask] = 0
        if np.any((result < 0) | (result > 1)):
            raise ValueError("RGBA values should be within 0-1 range")
        return result
    # Handle single values.
    # Note that this occurs *after* handling inputs that are already arrays, as
    # `to_rgba(c, alpha)` (below) is expensive for such inputs, due to the need
    # to format the array in the ValueError message(!).
    if cbook._str_lower_equal(c, "none"):
        return np.zeros((0, 4), float)
    try:
        if np.iterable(alpha):
            return np.array([to_rgba(c, a) for a in alpha], float)
        else:
            return np.array([to_rgba(c, alpha)], float)
    except (ValueError, TypeError):
        # Not a single color; fall through to sequence handling below.
        pass
    if isinstance(c, str):
        raise ValueError(f"{c!r} is not a valid color value.")
    if len(c) == 0:
        return np.zeros((0, 4), float)
    # Quick path if the whole sequence can be directly converted to a numpy
    # array in one shot.
    if isinstance(c, Sequence):
        lens = {len(cc) if isinstance(cc, (list, tuple)) else -1 for cc in c}
        if lens == {3}:
            rgba = np.column_stack([c, np.ones(len(c))])
        elif lens == {4}:
            rgba = np.array(c)
        else:
            rgba = np.array([to_rgba(cc) for cc in c])
    else:
        rgba = np.array([to_rgba(cc) for cc in c])
    if alpha is not None:
        rgba[:, 3] = alpha
    return rgba
def to_rgb(c):
    """Convert *c* to an RGB color, silently dropping the alpha channel."""
    r, g, b, _ = to_rgba(c)
    return (r, g, b)
def to_hex(c, keep_alpha=False):
    """
    Convert *c* to a hex color.

    Parameters
    ----------
    c : :doc:`color </tutorials/colors/colors>` or `numpy.ma.masked`
    keep_alpha : bool, default: False
        If False, use the ``#rrggbb`` format, otherwise use ``#rrggbbaa``.

    Returns
    -------
    str
        ``#rrggbb`` or ``#rrggbbaa`` hex color string
    """
    channels = to_rgba(c)
    if not keep_alpha:
        channels = channels[:3]
    # Each channel rounds to the nearest of the 256 8-bit levels.
    return "#" + "".join(format(round(level * 255), "02x") for level in channels)
### Backwards-compatible color-conversion API
# Legacy aliases kept so old code keeps working; prefer the to_* functions.
cnames = CSS4_COLORS
hexColorPattern = re.compile(r"\A#[a-fA-F0-9]{6}\Z")
rgb2hex = to_hex
hex2color = to_rgb
class ColorConverter:
    """
    A class only kept for backwards compatibility.

    Its functionality is entirely provided by module-level functions.
    """
    # All attributes simply alias the module-level color machinery.
    colors = _colors_full_map
    cache = _colors_full_map.cache
    to_rgb = staticmethod(to_rgb)
    to_rgba = staticmethod(to_rgba)
    to_rgba_array = staticmethod(to_rgba_array)
# Module-level singleton used by the legacy conversion API above.
colorConverter = ColorConverter()
### End of backwards-compatible color-conversion API
def _create_lookup_table(N, data, gamma=1.0):
    r"""
    Create an *N* -element 1D lookup table.

    This assumes a mapping :math:`f : [0, 1] \rightarrow [0, 1]`. The returned
    data is an array of N values :math:`y = f(x)` where x is sampled from
    [0, 1].

    By default (*gamma* = 1) x is equidistantly sampled from [0, 1]. The
    *gamma* correction factor :math:`\gamma` distorts this equidistant
    sampling by :math:`x \rightarrow x^\gamma`.

    Parameters
    ----------
    N : int
        The number of elements of the created lookup table; at least 1.
    data : (M, 3) array-like or callable
        Defines the mapping :math:`f`.

        If a (M, 3) array-like, the rows define values (x, y0, y1). The x
        values must start with x=0, end with x=1, and all x values be in
        increasing order.

        A value between :math:`x_i` and :math:`x_{i+1}` is mapped to the range
        :math:`y^1_{i-1} \ldots y^0_i` by linear interpolation.

        For the simple case of a y-continuous mapping, y0 and y1 are identical.
        The two values of y are to allow for discontinuous mapping functions.
        E.g. a sawtooth with a period of 0.2 and an amplitude of 1 would be::

            [(0, 1, 0), (0.2, 1, 0), (0.4, 1, 0), ..., [(1, 1, 0)]

        In the special case of ``N == 1``, by convention the returned value
        is y0 for x == 1.

        If *data* is a callable, it must accept and return numpy arrays::

            data(x : ndarray) -> ndarray

        and map values between 0 - 1 to 0 - 1.
    gamma : float
        Gamma correction factor for input distribution x of the mapping.
        See also https://en.wikipedia.org/wiki/Gamma_correction.

    Returns
    -------
    array
        The lookup table where ``lut[x * (N-1)]`` gives the closest value
        for values of x between 0 and 1.

    Notes
    -----
    This function is internally used for `.LinearSegmentedColormap`.
    """
    if callable(data):
        # Sample the callable at gamma-distorted positions, clipped to [0, 1].
        xind = np.linspace(0, 1, N) ** gamma
        lut = np.clip(np.array(data(xind), dtype=float), 0, 1)
        return lut
    try:
        adata = np.array(data)
    except Exception as err:
        raise TypeError("data must be convertible to an array") from err
    _api.check_shape((None, 3), data=adata)
    x = adata[:, 0]
    y0 = adata[:, 1]
    y1 = adata[:, 2]
    if x[0] != 0. or x[-1] != 1.0:
        raise ValueError(
            "data mapping points must start with x=0 and end with x=1")
    if (np.diff(x) < 0).any():
        raise ValueError("data mapping points must have x in increasing order")
    # begin generation of lookup table
    if N == 1:
        # convention: use the y = f(x=1) value for a 1-element lookup table
        lut = np.array(y0[-1])
    else:
        x = x * (N - 1)
        xind = (N - 1) * np.linspace(0, 1, N) ** gamma
        ind = np.searchsorted(x, xind)[1:-1]
        # Fractional position of each sample between its bracketing control
        # points; interpolate from y1 of the left point to y0 of the right,
        # which realizes the allowed discontinuity at each control point.
        distance = (xind[1:-1] - x[ind - 1]) / (x[ind] - x[ind - 1])
        lut = np.concatenate([
            [y1[0]],
            distance * (y0[ind] - y1[ind - 1]) + y1[ind - 1],
            [y0[-1]],
        ])
    # ensure that the lut is confined to values between 0 and 1 by clipping it
    return np.clip(lut, 0.0, 1.0)
class Colormap:
    """
    Baseclass for all scalar to RGBA mappings.
    Typically, Colormap instances are used to convert data values (floats)
    from the interval ``[0, 1]`` to the RGBA color that the respective
    Colormap represents. For scaling of data into the ``[0, 1]`` interval see
    `matplotlib.colors.Normalize`. Subclasses of `matplotlib.cm.ScalarMappable`
    make heavy use of this ``data -> normalize -> map-to-color`` processing
    chain.
    """
    def __init__(self, name, N=256):
        """
        Parameters
        ----------
        name : str
            The name of the colormap.
        N : int
            The number of RGB quantization levels.
        """
        self.name = name
        self.N = int(N)  # ensure that N is always int
        self._rgba_bad = (0.0, 0.0, 0.0, 0.0)  # If bad, don't paint anything.
        self._rgba_under = None
        self._rgba_over = None
        # The lookup table has three extra rows appended after the N color
        # entries: under-range, over-range, and masked ("bad"), in that order.
        self._i_under = self.N
        self._i_over = self.N + 1
        self._i_bad = self.N + 2
        self._isinit = False
        #: When this colormap exists on a scalar mappable and colorbar_extend
        #: is not False, colorbar creation will pick up ``colorbar_extend`` as
        #: the default value for the ``extend`` keyword in the
        #: `matplotlib.colorbar.Colorbar` constructor.
        self.colorbar_extend = False
    def __call__(self, X, alpha=None, bytes=False):
        """
        Map data values to colors.

        Parameters
        ----------
        X : float or int, `~numpy.ndarray` or scalar
            The data value(s) to convert to RGBA.
            For floats, *X* should be in the interval ``[0.0, 1.0]`` to
            return the RGBA values ``X*100`` percent along the Colormap line.
            For integers, *X* should be in the interval ``[0, Colormap.N)`` to
            return RGBA values *indexed* from the Colormap with index ``X``.
        alpha : float or array-like or None
            Alpha must be a scalar between 0 and 1, a sequence of such
            floats with shape matching X, or None.
        bytes : bool
            If False (default), the returned RGBA values will be floats in the
            interval ``[0, 1]`` otherwise they will be uint8s in the interval
            ``[0, 255]``.

        Returns
        -------
        Tuple of RGBA values if X is scalar, otherwise an array of
        RGBA values with a shape of ``X.shape + (4, )``.
        """
        if not self._isinit:
            self._init()
        # Take the bad mask from a masked array, or in all other cases defer
        # np.isnan() to after we have converted to an array.
        mask_bad = X.mask if np.ma.is_masked(X) else None
        xa = np.array(X, copy=True)
        if mask_bad is None:
            mask_bad = np.isnan(xa)
        if not xa.dtype.isnative:
            # Native byteorder is faster.  NumPy 2.0 removed
            # ndarray.newbyteorder(), so re-view the swapped buffer with a
            # byte-flipped dtype instead of ``xa.byteswap().newbyteorder()``.
            xa = xa.byteswap().view(xa.dtype.newbyteorder())
        if xa.dtype.kind == "f":
            xa *= self.N
            # Negative values are out of range, but astype(int) would
            # truncate them towards zero.
            xa[xa < 0] = -1
            # xa == 1 (== N after multiplication) is not out of range.
            xa[xa == self.N] = self.N - 1
            # Avoid converting large positive values to negative integers.
            np.clip(xa, -1, self.N, out=xa)
        with np.errstate(invalid="ignore"):
            # We need this cast for unsigned ints as well as floats
            xa = xa.astype(int)
        # Set the over-range indices before the under-range;
        # otherwise the under-range values get converted to over-range.
        xa[xa > self.N - 1] = self._i_over
        xa[xa < 0] = self._i_under
        xa[mask_bad] = self._i_bad
        lut = self._lut
        if bytes:
            lut = (lut * 255).astype(np.uint8)
        rgba = lut.take(xa, axis=0, mode='clip')
        if alpha is not None:
            alpha = np.clip(alpha, 0, 1)
            if bytes:
                alpha *= 255  # Will be cast to uint8 upon assignment.
            if alpha.shape not in [(), xa.shape]:
                raise ValueError(
                    f"alpha is array-like but its shape {alpha.shape} does "
                    f"not match that of X {xa.shape}")
            rgba[..., -1] = alpha
            # If the "bad" color is all zeros, then ignore alpha input.
            if (lut[-1] == 0).all() and np.any(mask_bad):
                if np.iterable(mask_bad) and mask_bad.shape == xa.shape:
                    rgba[mask_bad] = (0, 0, 0, 0)
                else:
                    rgba[..., :] = (0, 0, 0, 0)
        if not np.iterable(X):
            rgba = tuple(rgba)
        return rgba
    def __copy__(self):
        # Shallow-copy the instance dict, but give the copy its own lut so
        # later set_bad/set_under/set_over calls don't leak across copies.
        cls = self.__class__
        cmapobject = cls.__new__(cls)
        cmapobject.__dict__.update(self.__dict__)
        if self._isinit:
            cmapobject._lut = np.copy(self._lut)
        return cmapobject
    def __eq__(self, other):
        if (not isinstance(other, Colormap) or self.name != other.name or
                self.colorbar_extend != other.colorbar_extend):
            return False
        # To compare lookup tables the Colormaps have to be initialized
        if not self._isinit:
            self._init()
        if not other._isinit:
            other._init()
        return np.array_equal(self._lut, other._lut)
    def get_bad(self):
        """Get the color for masked values."""
        if not self._isinit:
            self._init()
        return np.array(self._lut[self._i_bad])
    def set_bad(self, color='k', alpha=None):
        """Set the color for masked values."""
        self._rgba_bad = to_rgba(color, alpha)
        if self._isinit:
            self._set_extremes()
    def get_under(self):
        """Get the color for low out-of-range values."""
        if not self._isinit:
            self._init()
        return np.array(self._lut[self._i_under])
    def set_under(self, color='k', alpha=None):
        """Set the color for low out-of-range values."""
        self._rgba_under = to_rgba(color, alpha)
        if self._isinit:
            self._set_extremes()
    def get_over(self):
        """Get the color for high out-of-range values."""
        if not self._isinit:
            self._init()
        return np.array(self._lut[self._i_over])
    def set_over(self, color='k', alpha=None):
        """Set the color for high out-of-range values."""
        self._rgba_over = to_rgba(color, alpha)
        if self._isinit:
            self._set_extremes()
    def set_extremes(self, *, bad=None, under=None, over=None):
        """
        Set the colors for masked (*bad*) values and, when ``norm.clip =
        False``, low (*under*) and high (*over*) out-of-range values.
        """
        if bad is not None:
            self.set_bad(bad)
        if under is not None:
            self.set_under(under)
        if over is not None:
            self.set_over(over)
    def with_extremes(self, *, bad=None, under=None, over=None):
        """
        Return a copy of the colormap, for which the colors for masked (*bad*)
        values and, when ``norm.clip = False``, low (*under*) and high (*over*)
        out-of-range values, have been set accordingly.
        """
        new_cm = self.copy()
        new_cm.set_extremes(bad=bad, under=under, over=over)
        return new_cm
    def _set_extremes(self):
        # Fill the three extra lut rows; when no explicit under/over color was
        # set, the first/last regular entries are reused so out-of-range data
        # clamps to the ends of the map.
        if self._rgba_under:
            self._lut[self._i_under] = self._rgba_under
        else:
            self._lut[self._i_under] = self._lut[0]
        if self._rgba_over:
            self._lut[self._i_over] = self._rgba_over
        else:
            self._lut[self._i_over] = self._lut[self.N - 1]
        self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
        """Generate the lookup table, ``self._lut``."""
        raise NotImplementedError("Abstract class only")
    def is_gray(self):
        """Return whether the colormap is grayscale."""
        if not self._isinit:
            self._init()
        return (np.all(self._lut[:, 0] == self._lut[:, 1]) and
                np.all(self._lut[:, 0] == self._lut[:, 2]))
    def resampled(self, lutsize):
        """Return a new colormap with *lutsize* entries."""
        # Transitional shim: subclasses that predate the public API may still
        # implement only the private ``_resample``; forward to it with a
        # warning rather than failing outright.
        if hasattr(self, '_resample'):
            _api.warn_external(
                "The ability to resample a color map is now public API "
                f"However the class {type(self)} still only implements "
                "the previous private _resample method. Please update "
                "your class."
            )
            return self._resample(lutsize)
        raise NotImplementedError()
    def reversed(self, name=None):
        """
        Return a reversed instance of the Colormap.

        .. note:: This function is not implemented for the base class.

        Parameters
        ----------
        name : str, optional
            The name for the reversed colormap. If None, the
            name is set to ``self.name + "_r"``.

        See Also
        --------
        LinearSegmentedColormap.reversed
        ListedColormap.reversed
        """
        raise NotImplementedError()
    def _repr_png_(self):
        """Generate a PNG representation of the Colormap."""
        X = np.tile(np.linspace(0, 1, _REPR_PNG_SIZE[0]),
                    (_REPR_PNG_SIZE[1], 1))
        pixels = self(X, bytes=True)
        png_bytes = io.BytesIO()
        title = self.name + ' colormap'
        author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'
        pnginfo = PngInfo()
        pnginfo.add_text('Title', title)
        pnginfo.add_text('Description', title)
        pnginfo.add_text('Author', author)
        pnginfo.add_text('Software', author)
        Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)
        return png_bytes.getvalue()
    def _repr_html_(self):
        """Generate an HTML representation of the Colormap."""
        png_bytes = self._repr_png_()
        png_base64 = base64.b64encode(png_bytes).decode('ascii')
        def color_block(color):
            # One small inline-block square filled with *color*, used for the
            # under/bad/over swatches below the gradient image.
            hex_color = to_hex(color, keep_alpha=True)
            return (f'<div title="{hex_color}" '
                    'style="display: inline-block; '
                    'width: 1em; height: 1em; '
                    'margin: 0; '
                    'vertical-align: middle; '
                    'border: 1px solid #555; '
                    f'background-color: {hex_color};"></div>')
        return ('<div style="vertical-align: middle;">'
                f'<strong>{self.name}</strong> '
                '</div>'
                '<div class="cmap"><img '
                f'alt="{self.name} colormap" '
                f'title="{self.name}" '
                'style="border: 1px solid #555;" '
                f'src="data:image/png;base64,{png_base64}"></div>'
                '<div style="vertical-align: middle; '
                f'max-width: {_REPR_PNG_SIZE[0]+2}px; '
                'display: flex; justify-content: space-between;">'
                '<div style="float: left;">'
                f'{color_block(self.get_under())} under'
                '</div>'
                '<div style="margin: 0 auto; display: inline-block;">'
                f'bad {color_block(self.get_bad())}'
                '</div>'
                '<div style="float: right;">'
                f'over {color_block(self.get_over())}'
                '</div>')
    def copy(self):
        """Return a copy of the colormap."""
        return self.__copy__()
class LinearSegmentedColormap(Colormap):
    """
    Colormap objects based on lookup tables using linear segments.
    The lookup table is generated using linear interpolation for each
    primary color, with the 0-1 domain divided into any number of
    segments.
    """
    def __init__(self, name, segmentdata, N=256, gamma=1.0):
        """
        Create colormap from linear mapping segments

        segmentdata argument is a dictionary with a red, green and blue
        entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
        forming rows in a table. Entries for alpha are optional.

        Example: suppose you want red to increase from 0 to 1 over
        the bottom half, green to do the same over the middle half,
        and blue over the top half. Then you would use::

            cdict = {'red':   [(0.0, 0.0, 0.0),
                               (0.5, 1.0, 1.0),
                               (1.0, 1.0, 1.0)],
                     'green': [(0.0, 0.0, 0.0),
                               (0.25, 0.0, 0.0),
                               (0.75, 1.0, 1.0),
                               (1.0, 1.0, 1.0)],
                     'blue':  [(0.0, 0.0, 0.0),
                               (0.5, 0.0, 0.0),
                               (1.0, 1.0, 1.0)]}

        Each row in the table for a given color is a sequence of
        *x*, *y0*, *y1* tuples. In each sequence, *x* must increase
        monotonically from 0 to 1. For any input value *z* falling
        between *x[i]* and *x[i+1]*, the output value of a given color
        will be linearly interpolated between *y1[i]* and *y0[i+1]*::

            row i:   x  y0  y1
                           /
                          /
            row i+1: x  y0  y1

        Hence y0 in the first row and y1 in the last row are never used.

        See Also
        --------
        LinearSegmentedColormap.from_list
            Static method; factory function for generating a smoothly-varying
            LinearSegmentedColormap.
        """
        # True only if all colors in map are identical; needed for contouring.
        self.monochrome = False
        super().__init__(name, N)
        self._segmentdata = segmentdata
        self._gamma = gamma
    def _init(self):
        # N interpolated rows plus the three under/over/bad slots; alpha
        # defaults to fully opaque unless segmentdata provides it.
        self._lut = np.ones((self.N + 3, 4), float)
        self._lut[:-3, 0] = _create_lookup_table(
            self.N, self._segmentdata['red'], self._gamma)
        self._lut[:-3, 1] = _create_lookup_table(
            self.N, self._segmentdata['green'], self._gamma)
        self._lut[:-3, 2] = _create_lookup_table(
            self.N, self._segmentdata['blue'], self._gamma)
        if 'alpha' in self._segmentdata:
            # Alpha is never gamma-corrected.
            self._lut[:-3, 3] = _create_lookup_table(
                self.N, self._segmentdata['alpha'], 1)
        self._isinit = True
        self._set_extremes()
    def set_gamma(self, gamma):
        """Set a new gamma value and regenerate colormap."""
        self._gamma = gamma
        self._init()
    @staticmethod
    def from_list(name, colors, N=256, gamma=1.0):
        """
        Create a `LinearSegmentedColormap` from a list of colors.

        Parameters
        ----------
        name : str
            The name of the colormap.
        colors : array-like of colors or array-like of (value, color)
            If only colors are given, they are equidistantly mapped from the
            range :math:`[0, 1]`; i.e. 0 maps to ``colors[0]`` and 1 maps to
            ``colors[-1]``.
            If (value, color) pairs are given, the mapping is from *value*
            to *color*. This can be used to divide the range unevenly.
        N : int
            The number of RGB quantization levels.
        gamma : float
        """
        if not np.iterable(colors):
            raise ValueError('colors must be iterable')
        # Distinguish a list of (value, color) pairs from a list of plain
        # colors; a 2-char string like "bg" must not be treated as a pair.
        if (isinstance(colors[0], Sized) and len(colors[0]) == 2
                and not isinstance(colors[0], str)):
            # List of value, color pairs
            vals, colors = zip(*colors)
        else:
            vals = np.linspace(0, 1, len(colors))
        r, g, b, a = to_rgba_array(colors).T
        cdict = {
            "red": np.column_stack([vals, r, r]),
            "green": np.column_stack([vals, g, g]),
            "blue": np.column_stack([vals, b, b]),
            "alpha": np.column_stack([vals, a, a]),
        }
        return LinearSegmentedColormap(name, cdict, N, gamma)
    def resampled(self, lutsize):
        """Return a new colormap with *lutsize* entries."""
        # Pass gamma through so resampling does not silently reset a
        # non-default gamma back to 1.0.
        new_cmap = LinearSegmentedColormap(self.name, self._segmentdata,
                                           lutsize, self._gamma)
        new_cmap._rgba_over = self._rgba_over
        new_cmap._rgba_under = self._rgba_under
        new_cmap._rgba_bad = self._rgba_bad
        return new_cmap
    # Helper ensuring picklability of the reversed cmap.
    @staticmethod
    def _reverser(func, x):
        return func(1 - x)
    def reversed(self, name=None):
        """
        Return a reversed instance of the Colormap.

        Parameters
        ----------
        name : str, optional
            The name for the reversed colormap. If None, the
            name is set to ``self.name + "_r"``.

        Returns
        -------
        LinearSegmentedColormap
            The reversed colormap.
        """
        if name is None:
            name = self.name + "_r"
        # Using a partial object keeps the cmap picklable.
        data_r = {key: (functools.partial(self._reverser, data)
                        if callable(data) else
                        [(1.0 - x, y1, y0) for x, y0, y1 in reversed(data)])
                  for key, data in self._segmentdata.items()}
        new_cmap = LinearSegmentedColormap(name, data_r, self.N, self._gamma)
        # Reverse the over/under values too
        new_cmap._rgba_over = self._rgba_under
        new_cmap._rgba_under = self._rgba_over
        new_cmap._rgba_bad = self._rgba_bad
        return new_cmap
class ListedColormap(Colormap):
    """
    Colormap built from an explicit list of colors.
    Most useful when indexing directly into the colormap, but it can also be
    used to build special colormaps for ordinary scalar mapping.
    Parameters
    ----------
    colors : list, array
        List of Matplotlib color specifications, or an equivalent Nx3 or Nx4
        floating point array (*N* RGB or RGBA values).
    name : str, optional
        String to identify the colormap.
    N : int, optional
        Number of entries in the map.  By default (*None*) there is one entry
        per element of *colors*.  If ``N < len(colors)`` the list is truncated
        at *N*; if ``N > len(colors)`` the list is extended by repetition.
    """
    def __init__(self, colors, name='from_list', N=None):
        # Flag read by contour.py: True when every entry is the same color.
        self.monochrome = False
        if N is None:
            self.colors = colors
            N = len(colors)
        elif isinstance(colors, str):
            # A single color name, repeated N times.
            self.colors = [colors] * N
            self.monochrome = True
        elif np.iterable(colors):
            # Cycle through the given colors until N entries are filled.
            self.monochrome = len(colors) == 1
            self.colors = list(itertools.islice(itertools.cycle(colors), N))
        else:
            try:
                gray = float(colors)
            except TypeError:
                pass
            else:
                # A single gray level, repeated N times.
                self.colors = [gray] * N
                self.monochrome = True
        super().__init__(name, N)
    def _init(self):
        # N color rows plus the three under/over/bad slots.
        lut = np.zeros((self.N + 3, 4), float)
        lut[:-3] = to_rgba_array(self.colors)
        self._lut = lut
        self._isinit = True
        self._set_extremes()
    def resampled(self, lutsize):
        """Return a new colormap with *lutsize* entries."""
        sampled = self(np.linspace(0, 1, lutsize))
        resampled_cmap = ListedColormap(sampled, name=self.name)
        # Carry the explicit extreme colors over to the new map.
        resampled_cmap._rgba_over = self._rgba_over
        resampled_cmap._rgba_under = self._rgba_under
        resampled_cmap._rgba_bad = self._rgba_bad
        return resampled_cmap
    def reversed(self, name=None):
        """
        Return a reversed instance of the Colormap.
        Parameters
        ----------
        name : str, optional
            The name for the reversed colormap. If None, the
            name is set to ``self.name + "_r"``.
        Returns
        -------
        ListedColormap
            A reversed instance of the colormap.
        """
        if name is None:
            name = self.name + "_r"
        reversed_cmap = ListedColormap(
            list(reversed(self.colors)), name=name, N=self.N)
        # Swap over/under: what was high out-of-range is now low.
        reversed_cmap._rgba_over = self._rgba_under
        reversed_cmap._rgba_under = self._rgba_over
        reversed_cmap._rgba_bad = self._rgba_bad
        return reversed_cmap
class Normalize:
    """
    A class which, when called, linearly normalizes data into the
    ``[0.0, 1.0]`` interval.
    """
    def __init__(self, vmin=None, vmax=None, clip=False):
        """
        Parameters
        ----------
        vmin, vmax : float or None
            If *vmin* and/or *vmax* is not given, they are initialized from the
            minimum and maximum value, respectively, of the first input
            processed; i.e., ``__call__(A)`` calls ``autoscale_None(A)``.
        clip : bool, default: False
            If ``True`` values falling outside the range ``[vmin, vmax]``,
            are mapped to 0 or 1, whichever is closer, and masked values are
            set to 1. If ``False`` masked values remain masked.
            Clipping silently defeats the purpose of setting the over, under,
            and masked colors in a colormap, so it is likely to lead to
            surprises; therefore the default is ``clip=False``.
        Notes
        -----
        Returns 0 if ``vmin == vmax``.
        """
        self._vmin = _sanitize_extrema(vmin)
        self._vmax = _sanitize_extrema(vmax)
        self._clip = clip
        self._scale = None
        # A "changed" signal is emitted whenever vmin/vmax/clip mutate, so
        # objects holding this norm (e.g. ScalarMappables) can invalidate.
        self.callbacks = cbook.CallbackRegistry(signals=["changed"])
    @property
    def vmin(self):
        # Lower data limit; None until set explicitly or autoscaled.
        return self._vmin
    @vmin.setter
    def vmin(self, value):
        value = _sanitize_extrema(value)
        if value != self._vmin:
            self._vmin = value
            self._changed()
    @property
    def vmax(self):
        # Upper data limit; None until set explicitly or autoscaled.
        return self._vmax
    @vmax.setter
    def vmax(self, value):
        value = _sanitize_extrema(value)
        if value != self._vmax:
            self._vmax = value
            self._changed()
    @property
    def clip(self):
        return self._clip
    @clip.setter
    def clip(self, value):
        if value != self._clip:
            self._clip = value
            self._changed()
    def _changed(self):
        """
        Call this whenever the norm is changed to notify all the
        callback listeners to the 'changed' signal.
        """
        self.callbacks.process('changed')
    @staticmethod
    def process_value(value):
        """
        Homogenize the input *value* for easy and efficient normalization.
        *value* can be a scalar or sequence.
        Returns
        -------
        result : masked array
            Masked array with the same shape as *value*.
        is_scalar : bool
            Whether *value* is a scalar.
        Notes
        -----
        Float dtypes are preserved; integer types with two bytes or smaller are
        converted to np.float32, and larger types are converted to np.float64.
        Preserving float32 when possible, and using in-place operations,
        greatly improves speed for large arrays.
        """
        is_scalar = not np.iterable(value)
        if is_scalar:
            value = [value]
        dtype = np.min_scalar_type(value)
        if np.issubdtype(dtype, np.integer) or dtype.type is np.bool_:
            # bool_/int8/int16 -> float32; int32/int64 -> float64
            dtype = np.promote_types(dtype, np.float32)
        # ensure data passed in as an ndarray subclass are interpreted as
        # an ndarray. See issue #6622.
        mask = np.ma.getmask(value)
        data = np.asarray(value)
        result = np.ma.array(data, mask=mask, dtype=dtype, copy=True)
        return result, is_scalar
    def __call__(self, value, clip=None):
        """
        Normalize *value* data in the ``[vmin, vmax]`` interval into the
        ``[0.0, 1.0]`` interval and return it.
        Parameters
        ----------
        value
            Data to normalize.
        clip : bool, optional
            If ``None``, defaults to ``self.clip`` (which defaults to
            ``False``).
        Notes
        -----
        If not already initialized, ``self.vmin`` and ``self.vmax`` are
        initialized using ``self.autoscale_None(value)``.
        """
        if clip is None:
            clip = self.clip
        result, is_scalar = self.process_value(value)
        if self.vmin is None or self.vmax is None:
            self.autoscale_None(result)
        # Convert at least to float, without losing precision.
        (vmin,), _ = self.process_value(self.vmin)
        (vmax,), _ = self.process_value(self.vmax)
        if vmin == vmax:
            result.fill(0)   # Or should it be all masked?  Or 0.5?
        elif vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        else:
            if clip:
                mask = np.ma.getmask(result)
                result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax),
                                     mask=mask)
            # ma division is very slow; we can take a shortcut
            resdat = result.data
            resdat -= vmin
            resdat /= (vmax - vmin)
            result = np.ma.array(resdat, mask=result.mask, copy=False)
        if is_scalar:
            result = result[0]
        return result
    def inverse(self, value):
        # Map values from [0, 1] back to data space; no clipping is applied.
        if not self.scaled():
            raise ValueError("Not invertible until both vmin and vmax are set")
        (vmin,), _ = self.process_value(self.vmin)
        (vmax,), _ = self.process_value(self.vmax)
        if np.iterable(value):
            val = np.ma.asarray(value)
            return vmin + val * (vmax - vmin)
        else:
            return vmin + value * (vmax - vmin)
    def autoscale(self, A):
        """Set *vmin*, *vmax* to min, max of *A*."""
        with self.callbacks.blocked():
            # Pause callbacks while we are updating so we only get
            # a single update signal at the end
            self.vmin = self.vmax = None
            self.autoscale_None(A)
        self._changed()
    def autoscale_None(self, A):
        """If vmin or vmax are not set, use the min/max of *A* to set them."""
        A = np.asanyarray(A)
        if self.vmin is None and A.size:
            self.vmin = A.min()
        if self.vmax is None and A.size:
            self.vmax = A.max()
    def scaled(self):
        """Return whether vmin and vmax are set."""
        return self.vmin is not None and self.vmax is not None
class TwoSlopeNorm(Normalize):
    """Normalize data around a defined center with two independent linear slopes."""
    def __init__(self, vcenter, vmin=None, vmax=None):
        """
        Normalize data with a set center.
        Useful when mapping data with an unequal rates of change around a
        conceptual center, e.g., data that range from -2 to 4, with 0 as
        the midpoint.
        Parameters
        ----------
        vcenter : float
            The data value that defines ``0.5`` in the normalization.
        vmin : float, optional
            The data value that defines ``0.0`` in the normalization.
            Defaults to the min value of the dataset.
        vmax : float, optional
            The data value that defines ``1.0`` in the normalization.
            Defaults to the max value of the dataset.
        Examples
        --------
        This maps data value -4000 to 0., 0 to 0.5, and +10000 to 1.0; data
        between is linearly interpolated::
            >>> import matplotlib.colors as mcolors
            >>> offset = mcolors.TwoSlopeNorm(vmin=-4000.,
                                              vcenter=0., vmax=10000)
            >>> data = [-4000., -2000., 0., 2500., 5000., 7500., 10000.]
            >>> offset(data)
            array([0., 0.25, 0.5, 0.625, 0.75, 0.875, 1.0])
        """
        super().__init__(vmin=vmin, vmax=vmax)
        self._vcenter = vcenter
        # Validate ordering only for the endpoints that were actually given;
        # a missing vmin/vmax is filled in later by autoscale_None.
        if vcenter is not None and vmax is not None and vcenter >= vmax:
            raise ValueError('vmin, vcenter, and vmax must be in '
                             'ascending order')
        if vcenter is not None and vmin is not None and vcenter <= vmin:
            raise ValueError('vmin, vcenter, and vmax must be in '
                             'ascending order')
    @property
    def vcenter(self):
        # The data value mapped to 0.5.
        return self._vcenter
    @vcenter.setter
    def vcenter(self, value):
        if value != self._vcenter:
            self._vcenter = value
            self._changed()
    def autoscale_None(self, A):
        """
        Get vmin and vmax, and then clip at vcenter
        """
        super().autoscale_None(A)
        # Widen the limits if needed so that vcenter lies inside [vmin, vmax].
        if self.vmin > self.vcenter:
            self.vmin = self.vcenter
        if self.vmax < self.vcenter:
            self.vmax = self.vcenter
    def __call__(self, value, clip=None):
        """
        Map value to the interval [0, 1]. The *clip* argument is unused.
        """
        result, is_scalar = self.process_value(value)
        self.autoscale_None(result)  # sets self.vmin, self.vmax if None
        if not self.vmin <= self.vcenter <= self.vmax:
            raise ValueError("vmin, vcenter, vmax must increase monotonically")
        # note that we must extrapolate for tick locators:
        result = np.ma.masked_array(
            np.interp(result, [self.vmin, self.vcenter, self.vmax],
                      [0, 0.5, 1], left=-np.inf, right=np.inf),
            mask=np.ma.getmask(result))
        if is_scalar:
            result = np.atleast_1d(result)[0]
        return result
    def inverse(self, value):
        # Piecewise-linear inverse of __call__; extrapolates outside [0, 1].
        if not self.scaled():
            raise ValueError("Not invertible until both vmin and vmax are set")
        (vmin,), _ = self.process_value(self.vmin)
        (vmax,), _ = self.process_value(self.vmax)
        (vcenter,), _ = self.process_value(self.vcenter)
        result = np.interp(value, [0, 0.5, 1], [vmin, vcenter, vmax],
                           left=-np.inf, right=np.inf)
        return result
class CenteredNorm(Normalize):
    """Normalize symmetrical data around a center with equal slopes on both sides."""
    def __init__(self, vcenter=0, halfrange=None, clip=False):
        """
        Normalize symmetrical data around a center (0 by default).
        Unlike `TwoSlopeNorm`, `CenteredNorm` applies an equal rate of change
        around the center.
        Useful when mapping symmetrical data around a conceptual center
        e.g., data that range from -2 to 4, with 0 as the midpoint, and
        with equal rates of change around that midpoint.
        Parameters
        ----------
        vcenter : float, default: 0
            The data value that defines ``0.5`` in the normalization.
        halfrange : float, optional
            The range of data values that defines a range of ``0.5`` in the
            normalization, so that *vcenter* - *halfrange* is ``0.0`` and
            *vcenter* + *halfrange* is ``1.0`` in the normalization.
            Defaults to the largest absolute difference to *vcenter* for
            the values in the dataset.
        clip : bool, default: False
            If ``True`` values falling outside the range ``[vmin, vmax]``,
            are mapped to 0 or 1, whichever is closer, and masked values are
            set to 1. If ``False`` masked values remain masked.
        Examples
        --------
        This maps data values -2 to 0.25, 0 to 0.5, and 4 to 1.0
        (assuming equal rates of change above and below 0.0):
            >>> import matplotlib.colors as mcolors
            >>> norm = mcolors.CenteredNorm(halfrange=4.0)
            >>> data = [-2., 0., 4.]
            >>> norm(data)
            array([0.25, 0.5 , 1. ])
        """
        super().__init__(vmin=None, vmax=None, clip=clip)
        self._vcenter = vcenter
        # calling the halfrange setter to set vmin and vmax
        self.halfrange = halfrange
    def autoscale(self, A):
        """
        Set *halfrange* to ``max(abs(A-vcenter))``, then set *vmin* and *vmax*.
        """
        A = np.asanyarray(A)
        self.halfrange = max(self._vcenter-A.min(),
                             A.max()-self._vcenter)
    def autoscale_None(self, A):
        """Set *vmin* and *vmax*."""
        A = np.asanyarray(A)
        if self.halfrange is None and A.size:
            self.autoscale(A)
    @property
    def vmin(self):
        return self._vmin
    @vmin.setter
    def vmin(self, value):
        # Setting vmin keeps vmax mirrored about vcenter.
        value = _sanitize_extrema(value)
        if value != self._vmin:
            self._vmin = value
            self._vmax = 2*self.vcenter - value
            self._changed()
    @property
    def vmax(self):
        return self._vmax
    @vmax.setter
    def vmax(self, value):
        # Setting vmax keeps vmin mirrored about vcenter.
        value = _sanitize_extrema(value)
        if value != self._vmax:
            self._vmax = value
            self._vmin = 2*self.vcenter - value
            self._changed()
    @property
    def vcenter(self):
        return self._vcenter
    @vcenter.setter
    def vcenter(self, vcenter):
        if vcenter != self._vcenter:
            self._vcenter = vcenter
            # Trigger an update of the vmin/vmax values through the setter
            self.halfrange = self.halfrange
            self._changed()
    @property
    def halfrange(self):
        # Derived from vmin/vmax; None while the norm is not yet scaled.
        if self.vmin is None or self.vmax is None:
            return None
        return (self.vmax - self.vmin) / 2
    @halfrange.setter
    def halfrange(self, halfrange):
        if halfrange is None:
            self.vmin = None
            self.vmax = None
        else:
            self.vmin = self.vcenter - abs(halfrange)
            self.vmax = self.vcenter + abs(halfrange)
def make_norm_from_scale(scale_cls, base_norm_cls=None, *, init=None):
    """
    Decorator for building a `.Normalize` subclass from a `~.scale.ScaleBase`
    subclass.
    After ::
        @make_norm_from_scale(scale_cls)
        class norm_cls(Normalize):
            ...
    *norm_cls* is filled with methods so that normalization computations are
    forwarded to *scale_cls* (i.e., *scale_cls* is the scale that would be used
    for the colorbar of a mappable normalized with *norm_cls*).
    If *init* is not passed, then the constructor signature of *norm_cls*
    will be ``norm_cls(vmin=None, vmax=None, clip=False)``; these three
    parameters will be forwarded to the base class (``Normalize.__init__``),
    and a *scale_cls* object will be initialized with no arguments (other than
    a dummy axis).
    If the *scale_cls* constructor takes additional parameters, then *init*
    should be passed to `make_norm_from_scale`.  It is a callable which is
    *only* used for its signature.  First, this signature will become the
    signature of *norm_cls*.  Second, the *norm_cls* constructor will bind the
    parameters passed to it using this signature, extract the bound *vmin*,
    *vmax*, and *clip* values, pass those to ``Normalize.__init__``, and
    forward the remaining bound values (including any defaults defined by the
    signature) to the *scale_cls* constructor.
    """
    if base_norm_cls is None:
        # Decorator-factory usage: @make_norm_from_scale(scale, init=...);
        # curry until the decorated class is supplied.
        return functools.partial(make_norm_from_scale, scale_cls, init=init)
    # Expand a functools.partial into (func, args, kwargs) so the cached
    # helper can memoize on hashable, comparable pieces.
    scale_args = ()
    scale_kwargs_items = ()
    if isinstance(scale_cls, functools.partial):
        scale_args = scale_cls.args
        scale_kwargs_items = tuple(scale_cls.keywords.items())
        scale_cls = scale_cls.func
    if init is None:
        def init(vmin=None, vmax=None, clip=False): pass
    return _make_norm_from_scale(
        scale_cls, scale_args, scale_kwargs_items,
        base_norm_cls, inspect.signature(init))
@functools.lru_cache(None)
def _make_norm_from_scale(
        scale_cls, scale_args, scale_kwargs_items,
        base_norm_cls, bound_init_signature,
):
    """
    Helper for `make_norm_from_scale`.
    This function is split out to enable caching (in particular so that
    different unpickles reuse the same class).  In order to do so,
    - ``functools.partial`` *scale_cls* is expanded into ``func, args, kwargs``
      to allow memoizing returned norms (partial instances always compare
      unequal, but we can check identity based on ``func, args, kwargs``;
    - *init* is replaced by *init_signature*, as signatures are picklable,
      unlike to arbitrary lambdas.
    """
    class Norm(base_norm_cls):
        def __reduce__(self):
            cls = type(self)
            # If the class is toplevel-accessible, it is possible to directly
            # pickle it "by name".  This is required to support norm classes
            # defined at a module's toplevel, as the inner base_norm_cls is
            # otherwise unpicklable (as it gets shadowed by the generated norm
            # class).  If either import or attribute access fails, fall back to
            # the general path.
            try:
                if cls is getattr(importlib.import_module(cls.__module__),
                                  cls.__qualname__):
                    return (_create_empty_object_of_class, (cls,), vars(self))
            except (ImportError, AttributeError):
                pass
            return (_picklable_norm_constructor,
                    (scale_cls, scale_args, scale_kwargs_items,
                     base_norm_cls, bound_init_signature),
                    vars(self))
        def __init__(self, *args, **kwargs):
            # Bind against the user-supplied *init* signature, peel off the
            # three Normalize parameters, and forward the rest to the scale.
            ba = bound_init_signature.bind(*args, **kwargs)
            ba.apply_defaults()
            super().__init__(
                **{k: ba.arguments.pop(k) for k in ["vmin", "vmax", "clip"]})
            self._scale = functools.partial(
                scale_cls, *scale_args, **dict(scale_kwargs_items))(
                    axis=None, **ba.arguments)
            self._trf = self._scale.get_transform()
        # Make introspection (help(), signature()) show the *init* signature
        # rather than the generic (*args, **kwargs).
        __init__.__signature__ = bound_init_signature.replace(parameters=[
            inspect.Parameter("self", inspect.Parameter.POSITIONAL_OR_KEYWORD),
            *bound_init_signature.parameters.values()])
        def __call__(self, value, clip=None):
            value, is_scalar = self.process_value(value)
            if self.vmin is None or self.vmax is None:
                self.autoscale_None(value)
            if self.vmin > self.vmax:
                raise ValueError("vmin must be less or equal to vmax")
            if self.vmin == self.vmax:
                return np.full_like(value, 0)
            if clip is None:
                clip = self.clip
            if clip:
                value = np.clip(value, self.vmin, self.vmax)
            # Normalize in transformed (scale) space, not data space.
            t_value = self._trf.transform(value).reshape(np.shape(value))
            t_vmin, t_vmax = self._trf.transform([self.vmin, self.vmax])
            if not np.isfinite([t_vmin, t_vmax]).all():
                raise ValueError("Invalid vmin or vmax")
            t_value -= t_vmin
            t_value /= (t_vmax - t_vmin)
            t_value = np.ma.masked_invalid(t_value, copy=False)
            return t_value[0] if is_scalar else t_value
        def inverse(self, value):
            if not self.scaled():
                raise ValueError("Not invertible until scaled")
            if self.vmin > self.vmax:
                raise ValueError("vmin must be less or equal to vmax")
            t_vmin, t_vmax = self._trf.transform([self.vmin, self.vmax])
            if not np.isfinite([t_vmin, t_vmax]).all():
                raise ValueError("Invalid vmin or vmax")
            value, is_scalar = self.process_value(value)
            rescaled = value * (t_vmax - t_vmin)
            rescaled += t_vmin
            value = (self._trf
                     .inverted()
                     .transform(rescaled)
                     .reshape(np.shape(value)))
            return value[0] if is_scalar else value
        def autoscale_None(self, A):
            # i.e. A[np.isfinite(...)], but also for non-array A's
            in_trf_domain = np.extract(np.isfinite(self._trf.transform(A)), A)
            if in_trf_domain.size == 0:
                in_trf_domain = np.ma.masked
            return super().autoscale_None(in_trf_domain)
    # Give the generated class a recognizable identity: derive the name from
    # the scale when decorating plain Normalize, otherwise mirror the base.
    if base_norm_cls is Normalize:
        Norm.__name__ = f"{scale_cls.__name__}Norm"
        Norm.__qualname__ = f"{scale_cls.__qualname__}Norm"
    else:
        Norm.__name__ = base_norm_cls.__name__
        Norm.__qualname__ = base_norm_cls.__qualname__
    Norm.__module__ = base_norm_cls.__module__
    Norm.__doc__ = base_norm_cls.__doc__
    return Norm
def _create_empty_object_of_class(cls):
    """Allocate a bare, uninitialized instance of *cls* (used by unpickling)."""
    instance = cls.__new__(cls)
    return instance
def _picklable_norm_constructor(*args):
    """Rebuild an empty dynamically-generated norm instance when unpickling."""
    norm_cls = _make_norm_from_scale(*args)
    return norm_cls.__new__(norm_cls)
# The class body is intentionally empty apart from the docstring: __init__,
# __call__, inverse, and autoscale_None are generated by the decorator, which
# forwards all computations to a scale.FuncScale built from *functions*.
@make_norm_from_scale(
    scale.FuncScale,
    init=lambda functions, vmin=None, vmax=None, clip=False: None)
class FuncNorm(Normalize):
    """
    Arbitrary normalization using functions for the forward and inverse.
    Parameters
    ----------
    functions : (callable, callable)
        two-tuple of the forward and inverse functions for the normalization.
        The forward function must be monotonic.
        Both functions must have the signature ::
           def forward(values: array-like) -> array-like
    vmin, vmax : float or None
        If *vmin* and/or *vmax* is not given, they are initialized from the
        minimum and maximum value, respectively, of the first input
        processed; i.e., ``__call__(A)`` calls ``autoscale_None(A)``.
    clip : bool, default: False
        If ``True`` values falling outside the range ``[vmin, vmax]``,
        are mapped to 0 or 1, whichever is closer, and masked values are
        set to 1.  If ``False`` masked values remain masked.
        Clipping silently defeats the purpose of setting the over, under,
        and masked colors in a colormap, so it is likely to lead to
        surprises; therefore the default is ``clip=False``.
    """
# LogNorm is generated programmatically rather than via a class statement:
# the computations are forwarded to a LogScale that masks non-positive
# values; the name/doc fixups below make it look like a hand-written class.
LogNorm = make_norm_from_scale(
    functools.partial(scale.LogScale, nonpositive="mask"))(Normalize)
LogNorm.__name__ = LogNorm.__qualname__ = "LogNorm"
LogNorm.__doc__ = "Normalize a given value to the 0-1 range on a log scale."
@make_norm_from_scale(
    scale.SymmetricalLogScale,
    init=lambda linthresh, linscale=1., vmin=None, vmax=None, clip=False, *,
    base=10: None)
class SymLogNorm(Normalize):
    """
    The symmetrical logarithmic scale is logarithmic in both the
    positive and negative directions from the origin.
    Since the values close to zero tend toward infinity, there is a
    need to have a range around zero that is linear.  The parameter
    *linthresh* allows the user to specify the size of this range
    (-*linthresh*, *linthresh*).
    Parameters
    ----------
    linthresh : float
        The range within which the plot is linear (to avoid having the plot
        go to infinity around zero).
    linscale : float, default: 1
        This allows the linear range (-*linthresh* to *linthresh*) to be
        stretched relative to the logarithmic range.  Its value is the
        number of decades to use for each half of the linear range.  For
        example, when *linscale* == 1.0 (the default), the space used for
        the positive and negative halves of the linear range will be equal
        to one decade in the logarithmic range.
    base : float, default: 10
    """
    @property
    def linthresh(self):
        # Forwarded to the underlying SymmetricalLogScale instance created
        # by the decorator.
        return self._scale.linthresh
    @linthresh.setter
    def linthresh(self, value):
        self._scale.linthresh = value
# Implementation is generated by make_norm_from_scale from scale.AsinhScale;
# the lambda only supplies the __init__ signature.
@make_norm_from_scale(
    scale.AsinhScale,
    init=lambda linear_width=1, vmin=None, vmax=None, clip=False: None)
class AsinhNorm(Normalize):
    """
    The inverse hyperbolic sine scale is approximately linear near
    the origin, but becomes logarithmic for larger positive
    or negative values. Unlike the `SymLogNorm`, the transition between
    these linear and logarithmic regions is smooth, which may reduce
    the risk of visual artifacts.

    .. note::

       This API is provisional and may be revised in the future
       based on early user feedback.

    Parameters
    ----------
    linear_width : float, default: 1
        The effective width of the linear region, beyond which
        the transformation becomes asymptotically logarithmic
    """

    @property
    def linear_width(self):
        """The effective width of the linear region around zero."""
        return self._scale.linear_width

    @linear_width.setter
    def linear_width(self, value):
        # Keep the underlying scale (which implements the transform) in sync.
        self._scale.linear_width = value
class PowerNorm(Normalize):
    """
    Linearly map a given value to the 0-1 range and then apply
    a power-law normalization over that range.
    """

    def __init__(self, gamma, vmin=None, vmax=None, clip=False):
        # *gamma* is the exponent applied after the linear rescaling.
        super().__init__(vmin, vmax, clip)
        self.gamma = gamma

    def __call__(self, value, clip=None):
        """Normalize *value* to [0, 1] and apply the power-law mapping."""
        if clip is None:
            clip = self.clip

        result, is_scalar = self.process_value(value)
        self.autoscale_None(result)

        gamma = self.gamma
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        if vmin == vmax:
            # Degenerate range: everything maps to 0.
            result.fill(0)
        else:
            if clip:
                # Clip into [vmin, vmax] while preserving the original mask.
                mask = np.ma.getmask(result)
                result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax),
                                     mask=mask)
            # Operate in place on the underlying data buffer for speed.
            res = result.data
            res -= vmin
            res[res < 0] = 0
            np.power(res, gamma, res)
            res /= (vmax - vmin) ** gamma
            result = np.ma.array(res, mask=result.mask, copy=False)

        return result[0] if is_scalar else result

    def inverse(self, value):
        """Map a normalized value in [0, 1] back to data space."""
        if not self.scaled():
            raise ValueError("Not invertible until scaled")

        gamma = self.gamma
        vmin, vmax = self.vmin, self.vmax
        if not np.iterable(value):
            return pow(value, 1. / gamma) * (vmax - vmin) + vmin
        val = np.ma.asarray(value)
        return np.ma.power(val, 1. / gamma) * (vmax - vmin) + vmin
class BoundaryNorm(Normalize):
    """
    Generate a colormap index based on discrete intervals.

    Unlike `Normalize` or `LogNorm`, `BoundaryNorm` maps values to integers
    instead of to the interval 0-1.
    """

    # Mapping to the 0-1 interval could have been done via piece-wise linear
    # interpolation, but using integers seems simpler, and reduces the number
    # of conversions back and forth between int and float.

    def __init__(self, boundaries, ncolors, clip=False, *, extend='neither'):
        """
        Parameters
        ----------
        boundaries : array-like
            Monotonically increasing sequence of at least 2 bin edges: data
            falling in the n-th bin will be mapped to the n-th color.

        ncolors : int
            Number of colors in the colormap to be used.

        clip : bool, optional
            If clip is ``True``, out of range values are mapped to 0 if they
            are below ``boundaries[0]`` or mapped to ``ncolors - 1`` if they
            are above ``boundaries[-1]``.

            If clip is ``False``, out of range values are mapped to -1 if
            they are below ``boundaries[0]`` or mapped to *ncolors* if they are
            above ``boundaries[-1]``. These are then converted to valid indices
            by `Colormap.__call__`.

        extend : {'neither', 'both', 'min', 'max'}, default: 'neither'
            Extend the number of bins to include one or both of the
            regions beyond the boundaries.  For example, if ``extend``
            is 'min', then the color to which the region between the first
            pair of boundaries is mapped will be distinct from the first
            color in the colormap, and by default a
            `~matplotlib.colorbar.Colorbar` will be drawn with
            the triangle extension on the left or lower end.

        Notes
        -----
        If there are fewer bins (including extensions) than colors, then the
        color index is chosen by linearly interpolating the ``[0, nbins - 1]``
        range onto the ``[0, ncolors - 1]`` range, effectively skipping some
        colors in the middle of the colormap.
        """
        if clip and extend != 'neither':
            raise ValueError("'clip=True' is not compatible with 'extend'")
        super().__init__(vmin=boundaries[0], vmax=boundaries[-1], clip=clip)
        self.boundaries = np.asarray(boundaries)
        self.N = len(self.boundaries)
        if self.N < 2:
            raise ValueError("You must provide at least 2 boundaries "
                             f"(1 region) but you passed in {boundaries!r}")
        self.Ncmap = ncolors
        self.extend = extend

        self._scale = None  # don't use the default scale.

        self._n_regions = self.N - 1  # number of colors needed
        self._offset = 0
        # Each requested extension adds an extra color region; a 'min'
        # extension additionally shifts the in-range region indices up by one.
        if extend in ('min', 'both'):
            self._n_regions += 1
            self._offset = 1
        if extend in ('max', 'both'):
            self._n_regions += 1
        if self._n_regions > self.Ncmap:
            raise ValueError(f"There are {self._n_regions} color bins "
                             "including extensions, but ncolors = "
                             f"{ncolors}; ncolors must equal or exceed the "
                             "number of bins")

    def __call__(self, value, clip=None):
        """
        This method behaves similarly to `.Normalize.__call__`, except that it
        returns integers or arrays of int16.
        """
        if clip is None:
            clip = self.clip

        xx, is_scalar = self.process_value(value)
        mask = np.ma.getmaskarray(xx)
        # Fill masked values a value above the upper boundary
        xx = np.atleast_1d(xx.filled(self.vmax + 1))
        if clip:
            np.clip(xx, self.vmin, self.vmax, out=xx)
            max_col = self.Ncmap - 1
        else:
            max_col = self.Ncmap
        # this gives us the bins in the lookup table in the range
        # [0, _n_regions - 1]  (the offset is set in the init)
        iret = np.digitize(xx, self.boundaries) - 1 + self._offset
        # if we have more colors than regions, stretch the region
        # index computed above to full range of the color bins.  This
        # will make use of the full range (but skip some of the colors
        # in the middle) such that the first region is mapped to the
        # first color and the last region is mapped to the last color.
        if self.Ncmap > self._n_regions:
            if self._n_regions == 1:
                # special case the 1 region case, pick the middle color
                iret[iret == 0] = (self.Ncmap - 1) // 2
            else:
                # otherwise linearly remap the values from the region index
                # to the color index spaces
                iret = (self.Ncmap - 1) / (self._n_regions - 1) * iret
        # cast to 16bit integers in all cases
        iret = iret.astype(np.int16)
        # Out-of-range values: -1 below, max_col above (see __init__ docs).
        iret[xx < self.vmin] = -1
        iret[xx >= self.vmax] = max_col
        ret = np.ma.array(iret, mask=mask)
        if is_scalar:
            ret = int(ret[0])  # assume python scalar
        return ret

    def inverse(self, value):
        """
        Raises
        ------
        ValueError
            BoundaryNorm is not invertible, so calling this method will always
            raise an error
        """
        raise ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
    """
    Dummy replacement for `Normalize`, for the case where we want to use
    indices directly in a `~matplotlib.cm.ScalarMappable`.
    """

    def __call__(self, value, clip=None):
        # Identity: the input is already a color index.
        return value

    def inverse(self, value):
        # Identity: nothing to invert.
        return value
def rgb_to_hsv(arr):
    """
    Convert float RGB values (in the range [0, 1]), in a numpy array to HSV
    values.

    Parameters
    ----------
    arr : (..., 3) array-like
       All values must be in the range [0, 1]

    Returns
    -------
    (..., 3) `~numpy.ndarray`
       Colors converted to HSV values in range [0, 1]

    Raises
    ------
    ValueError
       If the last dimension of *arr* is not 3.
    """
    arr = np.asarray(arr)

    # check length of the last dimension, should be _some_ sort of rgb
    if arr.shape[-1] != 3:
        raise ValueError("Last dimension of input array must be 3; "
                         "shape {} was found.".format(arr.shape))

    in_shape = arr.shape
    # Promote to at least float32 (don't work on ints) and to at least 2D (in
    # case the input was a single (3,) color).  np.asarray + np.atleast_2d is
    # used instead of np.array(..., copy=False, ndmin=2): `copy=False` raises
    # in NumPy 2.0 whenever a copy is actually required (e.g. the dtype
    # promotion here).
    arr = np.atleast_2d(
        np.asarray(arr, dtype=np.promote_types(arr.dtype, np.float32)))
    out = np.zeros_like(arr)
    arr_max = arr.max(-1)
    ipos = arr_max > 0
    # np.ptp(arr, axis) rather than arr.ptp(axis): the ndarray method was
    # removed in NumPy 2.0.
    delta = np.ptp(arr, -1)
    s = np.zeros_like(delta)
    s[ipos] = delta[ipos] / arr_max[ipos]
    ipos = delta > 0
    # The hue depends on which channel is the maximum; each case covers one
    # third of the hue circle.
    # red is max
    idx = (arr[..., 0] == arr_max) & ipos
    out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
    # green is max
    idx = (arr[..., 1] == arr_max) & ipos
    out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
    # blue is max
    idx = (arr[..., 2] == arr_max) & ipos
    out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]

    out[..., 0] = (out[..., 0] / 6.0) % 1.0
    out[..., 1] = s
    out[..., 2] = arr_max

    return out.reshape(in_shape)
def hsv_to_rgb(hsv):
    """
    Convert HSV values to RGB.

    Parameters
    ----------
    hsv : (..., 3) array-like
       All values assumed to be in range [0, 1]

    Returns
    -------
    (..., 3) `~numpy.ndarray`
       Colors converted to RGB values in range [0, 1]

    Raises
    ------
    ValueError
       If the last dimension of *hsv* is not 3.
    """
    hsv = np.asarray(hsv)

    # check length of the last dimension, should be _some_ sort of rgb
    if hsv.shape[-1] != 3:
        raise ValueError("Last dimension of input array must be 3; "
                         "shape {shp} was found.".format(shp=hsv.shape))

    in_shape = hsv.shape
    # Promote to at least float32 (don't work on ints) and to at least 2D (in
    # case the input was a single (3,) color).  np.asarray + np.atleast_2d is
    # used instead of np.array(..., copy=False, ndmin=2): `copy=False` raises
    # in NumPy 2.0 whenever a copy is actually required (e.g. the dtype
    # promotion here).
    hsv = np.atleast_2d(
        np.asarray(hsv, dtype=np.promote_types(hsv.dtype, np.float32)))

    h = hsv[..., 0]
    s = hsv[..., 1]
    v = hsv[..., 2]

    r = np.empty_like(h)
    g = np.empty_like(h)
    b = np.empty_like(h)

    # Standard sector-based HSV->RGB conversion: the hue circle is divided
    # into six sectors *i*; each channel takes one of v, p, q, t per sector.
    i = (h * 6.0).astype(int)
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))

    idx = i % 6 == 0
    r[idx] = v[idx]
    g[idx] = t[idx]
    b[idx] = p[idx]

    idx = i == 1
    r[idx] = q[idx]
    g[idx] = v[idx]
    b[idx] = p[idx]

    idx = i == 2
    r[idx] = p[idx]
    g[idx] = v[idx]
    b[idx] = t[idx]

    idx = i == 3
    r[idx] = p[idx]
    g[idx] = q[idx]
    b[idx] = v[idx]

    idx = i == 4
    r[idx] = t[idx]
    g[idx] = p[idx]
    b[idx] = v[idx]

    idx = i == 5
    r[idx] = v[idx]
    g[idx] = p[idx]
    b[idx] = q[idx]

    # Zero saturation is achromatic (gray): all channels equal the value.
    idx = s == 0
    r[idx] = v[idx]
    g[idx] = v[idx]
    b[idx] = v[idx]

    rgb = np.stack([r, g, b], axis=-1)

    return rgb.reshape(in_shape)
def _vector_magnitude(arr):
# things that don't work here:
# * np.linalg.norm: drops mask from ma.array
# * np.sum: drops mask from ma.array unless entire vector is masked
sum_sq = 0
for i in range(arr.shape[-1]):
sum_sq += arr[..., i, np.newaxis] ** 2
return np.sqrt(sum_sq)
class LightSource:
    """
    Create a light source coming from the specified azimuth and elevation.
    Angles are in degrees, with the azimuth measured
    clockwise from north and elevation up from the zero plane of the surface.

    `shade` is used to produce "shaded" RGB values for a data array.
    `shade_rgb` can be used to combine an RGB image with an elevation map.
    `hillshade` produces an illumination map of a surface.
    """

    def __init__(self, azdeg=315, altdeg=45, hsv_min_val=0, hsv_max_val=1,
                 hsv_min_sat=1, hsv_max_sat=0):
        """
        Specify the azimuth (measured clockwise from south) and altitude
        (measured up from the plane of the surface) of the light source
        in degrees.

        Parameters
        ----------
        azdeg : float, default: 315 degrees (from the northwest)
            The azimuth (0-360, degrees clockwise from North) of the light
            source.
        altdeg : float, default: 45 degrees
            The altitude (0-90, degrees up from horizontal) of the light
            source.

        Notes
        -----
        For backwards compatibility, the parameters *hsv_min_val*,
        *hsv_max_val*, *hsv_min_sat*, and *hsv_max_sat* may be supplied at
        initialization as well.  However, these parameters will only be used
        if "blend_mode='hsv'" is passed into `shade` or `shade_rgb`.
        See the documentation for `blend_hsv` for more details.
        """
        self.azdeg = azdeg
        self.altdeg = altdeg
        self.hsv_min_val = hsv_min_val
        self.hsv_max_val = hsv_max_val
        self.hsv_min_sat = hsv_min_sat
        self.hsv_max_sat = hsv_max_sat

    @property
    def direction(self):
        """The unit vector direction towards the light source."""
        # Azimuth is in degrees clockwise from North.  Convert to radians
        # counterclockwise from East (mathematical notation).
        az = np.radians(90 - self.azdeg)
        alt = np.radians(self.altdeg)
        return np.array([
            np.cos(az) * np.cos(alt),
            np.sin(az) * np.cos(alt),
            np.sin(alt)
        ])

    def hillshade(self, elevation, vert_exag=1, dx=1, dy=1, fraction=1.):
        """
        Calculate the illumination intensity for a surface using the defined
        azimuth and elevation for the light source.

        This computes the normal vectors for the surface, and then passes them
        on to `shade_normals`

        Parameters
        ----------
        elevation : 2D array-like
            The height values used to generate an illumination map
        vert_exag : number, optional
            The amount to exaggerate the elevation values by when calculating
            illumination. This can be used either to correct for differences
            in units between the x-y coordinate system and the elevation
            coordinate system (e.g. decimal degrees vs. meters) or to
            exaggerate or de-emphasize topographic effects.
        dx : number, optional
            The x-spacing (columns) of the input *elevation* grid.
        dy : number, optional
            The y-spacing (rows) of the input *elevation* grid.
        fraction : number, optional
            Increases or decreases the contrast of the hillshade.  Values
            greater than one will cause intermediate values to move closer to
            full illumination or shadow (and clipping any values that move
            beyond 0 or 1). Note that this is not visually or mathematically
            the same as vertical exaggeration.

        Returns
        -------
        `~numpy.ndarray`
            A 2D array of illumination values between 0-1, where 0 is
            completely in shadow and 1 is completely illuminated.
        """
        # Because most image and raster GIS data has the first row in the
        # array as the "top" of the image, dy is implicitly negative.  This is
        # consistent to what `imshow` assumes, as well.
        dy = -dy

        # compute the normal vectors from the partial derivatives
        e_dy, e_dx = np.gradient(vert_exag * elevation, dy, dx)

        # .view is to keep subclasses
        normal = np.empty(elevation.shape + (3,)).view(type(elevation))
        normal[..., 0] = -e_dx
        normal[..., 1] = -e_dy
        normal[..., 2] = 1
        normal /= _vector_magnitude(normal)

        return self.shade_normals(normal, fraction)

    def shade_normals(self, normals, fraction=1.):
        """
        Calculate the illumination intensity for the normal vectors of a
        surface using the defined azimuth and elevation for the light source.

        Imagine an artificial sun placed at infinity in some azimuth and
        elevation position illuminating our surface. The parts of the surface
        that slope toward the sun should brighten while those sides facing
        away should become darker.

        Parameters
        ----------
        fraction : number, optional
            Increases or decreases the contrast of the hillshade.  Values
            greater than one will cause intermediate values to move closer to
            full illumination or shadow (and clipping any values that move
            beyond 0 or 1). Note that this is not visually or mathematically
            the same as vertical exaggeration.

        Returns
        -------
        `~numpy.ndarray`
            A 2D array of illumination values between 0-1, where 0 is
            completely in shadow and 1 is completely illuminated.
        """
        intensity = normals.dot(self.direction)

        # Apply contrast stretch
        imin, imax = intensity.min(), intensity.max()
        intensity *= fraction

        # Rescale to 0-1, keeping range before contrast stretch
        # If constant slope, keep relative scaling (i.e. flat should be 0.5,
        # fully occluded 0, etc.)
        if (imax - imin) > 1e-6:
            # Strictly speaking, this is incorrect. Negative values should be
            # clipped to 0 because they're fully occluded. However, rescaling
            # in this manner is consistent with the previous implementation
            # and visually appears better than a "hard" clip.
            intensity -= imin
            intensity /= (imax - imin)
        intensity = np.clip(intensity, 0, 1)

        return intensity

    def shade(self, data, cmap, norm=None, blend_mode='overlay', vmin=None,
              vmax=None, vert_exag=1, dx=1, dy=1, fraction=1, **kwargs):
        """
        Combine colormapped data values with an illumination intensity map
        (a.k.a.  "hillshade") of the values.

        Parameters
        ----------
        data : 2D array-like
            The height values used to generate a shaded map.
        cmap : `~matplotlib.colors.Colormap`
            The colormap used to color the *data* array. Note that this must
            be a `~matplotlib.colors.Colormap` instance.  For example, rather
            than passing in ``cmap='gist_earth'``, use
            ``cmap=plt.get_cmap('gist_earth')`` instead.
        norm : `~matplotlib.colors.Normalize` instance, optional
            The normalization used to scale values before colormapping. If
            None, the input will be linearly scaled between its min and max.
        blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
            The type of blending used to combine the colormapped data
            values with the illumination intensity.  Default is
            "overlay".  Note that for most topographic surfaces,
            "overlay" or "soft" appear more visually realistic. If a
            user-defined function is supplied, it is expected to
            combine an MxNx3 RGB array of floats (ranging 0 to 1) with
            an MxNx1 hillshade array (also 0 to 1).  (Call signature
            ``func(rgb, illum, **kwargs)``) Additional kwargs supplied
            to this function will be passed on to the *blend_mode*
            function.
        vmin : float or None, optional
            The minimum value used in colormapping *data*. If *None* the
            minimum value in *data* is used. If *norm* is specified, then
            this argument will be ignored.
        vmax : float or None, optional
            The maximum value used in colormapping *data*. If *None* the
            maximum value in *data* is used. If *norm* is specified, then
            this argument will be ignored.
        vert_exag : number, optional
            The amount to exaggerate the elevation values by when calculating
            illumination. This can be used either to correct for differences
            in units between the x-y coordinate system and the elevation
            coordinate system (e.g. decimal degrees vs. meters) or to
            exaggerate or de-emphasize topography.
        dx : number, optional
            The x-spacing (columns) of the input *elevation* grid.
        dy : number, optional
            The y-spacing (rows) of the input *elevation* grid.
        fraction : number, optional
            Increases or decreases the contrast of the hillshade.  Values
            greater than one will cause intermediate values to move closer to
            full illumination or shadow (and clipping any values that move
            beyond 0 or 1). Note that this is not visually or mathematically
            the same as vertical exaggeration.
        **kwargs
            Additional kwargs are passed on to the *blend_mode* function.

        Returns
        -------
        `~numpy.ndarray`
            An MxNx4 array of floats ranging between 0-1.
        """
        if vmin is None:
            vmin = data.min()
        if vmax is None:
            vmax = data.max()
        if norm is None:
            norm = Normalize(vmin=vmin, vmax=vmax)

        rgb0 = cmap(norm(data))
        rgb1 = self.shade_rgb(rgb0, elevation=data, blend_mode=blend_mode,
                              vert_exag=vert_exag, dx=dx, dy=dy,
                              fraction=fraction, **kwargs)
        # Don't overwrite the alpha channel, if present.
        rgb0[..., :3] = rgb1[..., :3]
        return rgb0

    def shade_rgb(self, rgb, elevation, fraction=1., blend_mode='hsv',
                  vert_exag=1, dx=1, dy=1, **kwargs):
        """
        Use this light source to adjust the colors of the *rgb* input array to
        give the impression of a shaded relief map with the given *elevation*.

        Parameters
        ----------
        rgb : array-like
            An (M, N, 3) RGB array, assumed to be in the range of 0 to 1.
        elevation : array-like
            An (M, N) array of the height values used to generate a shaded
            map.
        fraction : number
            Increases or decreases the contrast of the hillshade.  Values
            greater than one will cause intermediate values to move closer to
            full illumination or shadow (and clipping any values that move
            beyond 0 or 1). Note that this is not visually or mathematically
            the same as vertical exaggeration.
        blend_mode : {'hsv', 'overlay', 'soft'} or callable, optional
            The type of blending used to combine the colormapped data values
            with the illumination intensity.  For backwards compatibility,
            this defaults to "hsv". Note that for most topographic surfaces,
            "overlay" or "soft" appear more visually realistic. If a
            user-defined function is supplied, it is expected to combine an
            MxNx3 RGB array of floats (ranging 0 to 1) with an MxNx1 hillshade
            array (also 0 to 1).  (Call signature
            ``func(rgb, illum, **kwargs)``)
            Additional kwargs supplied to this function will be passed on to
            the *blend_mode* function.
        vert_exag : number, optional
            The amount to exaggerate the elevation values by when calculating
            illumination. This can be used either to correct for differences
            in units between the x-y coordinate system and the elevation
            coordinate system (e.g. decimal degrees vs. meters) or to
            exaggerate or de-emphasize topography.
        dx : number, optional
            The x-spacing (columns) of the input *elevation* grid.
        dy : number, optional
            The y-spacing (rows) of the input *elevation* grid.
        **kwargs
            Additional kwargs are passed on to the *blend_mode* function.

        Returns
        -------
        `~numpy.ndarray`
            An (m, n, 3) array of floats ranging between 0-1.
        """
        # Calculate the "hillshade" intensity.
        intensity = self.hillshade(elevation, vert_exag, dx, dy, fraction)
        intensity = intensity[..., np.newaxis]

        # Blend the hillshade and rgb data using the specified mode
        lookup = {
            'hsv': self.blend_hsv,
            'soft': self.blend_soft_light,
            'overlay': self.blend_overlay,
        }
        if blend_mode in lookup:
            blend = lookup[blend_mode](rgb, intensity, **kwargs)
        else:
            try:
                blend = blend_mode(rgb, intensity, **kwargs)
            except TypeError as err:
                # Bug fix: this previously formatted `lookup.keys` (the bound
                # method object itself) into the message; list the actual
                # valid mode names instead.
                raise ValueError('"blend_mode" must be callable or one of {}'
                                 .format(list(lookup))) from err

        # Only apply result where hillshade intensity isn't masked
        if np.ma.is_masked(intensity):
            mask = intensity.mask[..., 0]
            for i in range(3):
                blend[..., i][mask] = rgb[..., i][mask]

        return blend

    def blend_hsv(self, rgb, intensity, hsv_max_sat=None, hsv_max_val=None,
                  hsv_min_val=None, hsv_min_sat=None):
        """
        Take the input data array, convert to HSV values in the given
        colormap, then adjust those color values to give the impression of a
        shaded relief map with a specified light source.  RGBA values are
        returned, which can then be used to plot the shaded image with imshow.

        The color of the resulting image will be darkened by moving the (s, v)
        values (in HSV colorspace) toward (hsv_min_sat, hsv_min_val) in the
        shaded regions, or lightened by sliding (s, v) toward (hsv_max_sat,
        hsv_max_val) in regions that are illuminated.  The default extremes
        are chosen so that completely shaded points are nearly black (s = 1,
        v = 0) and completely illuminated points are nearly white (s = 0,
        v = 1).

        Parameters
        ----------
        rgb : `~numpy.ndarray`
            An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
        intensity : `~numpy.ndarray`
            An MxNx1 array of floats ranging from 0 to 1 (grayscale image).
        hsv_max_sat : number, default: 1
            The maximum saturation value that the *intensity* map can shift
            the output image to.
        hsv_min_sat : number, optional
            The minimum saturation value that the *intensity* map can shift
            the output image to. Defaults to 0.
        hsv_max_val : number, optional
            The maximum value ("v" in "hsv") that the *intensity* map can
            shift the output image to. Defaults to 1.
        hsv_min_val : number, optional
            The minimum value ("v" in "hsv") that the *intensity* map can
            shift the output image to. Defaults to 0.

        Returns
        -------
        `~numpy.ndarray`
            An MxNx3 RGB array representing the combined images.
        """
        # Backward compatibility...
        if hsv_max_sat is None:
            hsv_max_sat = self.hsv_max_sat
        if hsv_max_val is None:
            hsv_max_val = self.hsv_max_val
        if hsv_min_sat is None:
            hsv_min_sat = self.hsv_min_sat
        if hsv_min_val is None:
            hsv_min_val = self.hsv_min_val

        # Expects a 2D intensity array scaled between -1 to 1...
        intensity = intensity[..., 0]
        intensity = 2 * intensity - 1

        # Convert to rgb, then rgb to hsv
        hsv = rgb_to_hsv(rgb[:, :, 0:3])
        hue, sat, val = np.moveaxis(hsv, -1, 0)

        # Modify hsv values (in place) to simulate illumination.
        # putmask(A, mask, B) <=> A[mask] = B[mask]
        np.putmask(sat, (np.abs(sat) > 1.e-10) & (intensity > 0),
                   (1 - intensity) * sat + intensity * hsv_max_sat)
        np.putmask(sat, (np.abs(sat) > 1.e-10) & (intensity < 0),
                   (1 + intensity) * sat - intensity * hsv_min_sat)
        np.putmask(val, intensity > 0,
                   (1 - intensity) * val + intensity * hsv_max_val)
        np.putmask(val, intensity < 0,
                   (1 + intensity) * val - intensity * hsv_min_val)
        np.clip(hsv[:, :, 1:], 0, 1, out=hsv[:, :, 1:])

        # Convert modified hsv back to rgb.
        return hsv_to_rgb(hsv)

    def blend_soft_light(self, rgb, intensity):
        """
        Combine an RGB image with an intensity map using "soft light"
        blending, using the "pegtop" formula.

        Parameters
        ----------
        rgb : `~numpy.ndarray`
            An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
        intensity : `~numpy.ndarray`
            An MxNx1 array of floats ranging from 0 to 1 (grayscale image).

        Returns
        -------
        `~numpy.ndarray`
            An MxNx3 RGB array representing the combined images.
        """
        return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2

    def blend_overlay(self, rgb, intensity):
        """
        Combine an RGB image with an intensity map using "overlay" blending.

        Parameters
        ----------
        rgb : `~numpy.ndarray`
            An MxNx3 RGB array of floats ranging from 0 to 1 (color image).
        intensity : `~numpy.ndarray`
            An MxNx1 array of floats ranging from 0 to 1 (grayscale image).

        Returns
        -------
        ndarray
            An MxNx3 RGB array representing the combined images.
        """
        low = 2 * intensity * rgb
        high = 1 - 2 * (1 - intensity) * (1 - rgb)
        return np.where(rgb <= 0.5, low, high)
def from_levels_and_colors(levels, colors, extend='neither'):
    """
    A helper routine to generate a cmap and a norm instance which
    behave similar to contourf's levels and colors arguments.

    Parameters
    ----------
    levels : sequence of numbers
        The quantization levels used to construct the `BoundaryNorm`.
        Value ``v`` is quantized to level ``i`` if ``lev[i] <= v < lev[i+1]``.
    colors : sequence of colors
        The fill color to use for each level. If *extend* is "neither" there
        must be ``n_level - 1`` colors. For an *extend* of "min" or "max" add
        one extra color, and for an *extend* of "both" add two colors.
    extend : {'neither', 'min', 'max', 'both'}, optional
        The behaviour when a value falls out of range of the given levels.
        See `~.Axes.contourf` for details.

    Returns
    -------
    cmap : `~matplotlib.colors.Colormap`
    norm : `~matplotlib.colors.Normalize`
    """
    # Which slice of *colors* covers the in-range data regions; the colors
    # sliced away (if any) become the under/over colors below.
    slice_map = {
        'both': slice(1, -1),
        'min': slice(1, None),
        'max': slice(0, -1),
        'neither': slice(0, None),
    }
    _api.check_in_list(slice_map, extend=extend)
    color_slice = slice_map[extend]

    n_data_colors = len(levels) - 1
    # Expected total color count: the data colors plus one per extension
    # (slice.start counts a leading extra; slice.stop of -1 counts a trailing
    # extra, since ``-(stop or 0)`` is then +1).
    n_expected = n_data_colors + color_slice.start - (color_slice.stop or 0)
    if len(colors) != n_expected:
        raise ValueError(
            f'With extend == {extend!r} and {len(levels)} levels, '
            f'expected {n_expected} colors, but got {len(colors)}')

    cmap = ListedColormap(colors[color_slice], N=n_data_colors)

    if extend in ['min', 'both']:
        cmap.set_under(colors[0])
    else:
        cmap.set_under('none')

    if extend in ['max', 'both']:
        cmap.set_over(colors[-1])
    else:
        cmap.set_over('none')

    cmap.colorbar_extend = extend

    norm = BoundaryNorm(levels, ncolors=n_data_colors)
    return cmap, norm
|
"""
Non-separable transforms that map from data space to screen space.
Projections are defined as `~.axes.Axes` subclasses. They include the
following elements:
- A transformation from data coordinates into display coordinates.
- An inverse of that transformation. This is used, for example, to convert
mouse positions from screen space back into data space.
- Transformations for the gridlines, ticks and ticklabels. Custom projections
will often need to place these elements in special locations, and Matplotlib
has a facility to help with doing so.
- Setting up default values (overriding `~.axes.Axes.cla`), since the defaults
for a rectilinear axes may not be appropriate.
- Defining the shape of the axes, for example, an elliptical axes, that will be
used to draw the background of the plot and for clipping any data elements.
- Defining custom locators and formatters for the projection. For example, in
a geographic projection, it may be more convenient to display the grid in
degrees, even if the data is in radians.
- Set up interactive panning and zooming. This is left as an "advanced"
  feature for the reader, but there is an example of this for polar plots
  in `matplotlib.projections.polar`.
- Any additional methods for additional convenience or features.
Once the projection axes is defined, it can be used in one of two ways:
- By defining the class attribute ``name``, the projection axes can be
registered with `matplotlib.projections.register_projection` and subsequently
simply invoked by name::
fig.add_subplot(projection="my_proj_name")
- For more complex, parameterisable projections, a generic "projection" object
may be defined which includes the method ``_as_mpl_axes``. ``_as_mpl_axes``
should take no arguments and return the projection's axes subclass and a
dictionary of additional arguments to pass to the subclass' ``__init__``
method. Subsequently a parameterised projection can be initialised with::
fig.add_subplot(projection=MyProjection(param1=param1_value))
where MyProjection is an object which implements a ``_as_mpl_axes`` method.
A full-fledged and heavily annotated example is in
:doc:`/gallery/misc/custom_projection`. The polar plot functionality in
`matplotlib.projections.polar` may also be of interest.
"""
from .. import axes, _docstring
from .geo import AitoffAxes, HammerAxes, LambertAxes, MollweideAxes
from .polar import PolarAxes
from mpl_toolkits.mplot3d import Axes3D
class ProjectionRegistry:
    """A mapping of registered projection names to projection classes."""

    def __init__(self):
        # name -> projection class
        self._all_projection_types = {}

    def register(self, *projections):
        """Register a new set of projections."""
        self._all_projection_types.update(
            (projection.name, projection) for projection in projections)

    def get_projection_class(self, name):
        """Get a projection class from its *name*."""
        return self._all_projection_types[name]

    def get_projection_names(self):
        """Return the names of all projections currently registered."""
        return sorted(self._all_projection_types)
# The module-level registry, pre-populated with the projections that ship
# with Matplotlib so they can be requested by name, e.g.
# ``fig.add_subplot(projection="polar")``.
projection_registry = ProjectionRegistry()
projection_registry.register(
    axes.Axes,
    PolarAxes,
    AitoffAxes,
    HammerAxes,
    LambertAxes,
    MollweideAxes,
    Axes3D,
)
def register_projection(cls):
    """Register a projection class with the global projection registry."""
    projection_registry.register(cls)
def get_projection_class(projection=None):
    """
    Get a projection class from its name.

    If *projection* is None, a standard rectilinear projection is returned.
    """
    name = 'rectilinear' if projection is None else projection
    try:
        return projection_registry.get_projection_class(name)
    except KeyError as err:
        # Re-raise as ValueError so callers get a consistent exception type.
        raise ValueError("Unknown projection %r" % name) from err
# Module-level alias so callers can simply call ``get_projection_names()``.
get_projection_names = projection_registry.get_projection_names
# Make the registered names available for %(projection_names)s substitution
# in docstrings.
_docstring.interpd.update(projection_names=get_projection_names())
|
"""
Cycler
======
Cycling through combinations of values, producing dictionaries.
You can add cyclers::
from cycler import cycler
cc = (cycler(color=list('rgb')) +
cycler(linestyle=['-', '--', '-.']))
for d in cc:
print(d)
Results in::
{'color': 'r', 'linestyle': '-'}
{'color': 'g', 'linestyle': '--'}
{'color': 'b', 'linestyle': '-.'}
You can multiply cyclers::
from cycler import cycler
cc = (cycler(color=list('rgb')) *
cycler(linestyle=['-', '--', '-.']))
for d in cc:
print(d)
Results in::
{'color': 'r', 'linestyle': '-'}
{'color': 'r', 'linestyle': '--'}
{'color': 'r', 'linestyle': '-.'}
{'color': 'g', 'linestyle': '-'}
{'color': 'g', 'linestyle': '--'}
{'color': 'g', 'linestyle': '-.'}
{'color': 'b', 'linestyle': '-'}
{'color': 'b', 'linestyle': '--'}
{'color': 'b', 'linestyle': '-.'}
"""
import copy
from functools import reduce
from itertools import product, cycle
from operator import mul, add
__version__ = '0.10.0'
def _process_keys(left, right):
"""
Helper function to compose cycler keys.
Parameters
----------
left, right : iterable of dictionaries or None
The cyclers to be composed.
Returns
-------
keys : set
The keys in the composition of the two cyclers.
"""
l_peek = next(iter(left)) if left is not None else {}
r_peek = next(iter(right)) if right is not None else {}
l_key = set(l_peek.keys())
r_key = set(r_peek.keys())
if l_key & r_key:
raise ValueError("Can not compose overlapping cycles")
return l_key | r_key
def concat(left, right):
    r"""
    Concatenate `Cycler`\s, as if chained using `itertools.chain`.

    The keys must match exactly.

    Examples
    --------
    >>> num = cycler('a', range(3))
    >>> let = cycler('a', 'abc')
    >>> num.concat(let)
    cycler('a', [0, 1, 2, 'a', 'b', 'c'])

    Returns
    -------
    `Cycler`
        The concatenated cycler.
    """
    if left.keys != right.keys:
        raise ValueError("Keys do not match:\n"
                         "\tIntersection: {both!r}\n"
                         "\tDisjoint: {just_one!r}".format(
                             both=left.keys & right.keys,
                             just_one=left.keys ^ right.keys))
    # Transpose both cyclers to {key: [values]} and splice per key.
    left_by_key = left.by_key()
    right_by_key = right.by_key()
    pieces = (_cycler(key, left_by_key[key] + right_by_key[key])
              for key in left.keys)
    # Summing single-key cyclers zips them back into one multi-key cycler.
    return reduce(add, pieces)
class Cycler:
    """
    Composable cycles.

    This class has compositions methods:

    ``+``
      for 'inner' products (zip)

    ``+=``
      in-place ``+``

    ``*``
      for outer products (`itertools.product`) and integer multiplication

    ``*=``
      in-place ``*``

    and supports basic slicing via ``[]``.

    Parameters
    ----------
    left, right : Cycler or None
        The 'left' and 'right' cyclers.
    op : func or None
        Function which composes the 'left' and 'right' cyclers.
    """

    def __call__(self):
        # Calling a Cycler yields an endless iterator over its finite cycle.
        return cycle(self)

    def __init__(self, left, right=None, op=None):
        """
        Semi-private init.

        Do not use this directly, use `cycler` function instead.
        """
        if isinstance(left, Cycler):
            self._left = Cycler(left._left, left._right, left._op)
        elif left is not None:
            # Need to copy the dictionary or else that will be a residual
            # mutable that could lead to strange errors
            self._left = [copy.copy(v) for v in left]
        else:
            self._left = None
        if isinstance(right, Cycler):
            self._right = Cycler(right._left, right._right, right._op)
        elif right is not None:
            # Need to copy the dictionary or else that will be a residual
            # mutable that could lead to strange errors
            self._right = [copy.copy(v) for v in right]
        else:
            self._right = None
        self._keys = _process_keys(self._left, self._right)
        self._op = op

    def __contains__(self, k):
        return k in self._keys

    @property
    def keys(self):
        """The keys this Cycler knows about."""
        # Return a copy so callers cannot mutate internal state.
        return set(self._keys)

    def change_key(self, old, new):
        """
        Change a key in this cycler to a new name.
        Modification is performed in-place.

        Does nothing if the old key is the same as the new key.
        Raises a ValueError if the new key is already a key.
        Raises a KeyError if the old key isn't a key.
        """
        if old == new:
            return
        if new in self._keys:
            raise ValueError(
                "Can't replace {old} with {new}, {new} is already a key"
                .format(old=old, new=new)
            )
        if old not in self._keys:
            raise KeyError("Can't replace {old} with {new}, {old} is not a key"
                           .format(old=old, new=new))
        self._keys.remove(old)
        self._keys.add(new)
        # Recurse into whichever side actually carries the old key.
        if self._right is not None and old in self._right.keys:
            self._right.change_key(old, new)
        # self._left should always be non-None
        # if self._keys is non-empty.
        elif isinstance(self._left, Cycler):
            self._left.change_key(old, new)
        else:
            # It should be completely safe at this point to
            # assume that the old key can be found in each
            # iteration.
            self._left = [{new: entry[old]} for entry in self._left]

    @classmethod
    def _from_iter(cls, label, itr):
        """
        Class method to create 'base' Cycler objects
        that do not have a 'right' or 'op' and for which
        the 'left' object is not another Cycler.

        Parameters
        ----------
        label : str
            The property key.
        itr : iterable
            Finite length iterable of the property values.

        Returns
        -------
        `Cycler`
            New 'base' cycler.
        """
        ret = cls(None)
        ret._left = list({label: v} for v in itr)
        ret._keys = {label}
        return ret

    def __getitem__(self, key):
        # TODO : maybe add numpy style fancy slicing
        if isinstance(key, slice):
            # Slice each key's value list, then recombine with +.
            trans = self.by_key()
            return reduce(add, (_cycler(k, v[key]) for k, v in trans.items()))
        else:
            raise ValueError("Can only use slices with Cycler.__getitem__")

    def __iter__(self):
        if self._right is None:
            for left in self._left:
                yield dict(left)
        else:
            # Combine the two sides via the stored composition op
            # (zip for +, itertools.product for *).
            for a, b in self._op(self._left, self._right):
                out = {}
                out.update(a)
                out.update(b)
                yield out

    def __add__(self, other):
        """
        Pair-wise combine two equal length cyclers (zip).

        Parameters
        ----------
        other : Cycler
        """
        if len(self) != len(other):
            raise ValueError("Can only add equal length cycles, "
                             f"not {len(self)} and {len(other)}")
        return Cycler(self, other, zip)

    def __mul__(self, other):
        """
        Outer product of two cyclers (`itertools.product`) or integer
        multiplication.

        Parameters
        ----------
        other : Cycler or int
        """
        if isinstance(other, Cycler):
            return Cycler(self, other, product)
        elif isinstance(other, int):
            # n * cycler repeats each key's value list n times.
            trans = self.by_key()
            return reduce(add, (_cycler(k, v*other) for k, v in trans.items()))
        else:
            return NotImplemented

    def __rmul__(self, other):
        return self * other

    def __len__(self):
        # zip truncates to the shorter side; product multiplies lengths.
        op_dict = {zip: min, product: mul}
        if self._right is None:
            return len(self._left)
        l_len = len(self._left)
        r_len = len(self._right)
        return op_dict[self._op](l_len, r_len)

    def __iadd__(self, other):
        """
        In-place pair-wise combine two equal length cyclers (zip).

        Parameters
        ----------
        other : Cycler
        """
        if not isinstance(other, Cycler):
            raise TypeError("Cannot += with a non-Cycler object")
        # True shallow copy of self is fine since this is in-place
        old_self = copy.copy(self)
        self._keys = _process_keys(old_self, other)
        self._left = old_self
        self._op = zip
        self._right = Cycler(other._left, other._right, other._op)
        return self

    def __imul__(self, other):
        """
        In-place outer product of two cyclers (`itertools.product`).

        Parameters
        ----------
        other : Cycler
        """
        if not isinstance(other, Cycler):
            raise TypeError("Cannot *= with a non-Cycler object")
        # True shallow copy of self is fine since this is in-place
        old_self = copy.copy(self)
        self._keys = _process_keys(old_self, other)
        self._left = old_self
        self._op = product
        self._right = Cycler(other._left, other._right, other._op)
        return self

    def __eq__(self, other):
        if len(self) != len(other):
            return False
        if self.keys ^ other.keys:
            return False
        return all(a == b for a, b in zip(self, other))

    def __ne__(self, other):
        return not (self == other)

    # Mutable via the in-place operators, hence deliberately unhashable.
    __hash__ = None

    def __repr__(self):
        op_map = {zip: '+', product: '*'}
        if self._right is None:
            lab = self.keys.pop()
            itr = list(v[lab] for v in self)
            return f"cycler({lab!r}, {itr!r})"
        else:
            op = op_map.get(self._op, '?')
            msg = "({left!r} {op} {right!r})"
            return msg.format(left=self._left, op=op, right=self._right)

    def _repr_html_(self):
        # a table showing the value of each key through a full cycle
        output = "<table>"
        sorted_keys = sorted(self.keys, key=repr)
        for key in sorted_keys:
            output += f"<th>{key!r}</th>"
        for d in iter(self):
            output += "<tr>"
            for k in sorted_keys:
                output += f"<td>{d[k]!r}</td>"
            output += "</tr>"
        output += "</table>"
        return output

    def by_key(self):
        """
        Values by key.

        This returns the transposed values of the cycler. Iterating
        over a `Cycler` yields dicts with a single value for each key,
        this method returns a `dict` of `list` which are the values
        for the given key.

        The returned value can be used to create an equivalent `Cycler`
        using only `+`.

        Returns
        -------
        transpose : dict
            dict of lists of the values for each key.
        """
        # TODO : sort out if this is a bottle neck, if there is a better way
        # and if we care.
        keys = self.keys
        out = {k: list() for k in keys}
        for d in self:
            for k in keys:
                out[k].append(d[k])
        return out

    # for back compatibility
    _transpose = by_key

    def simplify(self):
        """
        Simplify the cycler into a sum (but no products) of cyclers.

        Returns
        -------
        simple : Cycler
        """
        # TODO: sort out if it is worth the effort to make sure this is
        # balanced. Currently it is
        # (((a + b) + c) + d) vs
        # ((a + b) + (c + d))
        # I would believe that there is some performance implications
        trans = self.by_key()
        return reduce(add, (_cycler(k, v) for k, v in trans.items()))

    # Expose the module-level concat() as a method.
    concat = concat
def cycler(*args, **kwargs):
    """
    Create a new `Cycler` object from a single positional argument,
    a pair of positional arguments, or the combination of keyword arguments.

    cycler(arg)
    cycler(label1=itr1[, label2=iter2[, ...]])
    cycler(label, itr)

    Form 1 simply copies a given `Cycler` object.

    Form 2 composes a `Cycler` as an inner product of the
    pairs of keyword arguments. In other words, all of the
    iterables are cycled simultaneously, as if through zip().

    Form 3 creates a `Cycler` from a label and an iterable.
    This is useful for when the label cannot be a keyword argument
    (e.g., an integer or a name that has a space in it).

    Parameters
    ----------
    arg : Cycler
        Copy constructor for Cycler (does a shallow copy of iterables).
    label : name
        The property key. In the 2-arg form of the function,
        the label can be any hashable object. In the keyword argument
        form of the function, it must be a valid python identifier.
    itr : iterable
        Finite length iterable of the property values.
        Can be a single-property `Cycler` that would
        be like a key change, but as a shallow copy.

    Returns
    -------
    cycler : Cycler
        New `Cycler` for the given property

    Raises
    ------
    TypeError
        If positional and keyword arguments are mixed, if the lone
        positional argument is not a `Cycler`, if more than two
        positional arguments are given, or if no arguments are given.
    """
    if args and kwargs:
        # Fixed error message: it previously referred to a non-existent
        # "cyl()" function instead of cycler().
        raise TypeError("cycler() can only accept positional OR keyword "
                        "arguments -- not both.")
    if len(args) == 1:
        if not isinstance(args[0], Cycler):
            raise TypeError("If only one positional argument given, it must "
                            "be a Cycler instance.")
        return Cycler(args[0])
    elif len(args) == 2:
        return _cycler(*args)
    elif len(args) > 2:
        raise TypeError("Only a single Cycler can be accepted as the lone "
                        "positional argument. Use keyword arguments instead.")
    if kwargs:
        # Inner product: one single-key cycler per kwarg, zipped with +.
        return reduce(add, (_cycler(k, v) for k, v in kwargs.items()))
    raise TypeError("Must have at least a positional OR keyword arguments")
def _cycler(label, itr):
    """
    Create a new `Cycler` object from a property name and iterable of values.

    Parameters
    ----------
    label : hashable
        The property key.
    itr : iterable
        Finite length iterable of the property values.

    Returns
    -------
    cycler : Cycler
        New `Cycler` for the given property
    """
    if isinstance(itr, Cycler):
        keys = itr.keys
        if len(keys) != 1:
            raise ValueError("Can not create Cycler from a multi-property Cycler")
        lab = keys.pop()
        # Doesn't need to be a new list because
        # _from_iter() will be creating that new list anyway.
        itr = (entry[lab] for entry in itr)
    return Cycler._from_iter(label, itr)
|
# -*- coding: utf-8 -*-
"""
This module offers general convenience and utility functions for dealing with
datetimes.
.. versionadded:: 2.7.0
"""
from __future__ import unicode_literals
from datetime import datetime, time
def today(tzinfo=None):
    """
    Returns a :py:class:`datetime` representing the current day at midnight

    :param tzinfo:
        The time zone to attach (also used to determine the current day).

    :return:
        A :py:class:`datetime.datetime` object representing the current day
        at midnight.
    """
    now = datetime.now(tzinfo)
    # Keep only the calendar date; reset the clock to 00:00 in tzinfo.
    midnight = time(0, tzinfo=tzinfo)
    return datetime.combine(now.date(), midnight)
def default_tzinfo(dt, tzinfo):
    """
    Sets the ``tzinfo`` parameter on naive datetimes only

    This is useful for example when you are provided a datetime that may have
    either an implicit or explicit time zone, such as when parsing a time zone
    string.

    .. doctest::

        >>> from dateutil.tz import tzoffset
        >>> from dateutil.parser import parse
        >>> from dateutil.utils import default_tzinfo
        >>> dflt_tz = tzoffset("EST", -18000)
        >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
        2014-01-01 12:30:00+00:00
        >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
        2014-01-01 12:30:00-05:00

    :param dt:
        The datetime on which to replace the time zone

    :param tzinfo:
        The :py:class:`datetime.tzinfo` subclass instance to assign to
        ``dt`` if (and only if) it is naive.

    :return:
        Returns an aware :py:class:`datetime.datetime`.
    """
    # Aware datetimes pass through untouched; only naive ones get tzinfo.
    return dt if dt.tzinfo is not None else dt.replace(tzinfo=tzinfo)
def within_delta(dt1, dt2, delta):
    """
    Useful for comparing two datetimes that may have a negligible difference
    to be considered equal.
    """
    # Normalize a possibly-negative tolerance, then do a symmetric check.
    delta = abs(delta)
    return -delta <= (dt1 - dt2) <= delta
|
"""
The classes here provide support for using custom classes with
Matplotlib, e.g., those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, e.g., a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation;
rather a units implementation must register with the Registry converter
dictionary and provide a `ConversionInterface`. For example,
here is a complete implementation which supports plotting with native
datetime objects::
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
@staticmethod
def convert(value, unit, axis):
"Convert a datetime value to a scalar or array."
return dates.date2num(value)
@staticmethod
def axisinfo(unit, axis):
"Return major and minor tick locators and formatters."
if unit != 'date':
return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='date')
@staticmethod
def default_units(x, axis):
"Return the default unit for x or None."
return 'date'
# Finally we register our object type with the Matplotlib units registry.
units.registry[datetime.date] = DateConverter()
"""
from decimal import Decimal
from numbers import Number
import numpy as np
from numpy import ma
from matplotlib import cbook
class ConversionError(TypeError):
    # Error type for the unit-conversion machinery (a TypeError subclass so
    # existing ``except TypeError`` handlers still catch it).
    pass
def _is_natively_supported(x):
"""
Return whether *x* is of a type that Matplotlib natively supports or an
array of objects of such types.
"""
# Matplotlib natively supports all number types except Decimal.
if np.iterable(x):
# Assume lists are homogeneous as other functions in unit system.
for thisx in x:
if thisx is ma.masked:
continue
return isinstance(thisx, Number) and not isinstance(thisx, Decimal)
else:
return isinstance(x, Number) and not isinstance(x, Decimal)
class AxisInfo:
    """
    Information to support default axis labeling, tick labeling, and limits.

    An instance of this class must be returned by
    `ConversionInterface.axisinfo`.
    """

    def __init__(self, majloc=None, minloc=None,
                 majfmt=None, minfmt=None, label=None,
                 default_limits=None):
        """
        Parameters
        ----------
        majloc, minloc : Locator, optional
            Tick locators for the major and minor ticks.
        majfmt, minfmt : Formatter, optional
            Tick formatters for the major and minor ticks.
        label : str, optional
            The default axis label.
        default_limits : optional
            The default min and max limits of the axis if no data has
            been plotted.

        Notes
        -----
        If any of the above are ``None``, the axis will simply use the
        default value.
        """
        # Store everything verbatim; ``None`` means "use the axis default".
        self.majloc, self.minloc = majloc, minloc
        self.majfmt, self.minfmt = majfmt, minfmt
        self.label = label
        self.default_limits = default_limits
class ConversionInterface:
    """
    The minimal interface a converter must provide to turn custom data
    types (or sequences of them) into values Matplotlib can use.

    Subclasses override any of the three static methods; the defaults
    below are no-ops.
    """

    @staticmethod
    def axisinfo(unit, axis):
        """Return an `.AxisInfo` for the axis with the specified units."""
        # No axis defaults to offer in the base implementation.
        return None

    @staticmethod
    def default_units(x, axis):
        """Return the default unit for *x* or ``None`` for the given axis."""
        return None

    @staticmethod
    def convert(obj, unit, axis):
        """
        Convert *obj* using *unit* for the specified *axis*.

        If *obj* is a sequence, return the converted sequence. The output must
        be a sequence of scalars that can be used by the numpy array layer.
        """
        # Identity conversion by default.
        return obj
class DecimalConverter(ConversionInterface):
    """Converter for decimal.Decimal data to float."""

    @staticmethod
    def convert(value, unit, axis):
        """
        Convert Decimals to floats.

        The *unit* and *axis* arguments are not used.

        Parameters
        ----------
        value : decimal.Decimal or iterable
            Decimal or list of Decimal need to be converted
        """
        # Scalar Decimal: a plain float conversion suffices.
        if isinstance(value, Decimal):
            return float(value)
        # Masked arrays keep their mask through ma.asarray; anything else
        # (an iterable of Decimals) becomes a float ndarray.
        if isinstance(value, ma.MaskedArray):
            return ma.asarray(value, dtype=float)
        return np.asarray(value, dtype=float)

    # axisinfo and default_units can be inherited as Decimals are Numbers.
class Registry(dict):
    """Register types with conversion interface."""

    def get_converter(self, x):
        """Get the converter interface instance for *x*, or None."""
        # Unpack in case of e.g. Pandas or xarray object
        x = cbook._unpack_to_numpy(x)
        if isinstance(x, np.ndarray):
            # In case x in a masked array, access the underlying data (only its
            # type matters). If x is a regular ndarray, getdata() just returns
            # the array itself.
            x = np.ma.getdata(x).ravel()
            # If there are no elements in x, infer the units from its dtype
            if not x.size:
                return self.get_converter(np.array([0], dtype=x.dtype))
        # Walking the MRO lets subclasses of a registered type reuse the
        # parent class's converter.
        for cls in type(x).__mro__:  # Look up in the cache.
            try:
                return self[cls]
            except KeyError:
                pass
        try:  # If cache lookup fails, look up based on first element...
            first = cbook._safe_first_finite(x)
        except (TypeError, StopIteration):
            pass
        else:
            # ... and avoid infinite recursion for pathological iterables for
            # which indexing returns instances of the same iterable class.
            if type(first) is not type(x):
                return self.get_converter(first)
        return None
# Global converter registry; unit implementations add entries here
# (e.g. ``registry[datetime.date] = DateConverter()`` per the module docs).
registry = Registry()
# Decimal support is registered out of the box.
registry[Decimal] = DecimalConverter()
|
import builtins
import numpy as np
from numpy.testing import suppress_warnings
from operator import index
from collections import namedtuple
# Public API of this module.
__all__ = ['binned_statistic',
           'binned_statistic_2d',
           'binned_statistic_dd']

# Named result container returned by `binned_statistic`.
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
                                   ('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
                     bins=10, range=None):
    """
    Compute a binned statistic for one or more sets of data.

    This is a generalization of a histogram function. A histogram divides
    the space into bins, and returns the count of the number of points in
    each bin. This function allows the computation of the sum, mean, median,
    or other statistic of the values (or set of values) within each bin.

    Parameters
    ----------
    x : (N,) array_like
        A sequence of values to be binned.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed. This must be
        the same shape as `x`, or a set of sequences - each the same shape as
        `x`. If `values` is a set of sequences, the statistic will be computed
        on each independently.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').
        The following statistics are available:

        * 'mean' : compute the mean of values for points within each bin.
          Empty bins will be represented by NaN.
        * 'std' : compute the standard deviation within each bin. This
          is implicitly calculated with ddof=0.
        * 'median' : compute the median of values for points within each
          bin. Empty bins will be represented by NaN.
        * 'count' : compute the count of points within each bin. This is
          identical to an unweighted histogram. `values` array is not
          referenced.
        * 'sum' : compute the sum of values for points within each bin.
          This is identical to a weighted histogram.
        * 'min' : compute the minimum of values for points within each bin.
          Empty bins will be represented by NaN.
        * 'max' : compute the maximum of values for point within each bin.
          Empty bins will be represented by NaN.
        * function : a user-defined function which takes a 1D array of
          values, and outputs a single numerical statistic. This function
          will be called on the values in each bin. Empty bins will be
          represented by function([]), or NaN if this returns an error.

    bins : int or sequence of scalars, optional
        If `bins` is an int, it defines the number of equal-width bins in the
        given range (10 by default). If `bins` is a sequence, it defines the
        bin edges, including the rightmost edge, allowing for non-uniform bin
        widths. Values in `x` that are smaller than lowest bin edge are
        assigned to bin number 0, values beyond the highest bin are assigned to
        ``bins[-1]``. If the bin edges are specified, the number of bins will
        be, (nx = len(bins)-1).
    range : (float, float) or [(float, float)], optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(x.min(), x.max())``. Values outside the range are
        ignored.

    Returns
    -------
    statistic : array
        The values of the selected statistic in each bin.
    bin_edges : array of dtype float
        Return the bin edges ``(length(statistic)+1)``.
    binnumber: 1-D ndarray of ints
        Indices of the bins (corresponding to `bin_edges`) in which each value
        of `x` belongs. Same length as `values`. A binnumber of `i` means the
        corresponding value is between (bin_edges[i-1], bin_edges[i]).

    See Also
    --------
    numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd

    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words, if
    `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
    but excluding 2) and the second ``[2, 3)``. The last bin, however, is
    ``[3, 4]``, which *includes* 4.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt

    First some basic examples:

    Create two evenly spaced bins in the range of the given sample, and sum the
    corresponding values in each of those bins:

    >>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
    >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
    BinnedStatisticResult(statistic=array([4. , 4.5]),
            bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))

    Multiple arrays of values can also be passed. The statistic is calculated
    on each set independently:

    >>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
    >>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
    BinnedStatisticResult(statistic=array([[4. , 4.5],
           [8. , 9. ]]), bin_edges=array([1., 4., 7.]),
           binnumber=array([1, 1, 1, 2, 2]))

    >>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
    ...                        bins=3)
    BinnedStatisticResult(statistic=array([1., 2., 4.]),
            bin_edges=array([1., 2., 3., 4.]),
            binnumber=array([1, 2, 1, 2, 3]))

    As a second example, we now generate some random data of sailing boat speed
    as a function of wind speed, and then determine how fast our boat is for
    certain wind speeds:

    >>> rng = np.random.default_rng()
    >>> windspeed = 8 * rng.random(500)
    >>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500)
    >>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
    ...                 boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
    >>> plt.figure()
    >>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
    >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
    ...            label='binned statistic of data')
    >>> plt.legend()

    Now we can use ``binnumber`` to select all datapoints with a windspeed
    below 1:

    >>> low_boatspeed = boatspeed[binnumber == 0]

    As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
    plot of a distribution that shows the mean and distribution around that
    mean per bin, on top of a regular histogram and the probability
    distribution function:

    >>> x = np.linspace(0, 5, num=500)
    >>> x_pdf = stats.maxwell.pdf(x)
    >>> samples = stats.maxwell.rvs(size=10000)
    >>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
    ...         statistic='mean', bins=25)
    >>> bin_width = (bin_edges[1] - bin_edges[0])
    >>> bin_centers = bin_edges[1:] - bin_width/2
    >>> plt.figure()
    >>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
    ...          alpha=0.2, label='histogram of data')
    >>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
    >>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
    ...            label='binned statistic of data')
    >>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
    >>> plt.legend(fontsize=10)
    >>> plt.show()

    """
    try:
        N = len(bins)
    except TypeError:
        # `bins` is a scalar bin count, not a sequence of edges.
        N = 1

    if N != 1:
        # Explicit bin edges: wrap in a list for the D-dimensional core.
        bins = [np.asarray(bins, float)]

    if range is not None:
        if len(range) == 2:
            range = [range]

    # Delegate the real work to the D-dimensional implementation with D=1.
    medians, edges, binnumbers = binned_statistic_dd(
        [x], values, statistic, bins, range)

    return BinnedStatisticResult(medians, edges[0], binnumbers)
# Named result container returned by `binned_statistic_2d`.
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
                                     ('statistic', 'x_edge', 'y_edge',
                                      'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
                        bins=10, range=None, expand_binnumbers=False):
    """
    Compute a bidimensional binned statistic for one or more sets of data.

    This is a generalization of a histogram2d function. A histogram divides
    the space into bins, and returns the count of the number of points in
    each bin. This function allows the computation of the sum, mean, median,
    or other statistic of the values (or set of values) within each bin.

    Parameters
    ----------
    x : (N,) array_like
        A sequence of values to be binned along the first dimension.
    y : (N,) array_like
        A sequence of values to be binned along the second dimension.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed. This must be
        the same shape as `x`, or a list of sequences - each with the same
        shape as `x`. If `values` is such a list, the statistic will be
        computed on each independently.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').
        The following statistics are available:

        * 'mean' : compute the mean of values for points within each bin.
          Empty bins will be represented by NaN.
        * 'std' : compute the standard deviation within each bin. This
          is implicitly calculated with ddof=0.
        * 'median' : compute the median of values for points within each
          bin. Empty bins will be represented by NaN.
        * 'count' : compute the count of points within each bin. This is
          identical to an unweighted histogram. `values` array is not
          referenced.
        * 'sum' : compute the sum of values for points within each bin.
          This is identical to a weighted histogram.
        * 'min' : compute the minimum of values for points within each bin.
          Empty bins will be represented by NaN.
        * 'max' : compute the maximum of values for point within each bin.
          Empty bins will be represented by NaN.
        * function : a user-defined function which takes a 1D array of
          values, and outputs a single numerical statistic. This function
          will be called on the values in each bin. Empty bins will be
          represented by function([]), or NaN if this returns an error.

    bins : int or [int, int] or array_like or [array, array], optional
        The bin specification:

        * the number of bins for the two dimensions (nx = ny = bins),
        * the number of bins in each dimension (nx, ny = bins),
        * the bin edges for the two dimensions (x_edge = y_edge = bins),
        * the bin edges in each dimension (x_edge, y_edge = bins).

        If the bin edges are specified, the number of bins will be,
        (nx = len(x_edge)-1, ny = len(y_edge)-1).
    range : (2,2) array_like, optional
        The leftmost and rightmost edges of the bins along each dimension
        (if not specified explicitly in the `bins` parameters):
        [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
        considered outliers and not tallied in the histogram.
    expand_binnumbers : bool, optional
        'False' (default): the returned `binnumber` is a shape (N,) array of
        linearized bin indices.
        'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
        ndarray, where each row gives the bin numbers in the corresponding
        dimension.
        See the `binnumber` returned value, and the `Examples` section.

        .. versionadded:: 0.17.0

    Returns
    -------
    statistic : (nx, ny) ndarray
        The values of the selected statistic in each two-dimensional bin.
    x_edge : (nx + 1) ndarray
        The bin edges along the first dimension.
    y_edge : (ny + 1) ndarray
        The bin edges along the second dimension.
    binnumber : (N,) array of ints or (2,N) ndarray of ints
        This assigns to each element of `sample` an integer that represents the
        bin in which this observation falls. The representation depends on the
        `expand_binnumbers` argument. See `Notes` for details.

    See Also
    --------
    numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd

    Notes
    -----
    Binedges:
    All but the last (righthand-most) bin is half-open. In other words, if
    `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
    but excluding 2) and the second ``[2, 3)``. The last bin, however, is
    ``[3, 4]``, which *includes* 4.

    `binnumber`:
    This returned argument assigns to each element of `sample` an integer that
    represents the bin in which it belongs. The representation depends on the
    `expand_binnumbers` argument. If 'False' (default): The returned
    `binnumber` is a shape (N,) array of linearized indices mapping each
    element of `sample` to its corresponding bin (using row-major ordering).
    If 'True': The returned `binnumber` is a shape (2,N) ndarray where
    each row indicates bin placements for each dimension respectively. In each
    dimension, a binnumber of `i` means the corresponding value is between
    (D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import stats

    Calculate the counts with explicit bin-edges:

    >>> x = [0.1, 0.1, 0.1, 0.6]
    >>> y = [2.1, 2.6, 2.1, 2.1]
    >>> binx = [0.0, 0.5, 1.0]
    >>> biny = [2.0, 2.5, 3.0]
    >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny])
    >>> ret.statistic
    array([[2., 1.],
           [1., 0.]])

    The bin in which each sample is placed is given by the `binnumber`
    returned parameter. By default, these are the linearized bin indices:

    >>> ret.binnumber
    array([5, 6, 5, 9])

    The bin indices can also be expanded into separate entries for each
    dimension using the `expand_binnumbers` parameter:

    >>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny],
    ...                                 expand_binnumbers=True)
    >>> ret.binnumber
    array([[1, 1, 1, 2],
           [1, 2, 1, 1]])

    Which shows that the first three elements belong in the xbin 1, and the
    fourth into xbin 2; and so on for y.

    """
    # This code is based on np.histogram2d
    try:
        N = len(bins)
    except TypeError:
        # `bins` is a scalar bin count, not a sequence.
        N = 1

    if N != 1 and N != 2:
        # A single sequence of edges applies to both dimensions.
        xedges = yedges = np.asarray(bins, float)
        bins = [xedges, yedges]

    # Delegate to the D-dimensional implementation with D=2.
    medians, edges, binnumbers = binned_statistic_dd(
        [x, y], values, statistic, bins, range,
        expand_binnumbers=expand_binnumbers)

    return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
# Named result container returned by `binned_statistic_dd`.
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
                                     ('statistic', 'bin_edges',
                                      'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
                        bins=10, range=None, expand_binnumbers=False,
                        binned_statistic_result=None):
    """
    Compute a multidimensional binned statistic for a set of data.

    This is a generalization of a histogramdd function.  A histogram divides
    the space into bins, and returns the count of the number of points in
    each bin.  This function allows the computation of the sum, mean, median,
    or other statistic of the values within each bin.

    Parameters
    ----------
    sample : array_like
        Data to histogram passed as a sequence of N arrays of length D, or
        as an (N,D) array.
    values : (N,) array_like or list of (N,) array_like
        The data on which the statistic will be computed.  This must be
        the same shape as `sample`, or a list of sequences - each with the
        same shape as `sample`.  If `values` is such a list, the statistic
        will be computed on each independently.
    statistic : string or callable, optional
        The statistic to compute (default is 'mean').
        The following statistics are available:

          * 'mean' : compute the mean of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'median' : compute the median of values for points within each
            bin. Empty bins will be represented by NaN.
          * 'count' : compute the count of points within each bin.  This is
            identical to an unweighted histogram.  `values` array is not
            referenced.
          * 'sum' : compute the sum of values for points within each bin.
            This is identical to a weighted histogram.
          * 'std' : compute the standard deviation within each bin. This
            is implicitly calculated with ddof=0. If the number of values
            within a given bin is 0 or 1, the computed standard deviation
            value will be 0 for the bin.
          * 'min' : compute the minimum of values for points within each bin.
            Empty bins will be represented by NaN.
          * 'max' : compute the maximum of values for point within each bin.
            Empty bins will be represented by NaN.
          * function : a user-defined function which takes a 1D array of
            values, and outputs a single numerical statistic. This function
            will be called on the values in each bin.  Empty bins will be
            represented by function([]), or NaN if this returns an error.

    bins : sequence or positive int, optional
        The bin specification must be in one of the following forms:

          * A sequence of arrays describing the bin edges along each
            dimension.
          * The number of bins for each dimension (nx, ny, ... = bins).
          * The number of bins for all dimensions (nx = ny = ... = bins).
    range : sequence, optional
        A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
        values along each dimension.
    expand_binnumbers : bool, optional
        'False' (default): the returned `binnumber` is a shape (N,) array of
        linearized bin indices.
        'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
        ndarray, where each row gives the bin numbers in the corresponding
        dimension.
        See the `binnumber` returned value, and the `Examples` section of
        `binned_statistic_2d`.
    binned_statistic_result : BinnedStatisticddResult
        Result of a previous call to the function in order to reuse bin edges
        and bin numbers with new values and/or a different statistic.
        To reuse bin numbers, `expand_binnumbers` must have been set to False
        (the default)

        .. versionadded:: 0.17.0

    Returns
    -------
    statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each two-dimensional bin.
    bin_edges : list of ndarrays
        A list of D arrays describing the (nxi + 1) bin edges for each
        dimension.
    binnumber : (N,) array of ints or (D,N) ndarray of ints
        This assigns to each element of `sample` an integer that represents
        the bin in which this observation falls.  The representation depends
        on the `expand_binnumbers` argument.  See `Notes` for details.

    See Also
    --------
    numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d

    Notes
    -----
    Binedges:
    All but the last (righthand-most) bin is half-open in each dimension.  In
    other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
    ``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``.  The
    last bin, however, is ``[3, 4]``, which *includes* 4.

    `binnumber`:
    This returned argument assigns to each element of `sample` an integer
    that represents the bin in which it belongs.  The representation depends
    on the `expand_binnumbers` argument. If 'False' (default): The returned
    `binnumber` is a shape (N,) array of linearized indices mapping each
    element of `sample` to its corresponding bin (using row-major ordering).
    If 'True': The returned `binnumber` is a shape (D,N) ndarray where
    each row indicates bin placements for each dimension respectively.  In
    each dimension, a binnumber of `i` means the corresponding value is
    between (bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import stats
    >>> import matplotlib.pyplot as plt
    >>> from mpl_toolkits.mplot3d import Axes3D

    Take an array of 600 (x, y) coordinates as an example.
    `binned_statistic_dd` can handle arrays of higher dimension `D`. But a
    plot of dimension `D+1` is required.

    >>> mu = np.array([0., 1.])
    >>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
    >>> multinormal = stats.multivariate_normal(mu, sigma)
    >>> data = multinormal.rvs(size=600, random_state=235412)
    >>> data.shape
    (600, 2)

    Create bins and count how many arrays fall in each bin:

    >>> N = 60
    >>> x = np.linspace(-3, 3, N)
    >>> y = np.linspace(-3, 4, N)
    >>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
    ...                                 statistic='count')
    >>> bincounts = ret.statistic

    Set the volume and the location of bars:

    >>> dx = x[1] - x[0]
    >>> dy = y[1] - y[0]
    >>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
    >>> z = 0

    >>> bincounts = bincounts.ravel()
    >>> x = x.ravel()
    >>> y = y.ravel()

    >>> fig = plt.figure()
    >>> ax = fig.add_subplot(111, projection='3d')
    >>> with np.errstate(divide='ignore'):   # silence random axes3d warning
    ...     ax.bar3d(x, y, z, dx, dy, bincounts)

    Reuse bin numbers and bin edges with new values:

    >>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),
    ...                                  binned_statistic_result=ret,
    ...                                  statistic='mean')
    """
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
    if not callable(statistic) and statistic not in known_stats:
        raise ValueError('invalid statistic %r' % (statistic,))

    # Normalize integer-like bin specifications (e.g. np.int64) to a real int.
    try:
        bins = index(bins)
    except TypeError:
        # bins is not an integer
        pass
    # If bins was an integer-like object, now it is an actual Python int.

    # NOTE: for _bin_edges(), see e.g. gh-11365
    if isinstance(bins, int) and not np.isfinite(sample).all():
        raise ValueError('%r contains non-finite values.' % (sample,))

    # `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
    # `Dlen` is the length of elements along each dimension.
    # This code is based on np.histogramdd
    try:
        # `sample` is an ND-array.
        Dlen, Ndim = sample.shape
    except (AttributeError, ValueError):
        # `sample` is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        Dlen, Ndim = sample.shape

    # Store initial shape of `values` to preserve it in the output
    values = np.asarray(values)
    input_shape = list(values.shape)
    # Make sure that `values` is 2D to iterate over rows
    values = np.atleast_2d(values)
    Vdim, Vlen = values.shape

    # Make sure `values` match `sample`
    if(statistic != 'count' and Vlen != Dlen):
        raise AttributeError('The number of `values` elements must match the '
                             'length of each `sample` dimension.')

    try:
        M = len(bins)
        if M != Ndim:
            raise AttributeError('The dimension of bins must be equal '
                                 'to the dimension of the sample x.')
    except TypeError:
        # A single int: use the same bin count for every dimension.
        bins = Ndim * [bins]

    if binned_statistic_result is None:
        nbin, edges, dedges = _bin_edges(sample, bins, range)
        binnumbers = _bin_numbers(sample, nbin, edges, dedges)
    else:
        # Reuse edges/binnumbers from a previous call; only rebuild the
        # derived quantities (bin counts and edge spacings).
        edges = binned_statistic_result.bin_edges
        nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)])
        # +1 for outlier bins
        dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)]
        binnumbers = binned_statistic_result.binnumber

    # One row of flat bins per `values` row; filled per-statistic below.
    result = np.empty([Vdim, nbin.prod()], float)

    if statistic == 'mean':
        result.fill(np.nan)
        flatcount = np.bincount(binnumbers, None)
        a = flatcount.nonzero()
        for vv in builtins.range(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            result[vv, a] = flatsum[a] / flatcount[a]
    elif statistic == 'std':
        result.fill(0)
        _calc_binned_statistic(Vdim, binnumbers, result, values, np.std)
    elif statistic == 'count':
        result.fill(0)
        flatcount = np.bincount(binnumbers, None)
        a = np.arange(len(flatcount))
        result[:, a] = flatcount[np.newaxis, :]
    elif statistic == 'sum':
        result.fill(0)
        for vv in builtins.range(Vdim):
            flatsum = np.bincount(binnumbers, values[vv])
            a = np.arange(len(flatsum))
            result[vv, a] = flatsum
    elif statistic == 'median':
        result.fill(np.nan)
        _calc_binned_statistic(Vdim, binnumbers, result, values, np.median)
    elif statistic == 'min':
        result.fill(np.nan)
        _calc_binned_statistic(Vdim, binnumbers, result, values, np.min)
    elif statistic == 'max':
        result.fill(np.nan)
        _calc_binned_statistic(Vdim, binnumbers, result, values, np.max)
    elif callable(statistic):
        # Probe the callable on an empty input to learn the fill value for
        # empty bins; fall back to NaN if it raises.
        with np.errstate(invalid='ignore'), suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            try:
                null = statistic([])
            except Exception:
                null = np.nan
        result.fill(null)
        _calc_binned_statistic(Vdim, binnumbers, result, values, statistic,
                               is_callable=True)

    # Shape into a proper matrix
    result = result.reshape(np.append(Vdim, nbin))

    # Remove outliers (indices 0 and -1 for each bin-dimension).
    core = tuple([slice(None)] + Ndim * [slice(1, -1)])
    result = result[core]

    # Unravel binnumbers into an ndarray, each row the bins for each dimension
    if(expand_binnumbers and Ndim > 1):
        binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))

    if np.any(result.shape[1:] != nbin - 2):
        raise RuntimeError('Internal Shape Error')

    # Reshape to have output (`result`) match input (`values`) shape
    result = result.reshape(input_shape[:-1] + list(nbin-2))

    return BinnedStatisticddResult(result, edges, binnumbers)
def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func,
                           is_callable=False):
    """Apply `stat_func` to each occupied bin and store into `result`.

    `result[vv, bin_id]` is overwritten for every occupied bin; empty bins
    keep whatever fill value the caller initialized `result` with.
    """
    occupied_bins = np.unique(bin_numbers)
    for row in builtins.range(Vdim):
        binned = _create_binned_data(bin_numbers, occupied_bins, values, row)
        for bin_id in occupied_bins:
            # Skip np.std on bins with fewer than 2 points (speed-up; the
            # prefilled value stands in) — unless the statistic is a
            # user-supplied callable, which must always run.
            if (not is_callable and stat_func is np.std
                    and len(binned[bin_id]) < 2):
                continue
            result[row, bin_id] = stat_func(np.array(binned[bin_id]))
def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv):
""" Create hashmap of bin ids to values in bins
key: bin number
value: list of binned data
"""
bin_map = dict()
for i in unique_bin_numbers:
bin_map[i] = []
for i in builtins.range(len(bin_numbers)):
bin_map[bin_numbers[i]].append(values[vv, i])
return bin_map
def _bin_edges(sample, bins=None, range=None):
    """Create the bin-edge arrays for each dimension of `sample`.

    Returns ``(nbin, edges, dedges)``: per-dimension bin counts (including
    the two outlier bins), the edge arrays, and the spacings between edges.

    Note: the `range` parameter deliberately shadows the builtin; the code
    below uses ``builtins.range`` for iteration.
    """
    Dlen, Ndim = sample.shape

    nbin = np.empty(Ndim, int)    # Number of bins in each dimension
    edges = Ndim * [None]         # Bin edges for each dim (will be 2D array)
    dedges = Ndim * [None]        # Spacing between edges (will be 2D array)

    # Select range for each dimension
    # Used only if number of bins is given.
    if range is None:
        smin = np.atleast_1d(np.array(sample.min(axis=0), float))
        smax = np.atleast_1d(np.array(sample.max(axis=0), float))
    else:
        if len(range) != Ndim:
            raise ValueError(
                f"range given for {len(range)} dimensions; {Ndim} required")
        smin = np.empty(Ndim)
        smax = np.empty(Ndim)
        for i in builtins.range(Ndim):
            if range[i][1] < range[i][0]:
                raise ValueError(
                    "In {}range, start must be <= stop".format(
                        f"dimension {i + 1} of " if Ndim > 1 else ""))
            smin[i], smax[i] = range[i]

    # Make sure the bins have a finite width.
    for i in builtins.range(len(smin)):
        if smin[i] == smax[i]:
            # Degenerate dimension (all samples equal): widen by 0.5 each way.
            smin[i] = smin[i] - .5
            smax[i] = smax[i] + .5

    # Preserve sample floating point precision in bin edges
    edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating)
                   else float)

    # Create edge arrays
    for i in builtins.range(Ndim):
        if np.isscalar(bins[i]):
            nbin[i] = bins[i] + 2  # +2 for outlier bins
            edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1,
                                   dtype=edges_dtype)
        else:
            edges[i] = np.asarray(bins[i], edges_dtype)
            nbin[i] = len(edges[i]) + 1  # +1 for outlier bins
        dedges[i] = np.diff(edges[i])

    nbin = np.asarray(nbin)

    return nbin, edges, dedges
def _bin_numbers(sample, nbin, edges, dedges):
"""Compute the bin number each sample falls into, in each dimension
"""
Dlen, Ndim = sample.shape
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in range(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in range(Ndim):
# Find the rounding precision
dedges_min = dedges[i].min()
if dedges_min == 0:
raise ValueError('The smallest edge difference is numerically 0.')
decimal = int(-np.log10(dedges_min)) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
return binnumbers
|
"""
Common code used in multiple modules.
"""
class weekday(object):
    """A weekday (0=MO .. 6=SU), optionally qualified by an occurrence `n`
    (e.g. the +2nd or -1st such weekday in a period)."""

    __slots__ = ["weekday", "n"]

    def __init__(self, weekday, n=None):
        self.weekday = weekday
        self.n = n

    def __call__(self, n):
        # Reuse `self` when the qualifier is unchanged; otherwise build a
        # fresh instance of the same (possibly derived) class.
        return self if n == self.n else self.__class__(self.weekday, n)

    def __eq__(self, other):
        try:
            return self.weekday == other.weekday and self.n == other.n
        except AttributeError:
            # `other` is not weekday-like.
            return False

    def __hash__(self):
        return hash((self.weekday, self.n))

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        name = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
        return "%s(%+d)" % (name, self.n) if self.n else name
# vim:ts=4:sw=4:et
|
# NOTE(review): Python 2-only code — `print` as a statement and `ur'...'`
# string literals are syntax errors under Python 3.  The `#: E275:5` marker
# suggests this is lint-checker test fixture data, not importable code;
# left byte-identical on purpose.
import sys
print 1, 2 >> sys.stdout
foo = ur'This is not possible in Python 3.'
# This is actually printing a tuple.
#: E275:5
print(1, 2)
# True and False are not keywords in Python 2 and therefore there's no need for
# a space.
norman = True+False
|
import typing as t
from . import Markup
def escape(s: t.Any) -> Markup:
    """Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
    the string with HTML-safe sequences. Use this if you need to display
    text that might contain such characters in HTML.

    If the object has an ``__html__`` method, it is called and the
    return value is assumed to already be safe for HTML.

    :param s: An object to be converted to a string and escaped.
    :return: A :class:`Markup` string with the escaped text.
    """
    if hasattr(s, "__html__"):
        return Markup(s.__html__())

    # BUG FIX: the replacement targets must be the HTML *entities*.  The
    # previous code replaced each character with itself (the entities had
    # been decoded back to literal characters), so no escaping happened at
    # all — an XSS hazard for any HTML built from user input.  `&` must be
    # replaced first so later entities are not double-escaped.
    return Markup(
        str(s)
        .replace("&", "&amp;")
        .replace(">", "&gt;")
        .replace("<", "&lt;")
        .replace("'", "&#39;")
        .replace('"', "&#34;")
    )
def escape_silent(s: t.Optional[t.Any]) -> Markup:
    """Like :func:`escape` but treats ``None`` as the empty string.
    Useful with optional values, as otherwise you get the string
    ``'None'`` when the value is ``None``.

    >>> escape(None)
    Markup('None')
    >>> escape_silent(None)
    Markup('')
    """
    return Markup() if s is None else escape(s)
def soft_str(s: t.Any) -> str:
    """Convert an object to a string if it isn't already. This preserves
    a :class:`Markup` string rather than converting it back to a basic
    string, so it will still be marked as safe and won't be escaped
    again.

    >>> value = escape("<User 1>")
    >>> value
    Markup('&lt;User 1&gt;')
    >>> escape(str(value))
    Markup('&amp;lt;User 1&amp;gt;')
    >>> escape(soft_str(value))
    Markup('&lt;User 1&gt;')
    """
    if not isinstance(s, str):
        return str(s)
    return s
|
def print_table(albums):
    """Print `albums` (artist, album, year, genre, length tuples) as an
    ANSI-green-headed fixed-width table; print a notice when empty."""
    def green(text):
        return "\033[32m" + text + "\033[0m"

    header = " | ".join((
        green("artist name".center(30)),
        green("album name".center(30)),
        green("year".center(4)),
        green("genre".center(18)),
        green("length".center(6)),
    ))
    print("-" * 101 + "\n" + header + "\n" + "-" * 101)

    for artist_name, album_name, release_year, genre, length in albums:
        print(' | '.join((
            artist_name.ljust(30),
            album_name.ljust(30),
            str(release_year).ljust(4),
            genre.ljust(18),
            length.ljust(6),
        )))
    print('-' * 101)

    # album not found
    if len(albums) == 0:
        print("No albums found")
def choose_option():
    """Display the menu and return the user's choice, lowercased."""
    available_options = (
        "1: I want to view all imported albums",
        "2: I want to find all albums by genre",
        "3: I want to find all albums from given time range",
        "4: I want to find shortest/longest album",
        "5: I want to find all albums created by given artist",
        "6: I want to find album by album name",
        "7: I want to get full report in form of set of given statistics",
        "add: Add new album",
        "q: Quit",
    )
    print("\n".join(available_options))
    return input("\nPlease enter option: ").lower()
|
# CS 177 – labprep4.py
# Nicholas Koontz
# This program takes a string and builds a new string from that string

# User inputs
print("Enter a String:", end='')
text = str(input())
print("Enter number of sub segments:", end='')
segment_count = int(input())
print("Enter the separator:", end='')
separator = str(input())

# Cut `text` into equal-size segments (any remainder characters are
# dropped, as in the original), reverse their order, and join.
segment_len = len(text) // segment_count
segments = [text[i * segment_len:(i + 1) * segment_len]
            for i in range(len(text) // segment_len)]
print(separator.join(segments[::-1]))
print("")
# Open a file, read its contents, and print a new Purdue email address for
# every name in it.
print("This program opens a file reads the file and creates a new Purdue email from the names in the file")
# User input
print("Type what file you would like to open:", end='')
infile = str(input())

# FIXES versus the original:
#  * `with` guarantees both files are closed even if a line is malformed
#    (previously `emails.txt` was never closed on error);
#  * the trailing newline is removed with rstrip('\n') instead of chopping
#    the last character, which silently dropped a real letter whenever the
#    final line had no trailing newline;
#  * dead code removed (unused k/b/email lists and the no-op `b = file`).
with open(infile, 'r') as names_file, open("emails.txt", 'w') as out:
    for line in names_file:
        name = line.rstrip('\n').lower()
        first_initial = name[0]          # first letter of the first name
        # At most the first 7 letters of the last name.
        last_name = name.split(' ')[1][:7]
        out.write(first_initial + last_name + "@purdue.edu" + '\n')
|
# CS 177 – labprep3.py
# Nicholas Koontz
# This program prompts the user for the number of columns to display and then
# displays a table of calculations for each one
import math

# Inputs
# FIX: int(input()) instead of eval(input()) — eval() executes arbitrary
# user-supplied code (a security hazard); range() requires integers anyway,
# so int() is both safer and stricter.
print('What is the starting value?')
x = int(input())
print('What is the endding value?')
y = int(input())
print('What is the step of the values?')
step = int(input())
print('How many columns should there be?')
columns = int(input())
print()

# Print basic information
print("This program displays math tables for multiples of", step)
print("How many columns should be displayed:", columns)
print()


def _print_row(label, func):
    """Print one tab-separated row: `label`, then func(v) for each value in
    [x, y] stepping by `step`, rounded to 2 decimals (round() leaves the
    integer rows — Num and Sqr — unchanged)."""
    print(label, end='\t')
    for value in range(x, y + 1, step):
        print(round(func(value), 2), end='\t')
    print()


# One helper call per row replaces eight copy-pasted loops; the unused
# accumulator variables (j1..q1, o1) from the original are gone.
_print_row("Num:", lambda v: v)
print("=====================================")
_print_row("Sqr", lambda v: v ** 2)
_print_row("SqRt", math.sqrt)
_print_row("Sin", lambda v: math.sin(math.radians(v)))
_print_row("Cos", lambda v: math.cos(math.radians(v)))
_print_row("Tan", lambda v: math.tan(math.radians(v)))
_print_row("Log", math.log)
_print_row("Log10", math.log10)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.