repo_name
stringlengths 6
97
| path
stringlengths 3
341
| text
stringlengths 8
1.02M
|
|---|---|---|
Belyanova/Python_-training
|
test/test_phones.py
|
<reponame>Belyanova/Python_-training
import re
import pytest
import allure
def test_phones_on_home_page(app):
    """Phones shown on the home page must equal the merged edit-page phones."""
    with allure.step('Given a user from home and edit page'):
        home_user = app.user.get_user_list()[0]
        edit_user = app.user.get_user_info_from_edit_page(0)
    with allure.step('Then the user from home page is equal to the user from edit page '):
        # The home page concatenates all phone fields into one string;
        # rebuild that string from the edit-page data and compare.
        expected = merge_phones_like_on_home_page(edit_user)
        assert home_user.all_phones_from_home_page == expected
def test_phones_on_view_page(app):
    """Each phone field on the view page must match the edit page's value."""
    with allure.step('Given a user from home and edit page'):
        view_user = app.user.get_user_from_view_page(0)
        edit_user = app.user.get_user_info_from_edit_page(0)
    with allure.step('Then the user from home page is equal to the user from edit page '):
        # Compare the four phone fields one by one.
        assert view_user.phone_home == edit_user.phone_home
        assert view_user.phone_mobile == edit_user.phone_mobile
        assert view_user.phone_work == edit_user.phone_work
        assert view_user.phone2 == edit_user.phone2
def clear(s):
    """Strip formatting characters (parentheses, spaces, dashes) from a phone string."""
    # Character filter instead of a regex substitution; same result.
    return "".join(ch for ch in s if ch not in "() -")
def merge_phones_like_on_home_page(user):
    """Join the user's non-empty, cleaned phone numbers with newlines,
    mirroring how the home page renders them."""
    phones = [user.phone_home, user.phone_mobile, user.phone_work, user.phone2]
    # Drop missing fields, strip formatting, then drop entries that became empty.
    cleaned = [clear(p) for p in phones if p is not None]
    return "\n".join(p for p in cleaned if p != "")
|
Belyanova/Python_-training
|
model/configurations_login.py
|
<gh_stars>0
class Configurations_login:
    """Plain holder for a username/password credential pair."""

    def __init__(self, username, password):
        # No validation here; callers pass credentials straight through.
        self.password = password
        self.username = username
|
Belyanova/Python_-training
|
model/user_in_group.py
|
class UserGroup:
    """Association record linking a user (id) to a group (group_id)."""

    def __init__(self, id=None, group_id=None):
        # Both keys default to None so partially-known associations can be built.
        self.id = id
        self.group_id = group_id
|
695195646/robot_eyes
|
untitled1/Hash_img.py
|
<reponame>695195646/robot_eyes<gh_stars>0
#coding-utf-8
import cv2
import numpy as np
import os
import shutil
# Directory holding the image library to search through
path = "D:\\untitled1\\photo"
# Reference image supplied by the user
imgPathSrc = "\\untitled1\\photo\\p1.jpg"
# Similarity threshold: scores above this count as a match
simil = 0.85
# Destination folder that matching images are copied into
saveFile = "C:\\Users\\EDZ\\Desktop\\test_consult\\"
def ORBImgSimilarity(imgPathSrc, imgPathCom):
    """
    Compute the similarity of two images via ORB feature matching.

    :param imgPathSrc: path of the reference image
    :param imgPathCom: path of the image to compare against
    :return: similarity ratio in [0.0, 1.0]; 0.0 when matching fails
    """
    try:
        # Load both images as grayscale; ORB operates on single-channel data.
        imgSrc = cv2.imread(imgPathSrc, cv2.IMREAD_GRAYSCALE)
        imgCom = cv2.imread(imgPathCom, cv2.IMREAD_GRAYSCALE)
        # Detect keypoints and compute binary descriptors.
        orb = cv2.ORB_create()
        kpSrc, deSrc = orb.detectAndCompute(imgSrc, None)
        kpCom, deCom = orb.detectAndCompute(imgCom, None)
        # Hamming distance is the right metric for ORB's binary descriptors.
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)
        matches = bf.knnMatch(deSrc, trainDescriptors=deCom, k=2)
        if not matches:
            # No feature matches at all: avoid ZeroDivisionError below.
            return 0.0
        # Lowe's ratio test: keep matches clearly better than their runner-up.
        # Guard against pairs with fewer than 2 neighbours, which knnMatch
        # can return for small descriptor sets.
        good = [pair[0] for pair in matches
                if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance]
        return len(good) / len(matches)
    except Exception:
        # Was `return '0'` (a string) while the success path returns a float;
        # return a numeric zero so the type is consistent for callers.
        return 0.0
#------------------------------PHash-------------------------------------#
#缩小图片尺寸并且图片灰度化
def imgResize(imgSrc):
    """
    Shrink an image to 8x8 and grayscale it (pHash preprocessing step).

    :param imgSrc: source image array
    :return: 8x8 single-channel grayscale image
    """
    shrunk = cv2.resize(imgSrc, (8, 8))
    # cv2.imshow("newImg:",newImg)
    # Collapse the colour channels to a single intensity per pixel.
    return cv2.cvtColor(shrunk, cv2.COLOR_RGB2GRAY)
#计算图片灰度平均值
def transformImg(imgGray):
    """
    Flatten an 8x8 grayscale image into a 64-element "1"/"0" bit list.

    Each cell becomes "1" when its intensity exceeds the image's mean grey
    level and "0" otherwise — the pHash bit string.

    :param imgGray: 8x8 grayscale image (array-like)
    :return: list of 64 strings, each "1" or "0", in row-major order
    """
    imgTemp = np.array(imgGray)
    # numpy reduction replaces the hand-rolled double loop that also
    # shadowed the builtin `sum`.
    total = int(imgTemp.sum())
    print("灰度值总和为: ", total)
    avg = total / 64
    print("灰度值均值为: ", avg)
    # Row-major flatten matches the original nested i/j iteration order.
    return ["1" if pixel > avg else "0" for pixel in imgTemp.flatten()]
def comPercent(arrA, arrB):
    """
    Count positions where two 64-element hash bit lists agree.

    :param arrA: bit list of the reference image
    :param arrB: bit list of the library image
    :return: number of matching positions (0..64)
    """
    # Generator + builtin sum replaces the manual counter loop (which also
    # shadowed the builtin `sum`); indexing exactly 64 positions preserves
    # the original fixed-length contract.
    return sum(1 for i in range(64) if arrA[i] == arrB[i])
# List the image library files
files = os.listdir(path)
# print("files: ",files)
# Compare the reference image against every library image
for i in range(0, len(files)):
    print("待比较图片为:" + files[i])
    imgPathCom = "\\untitled1\\photo\\" + files[i]
    imgSrc = cv2.imread(imgPathSrc)
    imgCom = cv2.imread(imgPathCom)
    imgGrayA = imgResize(imgSrc)  # reference image
    imgArrA = transformImg(imgGrayA)
    imgGrayB = imgResize(imgCom)  # image under comparison
    imgArrB = transformImg(imgGrayB)
    # pHash similarity: fraction of matching bits between the two 64-bit hashes
    sum = 0
    sum = comPercent(imgArrA,imgArrB)
    # print(imgArrA)
    # print(imgArrB)
    print(sum / 64)
    # cv2.imshow("imgGrayA:" ,imgGrayA)
    # cv2.imshow("imgGrayB:" ,imgGrayB)
    # ORB feature similarity as a second opinion
    like = ORBImgSimilarity(imgPathSrc,imgPathCom)
    print("ORB_like:",like)
    # Keep the higher of the two similarity scores
    like = max(float(like),float(sum / 64))
    print("图片最大相似性为:",like)
    if like > simil:
        # Copy sufficiently similar images, prefixing the score to the file name
        shutil.copy(imgPathCom,saveFile + str(like) + "_" + files[i])
        # shutil.copy("C:\\Users\\EDZ\\Desktop\\test_consult\\" + files[i],"C:\\Users\\EDZ\\Desktop\\test_consult\\" + like)
    print("---------------------------------------------")
# Print the image dimensions (debug)
# print("img_gray shape:{}".format(np.shape(imgray)))
cv2.waitKey()
|
jamiebyer/mohrs
|
app.py
|
# -*- coding: utf-8 -*-
# Run this app with `python app3.py` and
# visit http://127.0.0.1:8050/ in your web browser.
# documentation at https://dash.plotly.com/
from flask import Flask
from os import environ
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
from numpy import random
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']

server = Flask(__name__)
app = dash.Dash(
    server=server,
    url_base_pathname=environ.get('JUPYTERHUB_SERVICE_PREFIX', '/'),
    external_stylesheets=external_stylesheets
)

# load introduction text
# Use a context manager so the file handle is closed; the original left
# `intro` open for the lifetime of the process.
with open('introduction.md', 'r') as intro:
    intro_md = intro.read()

# Shared axis limits for both Mohr's-circle graphs.
xmin = -300
xmax = 500
ymin = -300
ymax = 300

app.layout = html.Div([
    html.Div([
        dcc.Markdown(
            children=intro_md
        ),
    ]),

    # Tabs: https://dash.plotly.com/dash-core-components/tabs
    html.Div([
        dcc.Tabs(id='tabs', value='tab1', children=[
            dcc.Tab(label='Set Mean and Deviatoric Stresses', value='tab1'),
            dcc.Tab(label='Set sigma1 and sigma2', value='tab2'),
        ]),
        # Filled in by the render_content callback below.
        html.Div(id='tabs-content')
    ], style={'width': '100%', 'display': 'inline-block', 'padding': '0 20', 'vertical-align': 'middle', 'margin-bottom': 30, 'margin-right': 50, 'margin-left': 20}),

    html.Div([
        dcc.Markdown('''
----
### Sources
''')
    ])
], style={'width': '1000px'})
@app.callback(Output('tabs-content', 'children'),
              Input('tabs', 'value'))
def render_content(tab):
    """Build the control panel and graph layout for the selected tab.

    tab1 parameterises the circle by mean/deviatoric stress, tab2 by the
    principal stresses sigma1/sigma3; both share the failure-envelope
    controls (cohesive strength s_o and friction coefficient mu).
    """
    if tab == 'tab1':
        return html.Div([
            # Left column: Mohr's circle sliders.
            html.Div([
                dcc.Markdown('''
**Mohr's circle parameters**
'''),
                html.Label(children='Mean stress', style={'margin-top': '20px'}),
                dcc.Slider(id='s_m', min=0, max=xmax, value=xmax / 2, step=20,
                           marks={0: '0', 100: '100', 200: '200', 300: '300', 400: '400', 500: '500'}
                           # tooltip={'always_visible':True, 'placement':'topLeft'}
                           ),
                html.Label(children='deviatoric stress', style={'margin-top': '20px'}),
                dcc.Slider(id='s_d', min=0.0, max=150, value=80, step=10,
                           marks={0: '0', 25: '25', 50: '50', 75: '75', 100: '100', 125: '125', 150: '150'}
                           # tooltip={'always_visible':True, 'placement':'topLeft'}
                           ),
                # theta is the plane orientation; slider works in radians,
                # labels shown in degrees.
                html.Label(children='theta (degrees)', style={'margin-top': '20px'}),
                dcc.Slider(id='theta', min=0, max=np.pi / 2, value=np.pi / 4, step=np.pi / 24,
                           marks={0: '0', np.pi / 8: '22.5', np.pi / 4: '45', 3 * np.pi / 8: '66.5', np.pi / 2: '90'},
                           # tooltip={'always_visible':True, 'placement':'topLeft'}
                           ),
                dcc.Checklist(
                    id='circle_checkbox',
                    options=[
                        {'label': 'Show Mohrs Circle', 'value': 'circle'}
                    ],
                    value=['circle'],
                    style={'margin-top': '20px'}
                )
            ], style={'width': '45%', 'display': 'inline-block', 'margin-left': '30px', 'margin-right': '30px',
                      'vertical-align': 'top'}),
            # Right column: failure-envelope sliders.
            html.Div([
                dcc.Markdown('''
**Failure envelope parameters**
'''),
                html.Label(children='coh_stren', style={'margin-top': '20px'}),
                dcc.Slider(id='s_o', min=0.0, max=150.0, value=50.0, step=10.0,
                           marks={0: '0', 25: '25', 50: '50', 75: '75', 100: '100', 125: '125', 150: '150'}
                           ),
                html.Label(children='coeff. int frict', style={'margin-top': '20px'}),
                dcc.Slider(id='mu', min=0.0, max=2.0, value=0.5, step=0.1,
                           marks={0: '0', 0.5: '0.5', 1: '1', 1.5: '1.5', 2: '2'}
                           ),
                dcc.Checklist(
                    id='coulomb_checkbox',
                    options=[
                        {'label': 'Show failure envelope', 'value': 'coulomb'},
                    ],
                    value=['coulomb'],
                    style={'margin-top': '20px'}
                )
            ], style={'width': '45%', 'display': 'inline-block', 'vertical-align': 'top'}),
            # Graph updated by the tab-1 update_graph callback.
            html.Div([
                dcc.Graph(id='mean_dev_graph'),
            ]),
        ])
    elif tab == 'tab2':
        return html.Div([
            # Left column: principal-stress sliders.
            html.Div([
                dcc.Markdown('''
**Mohr's circle parameters**
'''),
                html.Label(children='sigma1:', style={'margin-top': '20px'}),
                dcc.Slider(id='s1', min=0, max=xmax, value=350, step=20,
                           marks={0: '0', 100: '100', 200: '200', 300: '300', 400: '400', 500: '500'},
                           tooltip={'always_visible': True, 'placement': 'topLeft'}
                           ),
                html.Label(children='sigma3:', style={'margin-top': '20px'}),
                dcc.Slider(id='s3', min=0, max=xmax, value=120, step=20,
                           marks={0: '0', 100: '100', 200: '200', 300: '300', 400: '400', 500: '500'},
                           tooltip={'always_visible': True, 'placement': 'topLeft'}
                           ),
                html.Label(children='theta (degrees):', style={'margin-top': '20px'}),
                dcc.Slider(id='theta', min=0, max=np.pi / 2, value=np.pi / 4, step=np.pi / 24,
                           marks={0: '0', np.pi / 8: '22.5', np.pi / 4: '45', 3 * np.pi / 8: '66.5', np.pi / 2: '90'},
                           # tooltip={'always_visible':True, 'placement':'topLeft'}
                           ),
                dcc.Checklist(
                    id='circle_checkbox',
                    options=[
                        {'label': 'Show Mohrs Circle', 'value': 'circle'}
                    ],
                    value=['circle'],
                    style={'margin-top': '20px'}
                )
            ], style={'width': '45%', 'display': 'inline-block', 'margin-left': '30px', 'margin-right': '30px',
                      'vertical-align': 'top'}),
            # Right column: failure-envelope sliders.
            html.Div([
                dcc.Markdown('''
**Failure envelope parameters**
'''),
                html.Label(children='coh_stren:', style={'margin-top': '20px'}),
                dcc.Slider(id='s_o', min=0.0, max=150.0, value=50.0, step=10.0,
                           marks={0: '0', 25: '25', 50: '50', 75: '75', 100: '100', 125: '125', 150: '150'},
                           tooltip={'always_visible': True, 'placement': 'topLeft'}
                           ),
                html.Label(children='coeff. int. frict:', style={'margin-top': '20px'}),
                dcc.Slider(id='mu', min=0.0, max=2.0, value=0.5, step=0.1,
                           marks={0: '0', 0.5: '0.5', 1: '1', 1.5: '1.5', 2: '2'},
                           tooltip={'always_visible': True, 'placement': 'topLeft'}
                           ),
                dcc.Checklist(
                    id='coulomb_checkbox',
                    options=[
                        {'label': 'Show failure envelope', 'value': 'coulomb'},
                    ],
                    value=['coulomb'],
                    style={'margin-top': '20px'}
                )
            ], style={'width': '45%', 'display': 'inline-block', 'vertical-align': 'top'}),
            # Graph updated by the tab-2 update_graph callback.
            html.Div([
                dcc.Graph(id='s1s3_graph'),
            ]),
        ])
### TAB 1
# The callback function with it's app.callback wrapper.
@app.callback(
    Output('mean_dev_graph', 'figure'),
    Input('circle_checkbox', 'value'),
    Input('coulomb_checkbox', 'value'),
    Input('s_m', 'value'),
    Input('s_d', 'value'),
    Input('theta', 'value'),
    Input('s_o', 'value'),
    Input('mu', 'value')
)
def update_graph(circle_checkbox, coulomb_checkbox, s_m, s_d, theta, s_o, mu, ):
    """Redraw the tab-1 Mohr's circle figure from mean/deviatoric stress."""
    # array for drawing a circle, angle going from 0 to 90 since 2*angle is used.
    # for a whole circle, use np.pi, not np.pi/2
    angle = np.linspace(0, np.pi, 100)

    # build the mohr's circle and coulomb failure line
    # Principal stresses from mean (centre) and deviatoric (radius) stress.
    s3 = s_m - s_d
    s1 = s_m + s_d
    # Coulomb failure envelope: tau = +/-(s_o + mu * sigma_n).
    coulx1 = np.linspace(0, xmax, 50)
    couly1 = s_o + mu * coulx1
    coulx2 = np.linspace(0, xmax, 50)
    couly2 = -s_o - mu * coulx2
    # Parametric Mohr's circle: normal stress s_n vs shear stress s_s.
    s_n = 0.5 * (s1 + s3) + 0.5 * (s1 - s3) * np.cos(2 * angle)
    s_s = 0.5 * (s1 - s3) * np.sin(2 * angle)

    # draw the angle representing the plane of interest
    x = np.array([s_m, 0.5 * (s1 + s3) + 0.5 * (s1 - s3) * np.cos(2 * theta)])
    y = np.array([0, 0.5 * (s1 - s3) * np.sin(2 * theta)])

    # generate the plot.
    fig = go.Figure()
    if circle_checkbox == ['circle']:
        fig.add_trace(go.Scatter(x=s_n, y=s_s, mode='lines', name='circle'))
        fig.add_trace(go.Scatter(x=x, y=y, name="linear", line_shape='linear', line=dict(color='green')))
    if coulomb_checkbox == ['coulomb']:
        fig.add_trace(go.Scatter(x=coulx1, y=couly1, mode='lines', name='Coulomb+', line=dict(color='red')))
        fig.add_trace(go.Scatter(x=coulx2, y=couly2, mode='lines', name='Coulomb-', line=dict(color='red')))

    # We want a "square" figure so the circle is seen as a circle
    # Ranges for xaxis and yaxis, and the plot width/height must be be chosen for a square graph.
    # width and height are in pixels.
    fig.update_layout(xaxis_title='Sigma_n', yaxis_title='Sigma_s', width=600, height=500, showlegend=False)
    # fig.update_layout(xaxis_title='Sigma_n', yaxis_title='Sigma_s', width=800, height=660, showlegend=False)
    fig.update_xaxes(range=[xmin, xmax])
    fig.update_yaxes(range=[ymin, ymax])
    return fig
### TAB 2
# The callback function with its app.callback wrapper.
# NOTE(review): this deliberately reuses the name `update_graph`; the tab-1
# function is already registered with Dash by its decorator, so the module-
# level shadowing is harmless, but consider distinct names for clarity.
@app.callback(
    Output('s1s3_graph', 'figure'),
    Input('circle_checkbox', 'value'),
    Input('coulomb_checkbox', 'value'),
    Input('s1', 'value'),
    Input('s3', 'value'),
    Input('theta', 'value'),
    Input('s_o', 'value'),
    Input('mu', 'value')
)
def update_graph(circle_checkbox, coulomb_checkbox, s1, s3, theta, s_o, mu):
    """Redraw the tab-2 Mohr's circle figure from principal stresses s1/s3."""
    # array for drawing a circle, angle going from 0 to 90 since 2*angle is used.
    # for a whole circle, use np.pi, not np.pi/2
    angle = np.linspace(0, np.pi, 100)

    # build the mohr's circle and coulomb failure line
    # Mean stress locates the circle's centre on the sigma_n axis.
    s_m = (s1 + s3) / 2
    # Fixed: the original computed `s_d = s1 + s3`, which is both the wrong
    # formula for deviatoric stress ((s1 - s3) / 2) and never used; removed.
    # Parametric Mohr's circle: normal stress s_n vs shear stress s_s.
    s_n = 0.5 * (s1 + s3) + 0.5 * (s1 - s3) * np.cos(2 * angle)
    s_s = 0.5 * (s1 - s3) * np.sin(2 * angle)

    # draw the angle representing the plane of interest
    x = np.array([s_m, 0.5 * (s1 + s3) + 0.5 * (s1 - s3) * np.cos(2 * theta)])
    y = np.array([0, 0.5 * (s1 - s3) * np.sin(2 * theta)])

    # Coulomb failure envelope: tau = +/-(s_o + mu * sigma_n).
    coulx1 = np.linspace(0, xmax, 50)
    couly1 = s_o + mu * coulx1
    coulx2 = np.linspace(0, xmax, 50)
    couly2 = -s_o - mu * coulx2

    # generate the plot.
    fig = go.Figure()
    if circle_checkbox == ['circle']:
        fig.add_trace(go.Scatter(x=s_n, y=s_s, mode='lines', name='circle'))
        fig.add_trace(go.Scatter(x=x, y=y, name="linear", line_shape='linear', line=dict(color='green')))
    if coulomb_checkbox == ['coulomb']:
        fig.add_trace(go.Scatter(x=coulx1, y=couly1, mode='lines', name='Coulomb+', line=dict(color='red')))
        fig.add_trace(go.Scatter(x=coulx2, y=couly2, mode='lines', name='Coulomb-', line=dict(color='red')))

    # We want a "square" figure so the circle is seen as a circle
    # Ranges for xaxis and yaxis, and the plot width/height must be be chosen for a square graph.
    # width and height are in pixels.
    fig.update_layout(xaxis_title='Sigma_n', yaxis_title='Sigma_s', width=600, height=500, showlegend=False)
    # fig.update_layout(xaxis_title='Sigma_n', yaxis_title='Sigma_s', width=800, height=660, showlegend=False)
    fig.update_xaxes(range=[xmin, xmax])
    fig.update_yaxes(range=[ymin, ymax])
    return fig
if __name__ == '__main__':
    # Bind to all interfaces so the app is reachable inside containers/JupyterHub.
    app.run_server(debug=True, host='0.0.0.0', port=8050)
|
tomasMasson/sfgv_proteomics
|
scripts/create_proteins_table.py
|
<gh_stars>0
#!/usr/bin/env python3
"Aggregates individual SfGV proteomics datasets into a summary table"
import click
import pandas as pd
def create_proteins_table(dataset1, dataset2, dataset3, output):
    """
    Merges the three protein tables of each replicate into a summary table.

    Each input is a CSV path/buffer with at least the columns "Accession",
    "Description", "# AAs", "Coverage" and "emPAI". Numeric columns are
    averaged across replicates and the result is written as CSV to `output`.
    """

    def _load(dataset):
        # Read one replicate and derive the protein name: the first
        # whitespace-separated token of the Description field.
        df = pd.read_csv(dataset)
        df["Protein"] = df["Description"].str.split(" ", expand=True)[0]
        return df

    # Merge the three replicates into a single long table
    # (was three copy-pasted read/parse stanzas).
    summary = pd.concat([_load(dataset1), _load(dataset2), _load(dataset3)])
    # Map each accession to its protein name. De-duplicating on the
    # accession (rather than separately on names and accessions, as before)
    # keeps names aligned even when two accessions share a protein name.
    protein_names = summary.drop_duplicates("Accession").set_index("Accession")["Protein"]
    # Per-accession averages across replicates.
    aa = summary.groupby("Accession")["# AAs"].mean()
    coverage = summary.groupby("Accession")["Coverage"].mean()
    empai = summary.groupby("Accession")["emPAI"].mean()
    # Assemble the summary table; the Series align on their Accession index.
    table = pd.DataFrame({"Protein name": protein_names,
                          "Protein Size (amino acids)": aa,
                          "Average Coverage (%)": coverage,
                          "Average emPAI": empai})
    table = table.round(decimals=2)
    table.to_csv(output)
# CLI options
# Enable -h in addition to click's default --help.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])


@click.command(context_settings=CONTEXT_SETTINGS)
@click.option("-d1",
              "--data1",
              help="First dataset")
@click.option("-d2",
              "--data2",
              help="Second dataset")
@click.option("-d3",
              "--data3",
              help="Third dataset")
@click.option("-o",
              "--output",
              help="Output table name")
# CLI main function: thin wrapper that forwards the three replicate CSV
# paths and the output path to create_proteins_table.
def cli(data1, data2, data3, output):
    """
    Command line interface
    """
    create_proteins_table(data1, data2, data3, output)


if __name__ == "__main__":
    cli()
|
gplssm/ding0_viz
|
utils/retrieve_data.py
|
<reponame>gplssm/ding0_viz
import os
from ding0.core import NetworkDing0
from utils.process_data import read_config_yaml, to_list_of_ints
from egoio.tools import db
from sqlalchemy.orm import sessionmaker
import argparse
def generate_ding0_data(grid_id, save_path):
    """Run ding0 for one MV grid district and export the result as CSV.

    :param grid_id: MV grid district id to build
    :param save_path: directory the CSV export is written to
    """
    # Read-only database session against the open energy database.
    engine = db.connection(readonly=True)
    session = sessionmaker(bind=engine)()

    nd = NetworkDing0(name='network')

    # run DING0 on selected MV Grid District
    nd.run_ding0(session=session,mv_grid_districts_no=[grid_id])

    # Dump the generated grid topology as CSV files below save_path.
    nd.to_csv(save_path)
def create_data_folder(data_path):
    """Create the data directory (and any parents) if it does not exist yet."""
    # exist_ok makes the call idempotent across repeated runs.
    os.makedirs(data_path, exist_ok=True)
if __name__ == '__main__':
    # Parse command-line input
    parser = argparse.ArgumentParser(
        description='Retrieve data for visualization',
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="Alternatively, you can provide all required input by a config file.\n" \
               "Use the argument `conf` to include a customs config file")
    parser.add_argument('--grid_id', type=str, help='ID of the grid. Following input formats are valid\n' \
                        '\t--grid_id=645 (single grid)\n' \
                        '\t--grid_id=645,655 (list of grid IDs)\n'
                        '\t--grid_id=645..655 (range of grid IDs)\n'
                        'Must be either given by command-line or by config file.',
                        default=str())
    parser.add_argument('--csv_data_path', type=str, help="Path to save ding0 grid data in CSV format")
    parser.add_argument('--geojson_data_path', type=str, help="Path to save processed grid data in GeoJSON format")
    parser.add_argument('--conf', type=read_config_yaml, help="Config file in YAML format", default=dict())
    args = parser.parse_args()
    # Normalize the grid_id string ("1", "1,2", "3..5") into a list of ints.
    args.grid_id = to_list_of_ints(args.grid_id)

    # Read-in cmd-line args and custom config file args
    settings_custom_config = {k: v for k,v in vars(args)["conf"].items() if k != "exclude"}
    settings_cmd = vars(args)

    # Load config file
    settings_default_conf = read_config_yaml("_config.yml")

    # Merge three settings dicts with the following overwrite order
    # 1. CMD args
    # 2. Custom config file args
    # 3. Default config file args
    settings = {k: v for k, v in settings_default_conf.items() if v is not None and k != "exclude"}
    for k, v in settings_custom_config.items():
        if v:
            settings.update({k: v})
    for k, v in settings_cmd.items():
        if v and k != 'conf':
            settings.update({k: v})

    # create project and data folder
    create_data_folder(settings['csv_data_path'])
    create_data_folder(settings['geojson_data_path'])

    for g in settings['grid_id']:
        # generate ding0 data
        # NOTE(review): generate_ding0_data returns None; the assignment is
        # kept as-is but the bound value is unused.
        ding0_data = generate_ding0_data(g, settings['csv_data_path'])
|
gplssm/ding0_viz
|
utils/process_data.py
|
import os
import requests
import pandas as pd
import json
from geojson import Feature, MultiPolygon, FeatureCollection, Point, LineString
from shapely.wkb import loads as wkb_loads
from shapely.wkt import loads as wkt_loads
from shapely.ops import transform
import pyproj
from functools import partial
import argparse
import yaml
# Maps raw ding0/CSV column names to human-readable display labels used in
# the exported GeoJSON properties.
display_names = {
    "p_nom": "Nominal power in kW",
    "s_nom": "Nominal apparent power in kVA",
    "bus": "Bus",
    "bus0": "Bus 0",
    "bus_open": "Bus 0",
    "bus1": "Bus 1",
    "bus_closed": "Bus 1",
    "mv_grid_id": "MV grid id",
    "lv_grid_id": "LV grid id",
    "v_nom": "Nominal voltage in kV",
    "lat": "Latitude",
    "lon": "Longitude",
    "control": "Type of control",
    "type": "Technology",
    "subtype": "Specific technology",
    "weather_cell_id": "Weather cell id",
    "length": "Length in km",
    "num_parallel": "Parallel lines",
    "subst_id": "Substation id",
    "mv_grid_district_population": "Population",
    "annual_consumption": "Annual consumption in kWh",
    "dea_capacity": "Generation capacity in kW",
    "mv_dea_capacity": "MV generation capacity in kW",
    # Fixed: was "LV generation capacity kW" (missing "in"), which never
    # matched the corresponding display_roundings entry or the enrich_data
    # key "LV generation capacity in kW" built in reformat_ding0_grid_data.
    "lv_dea_capacity": "LV generation capacity in kW",
    "peak_load": "Peak load in kW",
    "sector": "Sector",
    "type_info": "Type"
}

# Decimal places applied when rounding displayed values, keyed by the
# display label (i.e. the values of display_names plus derived fields).
display_roundings = {
    "Annual consumption in kWh": 0,
    "Peak load in kW": 2,
    "Nominal apparent power in kVA": 0,
    # Fixed: the key said "Nominal power in kV" (typo for "kW"), so the
    # rounding for p_nom was never applied.
    "Nominal power in kW": 0,
    "Area in km²": 2,
    "Generation capacity in kW": 0,
    "MV generation capacity in kW": 0,
    "LV generation capacity in kW": 0,
    "x": 5,
    "r": 5,
    "Length in km": 3,
    "Latitude": 6,
    "Longitude": 6,
}
def retrieve_mv_grid_polygon(subst_id, geojson_path, version='v0.4.5'):
    """Fetch the MV grid district record from the OEP REST API.

    :param subst_id: substation / grid district id
    :param geojson_path: output root; the per-grid folder is created here
    :param version: data version used in the API filter
    :return: dict with the district area in km² (the API reports hectares)
    """
    os.makedirs(os.path.join(geojson_path, str(subst_id)), exist_ok=True)

    # prepare query
    oep_url= 'http://oep.iks.cs.ovgu.de/'
    schema = "grid"
    table = "ego_dp_mv_griddistrict"
    where_version = 'where=version=' + version
    where_ids = '&where=subst_id=' + str(subst_id)

    # retrieve data and reformat geo data
    get_data = requests.get(
        oep_url+'/api/v0/schema/'+schema+'/tables/'+table+'/rows/?{version}{where_ids}'.format(version=where_version, where_ids=where_ids)
    )
    # Assumes exactly one matching row; raises IndexError on an empty result.
    data = get_data.json()[0]

    # 1 ha = 0.01 km²
    return {"Area in km²": float(data["area_ha"]) / 100}
def retrieve_mv_grid_info(grid_id, csv_path, geojson_path, enrich_data):
    """Build the MV-grid-district GeoJSON (polygon + metadata) for one grid.

    :param grid_id: grid district id (names the CSV/GeoJSON files)
    :param csv_path: root folder of the ding0 CSV export
    :param geojson_path: root folder the GeoJSON is written to
    :param enrich_data: extra properties (area, capacities, ...) merged into
        the district record before export
    """
    # network_<id>.csv holds a single row describing the grid district.
    network = pd.read_csv(os.path.join(csv_path, str(grid_id), 'network_{}.csv'.format(grid_id)))
    # The district geometry is stored as WKT in that row.
    geom = wkt_loads(network.loc[0, "mv_grid_district_geom"])
    network = network.drop(["name", "srid", "mv_grid_district_geom"], axis=1).to_dict(orient='records')[0]
    # Exterior ring of the first polygon, converted to [x, y] lists.
    coords = list(geom.geoms[0].exterior.coords)
    coords_list = []
    for coord in coords:
        coords_list.append(list(coord))
    # Nest like a GeoJSON MultiPolygon: [[[ [x, y], ... ]]]
    network['coordinates'] = [[coords_list]]
    network.update(**enrich_data)
    # Rename raw column names to their display labels.
    # NOTE(review): popping/inserting while iterating .keys() mutates the
    # dict during iteration; it happens to work in CPython because the size
    # stays constant per step, but it is fragile — confirm before touching.
    for k in network.keys():
        if k in display_names.keys():
            network[display_names[k]] = network.pop(k)
    # Round the displayable numeric values per display_roundings.
    for k, v in network.items():
        if k in display_roundings.keys() and v is not None:
            network[k] = round(float(v), display_roundings[k])
    feature_collection = to_geojson([network], geom_type='MultiPolygon')
    with open(os.path.join(geojson_path, str(grid_id), 'mv_grid_district_{}.geojson'.format(grid_id)), 'w') as outfile:
        json.dump(feature_collection, outfile)
def to_geojson(data, geom_type):
    """Convert a list of JSON records into a GeoJSON FeatureCollection."""
    constructors = {
        'MultiPolygon': MultiPolygon,
        'Point': Point,
        'LineString': LineString,
    }
    features = []
    for record in data:
        # Dispatch on geometry type; unknown types raise, matching the
        # original if/elif chain (only when there is data to convert).
        if geom_type not in constructors:
            raise NotImplementedError()
        geometry = constructors[geom_type](record['coordinates'])
        # Everything except the geometry fields becomes a feature property.
        properties = {key: value for key, value in record.items()
                      if key not in ['geom', 'coordinates', 'geom_type']}
        features.append(Feature(geometry=geometry, properties=properties))
    return FeatureCollection(features)
def create_data_folder(data_path):
    """Ensure the data directory (including parents) exists."""
    # Idempotent: exist_ok suppresses the error on repeated runs.
    os.makedirs(data_path, exist_ok=True)
def geom_to_coords(geom):
    """Decode a hex-encoded WKB point geometry into a plain [x, y] list.

    NOTE(review): callers later assign coordinate[0] to 'lat' and
    coordinate[1] to 'lon'; confirm the source data really stores latitude
    in the WKB x slot.
    """
    coordinates_shp = wkb_loads(geom, hex=True)
    coordinates = [coordinates_shp.x, coordinates_shp.y]
    return coordinates
def reformat_ding0_grid_data(bus_file, transformer_file, generators_file, lines_file, loads_file, switches_file):
    """Load the ding0 CSV exports and reshape them for GeoJSON conversion.

    :return: (transformers, generators, lines, loads, switches, enrich_data)
        — the first five are lists of record dicts ready for to_geojson();
        enrich_data holds grid-level aggregates for the district polygon.
    """
    buses = pd.read_csv(bus_file)
    transformers = pd.read_csv(transformer_file)
    # hvmv_transformers = transformers[transformers['s_nom'] > 1000]
    # mvlv_transformers = transformers[transformers['s_nom'] <= 1000]
    lines = pd.read_csv(lines_file)
    generators = pd.read_csv(generators_file)
    loads = pd.read_csv(loads_file)
    switches = pd.read_csv(switches_file)

    # Keep only buses with a geometry and decode their WKB into [x, y].
    geo_referenced_buses = buses.loc[~buses['geom'].isna(), 'geom']
    geo_referenced_buses = pd.DataFrame(geo_referenced_buses.apply(geom_to_coords).rename('coordinates'), index=geo_referenced_buses.index)
    # NOTE(review): 'lat' takes coordinate[0] (the WKB x value) and 'lon'
    # coordinate[1] — verify against the source data's axis order.
    geo_referenced_buses['lat'] = geo_referenced_buses['coordinates'].apply(lambda x: x[0])
    geo_referenced_buses['lon'] = geo_referenced_buses['coordinates'].apply(lambda x: x[1])
    buses = (buses.join(geo_referenced_buses, how='inner')).set_index('name')

    # NOTE(review): s_nom is scaled by 1e3 here (presumably MVA -> kVA to
    # match the "Nominal apparent power in kVA" label) — confirm units.
    transformers["s_nom"] = transformers["s_nom"] * 1e3
    # Attach bus coordinates, apply display labels and roundings.
    transformers_df = transformers.join(buses, on='bus0', how='inner').rename(
        columns=display_names).round(display_roundings).fillna('NaN')
    transformers_dict = transformers_df.to_dict(orient='records')

    # Lines need the coordinates of both end buses (bus0 and bus1).
    lines_df_0 = lines.join(buses, on='bus0', how='inner').rename(columns={'coordinates': 'coordinates_0'}).set_index('name')
    lines_df_1 = lines.join(buses, on='bus1', how='inner').rename(columns={'coordinates': 'coordinates_1'}).set_index('name')
    lines_df = pd.concat([lines_df_0, lines_df_1], axis=1, sort=True).dropna(subset=['coordinates_0', 'coordinates_1'])
    # LineString coordinates: [(x0, y0), (x1, y1)] per line.
    lines_df['coordinates'] = [[tuple(row['coordinates_0']), tuple(row['coordinates_1'])] for it, row in lines_df.iterrows()]
    lines_df = lines_df[lines_df.columns[~lines_df.columns.str.endswith('_0')]]
    lines_df = lines_df.reset_index()
    lines_df["s_nom"] = lines_df["s_nom"] * 1e3
    # Drop the duplicated columns produced by the double join.
    lines_df_processed = lines_df.loc[:,~lines_df.columns.duplicated()]
    lines_dict = lines_df_processed.fillna('NaN').rename(
        columns=display_names).round(display_roundings).to_dict(orient='records')

    # Generators: keep only MV-level items (bus voltage > 0.4 kV).
    generators_df = generators.join(buses, on='bus', how='inner').fillna('NaN').rename(
        columns=display_names).round(display_roundings)
    generators_mv = generators_df.loc[generators_df['Nominal voltage in kV'] > 0.4]
    generators_mv = generators_mv.drop("LV grid id", axis=1)
    generators_dict = generators_mv.to_dict(orient='records')

    # Loads: same MV-level filter as generators.
    loads_df = loads.join(buses, on='bus', how='inner').fillna('NaN').rename(
        columns=display_names).round(display_roundings)
    loads_mv = loads_df.loc[loads_df['Nominal voltage in kV'] > 0.4]
    loads_mv = loads_mv.drop("LV grid id", axis=1)
    loads_dict = loads_mv.to_dict(orient='records')

    # Switches are drawn at the midpoint between their two buses.
    switches_df_0 = switches.join(buses, on='bus_open', how='inner').rename(columns={'coordinates': 'coordinates_0'}).set_index('name')
    switches_df_1 = switches.join(buses, on='bus_closed', how='inner').rename(columns={'coordinates': 'coordinates_1'}).set_index('name')
    switches_df = pd.concat([switches_df_0, switches_df_1], axis=1, sort=True).dropna(subset=['coordinates_0', 'coordinates_1'])
    switches_df = switches_df.loc[:,~switches_df.columns.duplicated()]
    switches_df['coordinates'] = [
        [(row['coordinates_0'][0] + row['coordinates_1'][0]) / 2,
         (row['coordinates_0'][1] + row['coordinates_1'][1]) / 2]
        for it, row in switches_df.iterrows()]
    switches_dict = switches_df[["coordinates", "bus_open", "bus_closed"]].reset_index().fillna('NaN').rename(
        columns=display_names).round(display_roundings).to_dict(orient='records')

    # Grid-level aggregates used to enrich the district polygon's properties.
    enrich_data = {
        "MV generation capacity in kW": 1e3 * sum(generators_mv["Nominal power in kW"]),
        "LV generation capacity in kW": 1e3 * sum(generators_df.loc[generators_df["Nominal voltage in kV"] <= 0.4, "Nominal power in kW"]),
        "Peak load in kW": sum(loads_df["Peak load in kW"]),
        "Annual consumption in kWh": sum(loads_df["Annual consumption in kWh"]),
    }

    return transformers_dict, generators_dict, lines_dict, loads_dict, switches_dict, enrich_data
def list_available_grid_data(csv_path, geojson_path):
    """Write available_grid_data.txt listing every grid-id folder in csv_path."""
    # Only sub-directories of csv_path count as grid data sets.
    grid_dirs = [entry for entry in os.listdir(csv_path)
                 if os.path.isdir(os.path.join(csv_path, entry))]
    with open(os.path.join(geojson_path, 'available_grid_data.txt'), 'w') as f:
        # Header line, then one grid id per line.
        f.write("gridids\n")
        for grid_dir in grid_dirs:
            f.write("%s\n" % grid_dir)
def csv_to_geojson(grid_id, csv_path, geojson_path):
    """Convert one grid's ding0 CSV export into per-asset GeoJSON files.

    :param grid_id: grid district id (names the input/output files)
    :param csv_path: root folder holding the per-grid CSV folders
    :param geojson_path: root folder the GeoJSON files are written to
    :return: enrich_data dict of grid-level aggregates (capacities, load)
    """
    os.makedirs(os.path.join(geojson_path, str(grid_id)), exist_ok=True)

    # reformat ding0 data
    ding0_node_data_reformated, \
    ding0_generator_data_reformated, \
    ding0_line_data_reformated, \
    ding0_load_data_reformated, \
    ding0_switch_data_reformated, \
    enrich_data = reformat_ding0_grid_data(
        os.path.join(csv_path, str(grid_id), 'buses_{}.csv'.format(grid_id)),
        os.path.join(csv_path, str(grid_id), 'transformers_{}.csv'.format(grid_id)),
        os.path.join(csv_path, str(grid_id), 'generators_{}.csv'.format(grid_id)),
        os.path.join(csv_path, str(grid_id), 'lines_{}.csv'.format(grid_id)),
        os.path.join(csv_path, str(grid_id), 'loads_{}.csv'.format(grid_id)),
        os.path.join(csv_path, str(grid_id), 'switches_{}.csv'.format(grid_id))
    )

    # Convert to GeoJSON and save to file
    # (lines become LineStrings; everything else is a Point feature).
    ding0_node_data_geojson = to_geojson(ding0_node_data_reformated, geom_type='Point')
    ding0_generator_data_geojson = to_geojson(ding0_generator_data_reformated, geom_type='Point')
    ding0_line_data_geojson = to_geojson(ding0_line_data_reformated, geom_type='LineString')
    ding0_load_data_geojson = to_geojson(ding0_load_data_reformated, geom_type='Point')
    ding0_switch_data_geojson = to_geojson(ding0_switch_data_reformated, geom_type='Point')

    # One GeoJSON file per asset class, named after the grid id.
    with open(os.path.join(geojson_path, str(grid_id), 'mv_visualization_transformer_data_{}.geojson'.format(grid_id)), 'w') as outfile:
        json.dump(ding0_node_data_geojson, outfile)
    with open(os.path.join(geojson_path, str(grid_id), 'mv_visualization_generator_data_{}.geojson'.format(grid_id)), 'w') as outfile:
        json.dump(ding0_generator_data_geojson, outfile)
    with open(os.path.join(geojson_path, str(grid_id), 'mv_visualization_line_data_{}.geojson'.format(grid_id)), 'w') as outfile:
        json.dump(ding0_line_data_geojson, outfile)
    with open(os.path.join(geojson_path, str(grid_id), 'mv_visualization_load_data_{}.geojson'.format(grid_id)), 'w') as outfile:
        json.dump(ding0_load_data_geojson, outfile)
    with open(os.path.join(geojson_path, str(grid_id), 'mv_visualization_switch_data_{}.geojson'.format(grid_id)), 'w') as outfile:
        json.dump(ding0_switch_data_geojson, outfile)

    # Write list of available grid data
    list_available_grid_data(csv_path, geojson_path)

    return enrich_data
def to_list_of_ints(grid_id):
    """Parse a grid-id argument ("645", "1,2", "3..5") into a list of ints."""
    if not grid_id:
        # Empty string: no grids requested.
        return []
    # Comma-separated list takes precedence, mirroring the original order.
    if "," in grid_id:
        return [int(part) for part in grid_id.split(",")]
    # "a..b" denotes an inclusive range.
    if ".." in grid_id:
        bounds = grid_id.split("..")
        return list(range(int(bounds[0]), int(bounds[1]) + 1))
    # Single id.
    return [int(grid_id)]
def read_config_yaml(conf_file):
    """Read a YAML config file and normalize 'grid_id' to a list of ints.

    Accepts the same grid_id formats the CLI advertises: a plain int,
    "645", "645,655" (list) or "645..655" (inclusive range).

    :param conf_file: path to the YAML config file
    :return: dict of settings with 'grid_id' normalized when present
    """
    # Context manager fixes the previously leaked file handle.
    with open(conf_file) as fh:
        conf_settings = yaml.load(fh, Loader=yaml.SafeLoader)
    grid_id = conf_settings.get('grid_id', None)
    if isinstance(grid_id, str):
        if "," in grid_id:
            # Comma-separated list of ids (previously unsupported here,
            # although the CLI help promises it).
            conf_settings['grid_id'] = [int(i) for i in grid_id.split(",")]
        elif ".." in grid_id:
            # Fixed: "a..b" used to become [a, b] instead of the full
            # inclusive range, inconsistent with to_list_of_ints().
            bounds = grid_id.split("..")
            conf_settings['grid_id'] = list(range(int(bounds[0]), int(bounds[1]) + 1))
        else:
            conf_settings['grid_id'] = [int(grid_id)]
    elif isinstance(grid_id, int):
        conf_settings['grid_id'] = [grid_id]
    return conf_settings
if __name__ == '__main__':
    # Parse command-line input
    parser = argparse.ArgumentParser(
        description='Process data for visualization\n\n' \
                    '- CSV files are converted to GeoJSON\n' \
                    '- A list of grid ids is generated',
        formatter_class=argparse.RawTextHelpFormatter,
        epilog="Alternatively, you can provide all required input by a config file.\n" \
               "Use the argument `conf` to include a customs config file")
    parser.add_argument('--grid_id', type=str, help='IDs of the grid that should processed. Following input formats are valid\n' \
                        '\t--grid_id=645 (single grid)\n' \
                        '\t--grid_id=645,655 (list of grid IDs)\n'
                        '\t--grid_id=645..655 (range of grid IDs)\n'
                        'Must be either given by command-line or by config file.',
                        default=str())
    parser.add_argument('--csv_data_path', type=str, help="Path to read ding0 grid data (in CSV format) from")
    parser.add_argument('--geojson_data_path', type=str, help="Path to save processed grid data in GeoJSON format")
    parser.add_argument('--conf', type=read_config_yaml, help="Config file in YAML format", default=dict())
    args = parser.parse_args()
    # Normalize the grid_id string ("1", "1,2", "3..5") into a list of ints.
    args.grid_id = to_list_of_ints(args.grid_id)

    # Read-in cmd-line args and custom config file args
    settings_custom_config = {k: v for k,v in vars(args)["conf"].items() if k != "exclude"}
    settings_cmd = vars(args)

    # Load config file
    settings_default_conf = read_config_yaml("_config.yml")

    # Merge three settings dicts with the following overwrite order
    # 1. CMD args
    # 2. Custom config file args
    # 3. Default config file args
    settings = {k: v for k, v in settings_default_conf.items() if v is not None and k != "exclude"}
    for k, v in settings_custom_config.items():
        if v:
            settings.update({k: v})
    for k, v in settings_cmd.items():
        if v and k != 'conf':
            settings.update({k: v})

    # create project and data folder
    create_data_folder(settings['geojson_data_path'])

    # Process data and convert to CSV to geojson
    # Fall back to every grid folder found under csv_data_path when no
    # grid_id was given.
    if not settings.get('grid_id', None):
        settings['grid_id'] = [name for name in os.listdir(settings['csv_data_path'])
                               if os.path.isdir(os.path.join(settings['csv_data_path'], name))]

    for g in settings['grid_id']:
        # Convert CSV to GeoJSON
        enrich_data_map_data = csv_to_geojson(g, settings['csv_data_path'], settings['geojson_data_path'])
        # retrieve mv grid district polygon
        enrich_data_area = retrieve_mv_grid_polygon(g, settings['geojson_data_path'])
        # Combine area + aggregates into the district GeoJSON properties.
        retrieve_mv_grid_info(g, settings['csv_data_path'], settings['geojson_data_path'], {**enrich_data_area, **enrich_data_map_data})
|
Naighu/python-screenrecorder
|
resources/screencapture.py
|
<filename>resources/screencapture.py<gh_stars>1-10
import cv2
import numpy as np
import pyautogui
class WriteVideoToFile(cv2.VideoWriter):
    """A ``cv2.VideoWriter`` usable as a context manager.

    Construction is inherited unchanged from ``cv2.VideoWriter``
    (filename, fourcc, fps, framesize); ``release()`` is guaranteed
    on exit of the ``with`` block.
    """
    # def __init__(self, filename, fourcc, fps, framesize):
    # calls the super class __init__ function
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        # Flush and close the output file; exceptions are not suppressed.
        self.release()
class ScreenCapture:
    """Record the screen to a video file from pyautogui screenshots."""

    def __init__(self):
        # If isRecord is False the recording loop should stop.
        # Initially True, indicating that the recording should start.
        self.isRecord = True
        self.FOURCC = cv2.VideoWriter_fourcc(*'mp4v')
        # Take one screenshot up front to learn the frame dimensions.
        img = pyautogui.screenshot()
        self.height, self.width, self.channels = cv2.cvtColor(
            np.array(img), cv2.COLOR_RGB2BGR).shape

    def recordScreen(self, filename):
        """Capture frames into *filename* until ``isRecord`` goes False."""
        print("Recording Video")
        with WriteVideoToFile(filename, self.FOURCC, 8.0,
                              (self.width, self.height)) as out:
            while True:
                frame = cv2.cvtColor(
                    np.array(pyautogui.screenshot()), cv2.COLOR_RGB2BGR)
                out.write(frame)
                # NOTE(review): the original built a `StopIteration(1)`
                # instance here and discarded it -- a no-op; removed.
                if not self.isRecord:
                    break
        print("stopped Recording")
|
Naighu/python-screenrecorder
|
resources/decor.py
|
# decorators for functions
def audio_record_thread(func):
    """Decorator logging the start and end of an audio-recording call.

    ``functools.wraps`` is applied so the wrapper keeps the wrapped
    function's name and docstring (the original lost them).
    """
    from functools import wraps

    @wraps(func)
    def inner(s):
        print("AudioCapture: Started Recording Audio")
        func(s)
        print("AudioCapture: Stopped Recording Audio")
        return
    return inner
def file_saving(func):
    """Decorator logging before/after a file-saving call.

    ``functools.wraps`` preserves the wrapped function's metadata.
    """
    from functools import wraps

    @wraps(func)
    def wrap(filename):
        print("Saving file...")
        func(filename)
        print("File saved")
        return
    return wrap
|
Naighu/python-screenrecorder
|
resources/audiocapture.py
|
<filename>resources/audiocapture.py<gh_stars>1-10
import pyaudio
import wave
import cv2
# from decor import *
class CaptureAudio:
    """Record microphone audio via PyAudio and save it to a WAV file."""

    def __init__(self):
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 2
        self.RATE = 44100
        self.CHUNK = 1024
        # If isRecord is False the recording loop should stop.
        # Initially True, indicating that the recording should start.
        self.isRecord = True
        self.audio = pyaudio.PyAudio()  # opens an audio channel
        self.frames = []

    def startRecoding(self, filename):
        """Read audio chunks until ``isRecord`` goes False, then save."""
        # initialises audio stream and starts recording
        stream = self.audio.open(format=self.FORMAT, channels=self.CHANNELS,
                                 rate=self.RATE, input=True,
                                 frames_per_buffer=self.CHUNK)
        print("Started Recording Audio")
        try:
            while True:
                data = stream.read(self.CHUNK)
                self.frames.append(data)
                if not self.isRecord:
                    break
        finally:
            # Always release the audio stream, even if read() raises.
            stream.stop_stream()
            stream.close()
        self.saveAudioFile(filename)

    def saveAudioFile(self, filename):
        """Write the collected frames to *filename* as a WAV file."""
        print("Saving Recorded Audio")
        # wave.open supports the context-manager protocol, so the file is
        # closed even if one of the writes fails (the original leaked it).
        with wave.open(filename, 'wb') as wavfile:
            wavfile.setnchannels(self.CHANNELS)
            wavfile.setsampwidth(self.audio.get_sample_size(self.FORMAT))
            wavfile.setframerate(self.RATE)
            wavfile.writeframes(b''.join(self.frames))

    def __del__(self):
        # __init__ may have failed before self.audio was assigned; guard
        # so interpreter shutdown does not raise a secondary error.
        audio = getattr(self, 'audio', None)
        if audio is not None:
            audio.terminate()  # terminates the pyaudio object
|
Naighu/python-screenrecorder
|
resources/screenrec.py
|
import numpy
import cv2
from threading import Thread
from screencapture import *
from audiocapture import *
import tkinter as tk
from tkinter.font import Font
class ScreenRecorder(object):
    """Coordinate simultaneous screen and audio capture on worker threads."""

    def __init__(self):
        self.audio = CaptureAudio()
        self.video = ScreenCapture()

    def startRecoding(self):
        """Launch the video and audio capture loops, video first."""
        video_thread = Thread(target=self.video.recordScreen,
                              args=("video.avi",))
        audio_thread = Thread(target=self.audio.startRecoding,
                              args=("audio.wav",))
        video_thread.start()
        audio_thread.start()

    def stopRecording(self):
        """Flip both stop flags so the capture loops exit."""
        self.audio.isRecord = False
        self.video.isRecord = False

    def pause(self):
        print("Not implemented yet")
def main():
    """Build the control window and wire its buttons to a ScreenRecorder."""
    recorder = ScreenRecorder()
    # GUI
    root = tk.Tk()
    root.geometry('300x80')
    root.title("Recorder")
    root.configure(background='black')
    btn_start = tk.Button(root, text='Start', bd='3', background='white',
                          command=recorder.startRecoding)
    # NOTE(review): the original named the next two buttons btnStop/btnPause,
    # swapped relative to their labels/commands; renamed to match behavior.
    btn_pause = tk.Button(root, text='Pause/Resume', bd='3',
                          background='green', command=recorder.pause)
    btn_stop = tk.Button(root, text='Stop', bd='3', background='red',
                         command=recorder.stopRecording)
    btn_start.place(x=3, y=4)
    btn_pause.place(x=100, y=4)
    btn_stop.place(x=250, y=4)
    root.mainloop()


# Guard the entry point so importing this module does not open the GUI.
if __name__ == '__main__':
    main()
|
bezyakina/my_django_blog
|
posts/migrations/0007_auto_20200817_1747.py
|
# Generated by Django 2.2.13 on 2020-08-17 17:47
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: order Post querysets by newest pub_date first.

    dependencies = [
        ("posts", "0006_auto_20200811_1551"),
    ]
    operations = [
        migrations.AlterModelOptions(
            name="post",
            options={"ordering": ("-pub_date",)},
        ),
    ]
|
bezyakina/my_django_blog
|
posts/migrations/0010_auto_20200821_1104.py
|
# Generated by Django 2.2.13 on 2020-08-21 11:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: add a DB index to Post.pub_date.

    dependencies = [
        ("posts", "0009_comment"),
    ]
    operations = [
        migrations.AlterField(
            model_name="post",
            name="pub_date",
            field=models.DateTimeField(
                auto_now_add=True,
                db_index=True,
                verbose_name="Дата публикации",
            ),
        ),
    ]
|
bezyakina/my_django_blog
|
posts/migrations/0003_post_group.py
|
<filename>posts/migrations/0003_post_group.py<gh_stars>0
# Generated by Django 2.2 on 2020-07-09 12:04
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: add the optional Post.group foreign key.

    dependencies = [
        ("posts", "0002_group"),
    ]
    operations = [
        migrations.AddField(
            model_name="post",
            name="group",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                to="posts.Group",
            ),
        ),
    ]
|
bezyakina/my_django_blog
|
users/admin.py
|
<filename>users/admin.py
from ckeditor.widgets import CKEditorWidget
from django.contrib import admin
from django.contrib.flatpages.admin import FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.db import models
# Define a new FlatPageAdmin that renders flatpage content with CKEditor.
# (Intentionally shadows the imported FlatPageAdmin -- the standard Django
# pattern for customizing the flatpages admin.)
class FlatPageAdmin(FlatPageAdmin):
    formfield_overrides = {models.TextField: {"widget": CKEditorWidget}}
# Re-register FlatPage with the customized admin class.
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, FlatPageAdmin)
|
bezyakina/my_django_blog
|
posts/tests.py
|
<reponame>bezyakina/my_django_blog
from time import sleep
from django.core.cache import cache
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import Client, TestCase
from django.urls import reverse
from posts.models import Follow, Group, Post, User
class PostAppTest(TestCase):
    """End-to-end tests for the posts app: publishing, editing, images,
    caching, follows and comments."""

    def setUp(self):
        # create an authorized user and client
        self.authorized_client = Client()
        self.user = User.objects.create_user(
            username="test", email="<EMAIL>", password="<PASSWORD>"
        )
        self.authorized_client.force_login(self.user)
        # create an unauthorized client
        self.unauthorized_client = Client()
        self.unauthorized_client.logout()
        # create a test group
        self.group = Group.objects.create(
            title="title", slug="slug", description="description"
        )
        # test texts for posts
        self.text_1 = "some text..."
        self.text_2 = "yet another text..."
        # test data for checking that posts contain images
        self.tag = "<img "
        self.error_message = (
            "Загрузите правильное изображение. Файл, "
            "который вы загрузили, поврежден или не "
            "является изображением."
        )
        # create an author and a post to exercise the follow tests
        self.author_1 = User.objects.create_user(
            # NOTE(review): the password literal was unquoted in the
            # original (a syntax error); quoted to restore valid Python.
            username="author_1", password="<PASSWORD>"
        )
        self.post_1_author_1 = Post.objects.create(
            text=self.text_1,
            group=self.group,
            author=self.author_1,
        )
        # a test comment body
        self.comment_text = "some comments..."
        # a baseline post without an image
        self.post_without_image = Post.objects.create(
            text=self.text_1,
            group=self.group,
            author=self.user,
        )

    def test_user_profile_page(self):
        """
        Check that a user's personal profile page exists after registration.
        """
        response = self.authorized_client.get(
            reverse("profile", kwargs={"username": self.user.username})
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual("profile.html", response.templates[0].name)
        self.assertIn("author", response.context)
        self.assertIn("paginator", response.context)

    def test_new_post_with_auth(self):
        """
        Check that an authorized user can publish a post.
        """
        response = self.authorized_client.get("new_post")
        self.assertEqual(response.status_code, 404)
        response = self.authorized_client.post(
            reverse("new_post"),
            data={"text": self.text_1, "group": self.group.id},
            follow=True,
        )
        self.assertEqual(response.status_code, 200)
        post = Post.objects.all().first()
        self.assertEqual(Post.objects.count(), post.id)

    def test_new_post_without_auth(self):
        """
        Check that an unauthorized user cannot publish a post and is
        redirected to the login page instead.
        """
        response = self.unauthorized_client.post(
            reverse("new_post"),
            data={"text": self.text_1, "group": self.group.id},
            follow=True,
        )
        login_url = reverse("login")
        new_post_url = reverse("new_post")
        target_url = f"{login_url}?next={new_post_url}"
        self.assertRedirects(response, target_url)

    def post_contains_params_on_all_pages(
        self, post_id, text=None, image=None
    ):
        """
        Helper: assert that a post (its text or image) is displayed on
        the index, profile, post and group pages.
        """
        urls = [
            reverse("index"),
            reverse("profile", kwargs={"username": self.user.username}),
            reverse(
                "post_view",
                kwargs={"username": self.user.username, "post_id": post_id},
            ),
            reverse("group_posts", kwargs={"slug": self.group.slug}),
        ]
        for url in urls:
            with self.subTest(url=url):
                response = self.authorized_client.get(url)
                if image:
                    self.assertContains(response, image)
                else:
                    self.assertContains(response, self.group)
                    self.assertContains(response, text)

    def test_new_post_display_on_all_pages(self):
        """
        Check that a published post is displayed on the site's main page,
        the user's profile page, the post's own page and the group page.
        """
        self.authorized_client.post(
            reverse("new_post"),
            data={"text": self.text_1, "group": self.group.id},
            follow=True,
        )
        post = Post.objects.all().first()
        self.post_contains_params_on_all_pages(post.id, text=self.text_1)

    def test_edited_post_display_on_all_pages(self):
        """
        Check that an edited post is displayed on the site's main page,
        the user's profile page, the post's own page and the group page.
        """
        self.authorized_client.post(
            reverse(
                "post_edit",
                kwargs={
                    "username": self.post_without_image.author,
                    "post_id": self.post_without_image.id,
                },
            ),
            data={"text": self.text_2, "group": self.group.id},
            follow=True,
        )
        self.post_contains_params_on_all_pages(
            self.post_without_image.id, text=self.text_2
        )

    def test_404(self):
        """
        Check that a 404 status is returned for a nonexistent page.
        """
        response = self.unauthorized_client.get("/404/")
        self.assertEqual(response.status_code, 404)
        response = self.authorized_client.get("/404/")
        self.assertEqual(response.status_code, 404)

    def test_post_view_image_display_on_all_pages(self):
        """
        Check uploading an image and displaying it on every page with posts.
        """
        # A minimal valid 1x1 GIF.
        small_gif = (
            b"\x47\x49\x46\x38\x39\x61\x01\x00\x01\x00\x00\x00\x00\x21\xf9\x04"
            b"\x01\x0a\x00\x01\x00\x2c\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02"
            b"\x02\x4c\x01\x00\x3b"
        )
        img = SimpleUploadedFile(
            "small.gif", small_gif, content_type="image/gif"
        )
        post_with_image = Post.objects.create(
            text=self.text_1,
            group=self.group,
            author=self.user,
            image=img,
        )
        cache.clear()
        self.post_contains_params_on_all_pages(
            post_with_image.id, image=self.tag
        )

    def test_non_image_file_upload_protection(self):
        """
        Check that validation rejects non-image file uploads.
        """
        not_img = SimpleUploadedFile(
            "small.txt", b"test", content_type="text/plain"
        )
        url = reverse("new_post")
        response = self.authorized_client.post(
            url, {"text": self.text_1, "image": not_img}
        )
        self.assertFormError(response, "form", "image", self.error_message)

    def test_index_page_cache(self):
        """
        Check caching of the main page.
        """
        # NOTE(review): assumes the index cache timeout is 20 seconds --
        # confirm against the view's cache_page setting.
        cache.clear()
        Post.objects.create(
            text="cached",
            author=self.user,
        )
        response = self.authorized_client.get(reverse("index"))
        self.assertContains(response, "cached")
        Post.objects.create(
            text="not_cached",
            author=self.user,
        )
        response = self.authorized_client.get(reverse("index"))
        self.assertNotContains(response, "not_cached")
        sleep(20)
        response = self.authorized_client.get(reverse("index"))
        self.assertContains(response, "not_cached")

    def test_auth_user_follow(self):
        """
        An authorized user can follow other users.
        """
        self.assertEqual(Follow.objects.count(), 0)
        response = self.authorized_client.get(
            reverse(
                "profile_follow", kwargs={"username": self.author_1.username}
            ),
            follow=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Follow.objects.count(), 1)

    def test_auth_user_unfollow(self):
        """
        An authorized user can remove other users from their follows.
        """
        self.authorized_client.get(
            reverse(
                "profile_follow", kwargs={"username": self.author_1.username}
            )
        )
        self.assertEqual(Follow.objects.count(), 1)
        response = self.authorized_client.get(
            reverse(
                "profile_unfollow", kwargs={"username": self.author_1.username}
            ),
            follow=True,
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Follow.objects.count(), 0)

    def test_post_in_follower_index(self):
        """
        A user's new post appears in the feed of their followers.
        """
        response = self.authorized_client.get(reverse("follow_index"))
        self.assertNotContains(response, self.text_1)
        cache.clear()
        self.authorized_client.get(
            reverse(
                "profile_follow", kwargs={"username": self.author_1.username}
            )
        )
        response = self.authorized_client.get(reverse("follow_index"))
        self.assertContains(response, self.text_1)

    def test_post_not_in_follower_index(self):
        """
        A user's new post does not appear in the feed of users who do not
        follow them.
        """
        cache.clear()
        response = self.authorized_client.get(reverse("follow_index"))
        self.assertNotContains(response, self.text_1)

    def test_only_auth_user_add_comment(self):
        """
        Only an authorized user can comment on posts.
        """
        response = self.authorized_client.post(
            reverse(
                "add_comment",
                kwargs={
                    "username": self.user.username,
                    "post_id": self.post_without_image.id,
                },
            ),
            {"text": self.comment_text},
            follow=True,
        )
        self.assertContains(response, self.comment_text)
        response = self.unauthorized_client.post(
            reverse(
                "add_comment",
                kwargs={
                    "username": self.user.username,
                    "post_id": self.post_without_image.id,
                },
            ),
            {"text": self.comment_text},
            follow=True,
        )
        self.assertNotContains(response, self.comment_text)
|
bryanwills/netbox
|
netbox/ipam/tables/fhrp.py
|
<gh_stars>0
import django_tables2 as tables
from utilities.tables import BaseTable, ButtonsColumn, MarkdownColumn, TagColumn, ToggleColumn
from ipam.models import *
# Public API of this tables module.
__all__ = (
    'FHRPGroupTable',
    'FHRPGroupAssignmentTable',
)
# Django template snippet rendering one link per assigned IP address.
IPADDRESSES = """
{% for ip in record.ip_addresses.all %}
<a href="{{ ip.get_absolute_url }}">{{ ip }}</a><br />
{% endfor %}
"""
class FHRPGroupTable(BaseTable):
    """List table for FHRP groups (VRRP/HSRP/GLBP/CARP)."""
    pk = ToggleColumn()
    # Link each row to the group's detail view.
    group_id = tables.Column(
        linkify=True
    )
    comments = MarkdownColumn()
    # Rendered from the IPADDRESSES template above; not sortable.
    ip_addresses = tables.TemplateColumn(
        template_code=IPADDRESSES,
        orderable=False,
        verbose_name='IP Addresses'
    )
    interface_count = tables.Column(
        verbose_name='Interfaces'
    )
    tags = TagColumn(
        url_name='ipam:fhrpgroup_list'
    )
    class Meta(BaseTable.Meta):
        model = FHRPGroup
        fields = (
            'pk', 'group_id', 'protocol', 'auth_type', 'auth_key', 'description', 'ip_addresses', 'interface_count',
            'tags', 'created', 'last_updated',
        )
        default_columns = ('pk', 'group_id', 'protocol', 'auth_type', 'description', 'ip_addresses', 'interface_count')
class FHRPGroupAssignmentTable(BaseTable):
    """Table of FHRP group-to-interface assignments."""
    pk = ToggleColumn()
    # Device/VM that owns the assigned interface.
    interface_parent = tables.Column(
        accessor=tables.A('interface.parent_object'),
        linkify=True,
        orderable=False,
        verbose_name='Parent'
    )
    interface = tables.Column(
        linkify=True,
        orderable=False
    )
    group = tables.Column(
        linkify=True
    )
    actions = ButtonsColumn(
        model=FHRPGroupAssignment,
        buttons=('edit', 'delete')
    )
    class Meta(BaseTable.Meta):
        model = FHRPGroupAssignment
        fields = ('pk', 'group', 'interface_parent', 'interface', 'priority')
        exclude = ('id',)
|
ZLLentz/pcds-envs
|
scripts/update_tags.py
|
import argparse
import json
import re
import requests
import subprocess
from pathlib import Path
from packaging import version
# Known conda channels, checked in this order when normalizing channel URLs.
CHANNELS = ['conda-forge', 'pcds-tag', 'lcls-ii']
def latest_version(package):
    """Return the newest version of *package* seen by ``conda search``.

    Also prints a warning when that version is not yet available on
    conda-forge, since builds are expected to fail in that case.
    Any failure of the conda subprocess propagates to the caller.
    """
    # NOTE(review): the original wrapped this call in a try/except that
    # only re-raised; the wrapper was a no-op and has been removed.
    info = subprocess.check_output(['conda', 'search', '--json', package],
                                   universal_newlines=True)
    info_list = json.loads(info)[package]
    channel_versions = {}
    latest_version = "0.0.0"
    for info_item in info_list:
        item_version = info_item['version']
        item_channel = info_item['channel']
        # Normalize the full channel URL down to a known channel name.
        for ch in CHANNELS:
            if ch in item_channel:
                item_channel = ch
                break
        if version.parse(item_version) > version.parse(latest_version):
            latest_version = item_version
        # Track the newest version per channel as well.
        latest_ch_ver = channel_versions.setdefault(item_channel, "0.0.0")
        if version.parse(item_version) > version.parse(latest_ch_ver):
            channel_versions[item_channel] = item_version
    if channel_versions.get('conda-forge', latest_version) != latest_version:
        print(
            f'Warning! {package}={latest_version} '
            'is not ready on conda-forge! '
            'Building with this config is likely to fail!'
        )
    return latest_version
# Extract a semantic version from a PyPI sdist filename,
# e.g. "mypkg-1.10.2.tar.gz" -> "1.10.2".
# Fixes vs. the original: components may be multi-digit (\d+ not \d), the
# dots in ".tar.gz" are escaped, and the pointless f-prefix is dropped.
pypi_version_re = re.compile(r'-(\d+\.\d+\.\d+)\.tar\.gz')
def pypi_latest_version_no_search(package):
    """Return the newest sdist version of *package* scraped from PyPI.

    Raises RuntimeError when no version string is found on the page.
    """
    req = requests.get(f'https://pypi.org/project/{package}')
    matches = set(pypi_version_re.findall(req.text))
    if not matches:
        raise RuntimeError(f'{package} not found on pypi.')
    # Bug fix: the original returned `ver`, the *last* candidate iterated
    # from the set (arbitrary order), instead of the maximum it computed.
    return max(matches, key=version.parse)
def update_specs(path, versions_dict, dry_run=False):
    """Rewrite package specs in *path* to ``pkg>=latest`` pins.

    Parameters
    ----------
    path : pathlib.Path
        Spec file with one requirement per line (e.g. ``numpy>=1.0``).
        Silently skipped when it does not exist.
    versions_dict : dict
        Mapping of package name to latest version; packages absent from
        the mapping are left untouched.
    dry_run : bool
        When True, report the changes but do not write the file.
    """
    if not path.exists():
        print(f'{path} does not exist, skipping')
        return
    print(f'Updating {path} specs...')
    with path.open('r') as fd:
        specs = fd.readlines()
    changed_spec = False
    for i, spec in enumerate(specs):
        # The package name is everything before the first comparison
        # operator, space, or newline. (Raw string fixes the original's
        # invalid '\=' escape warning; '=' needs no escaping in a regex.)
        package = re.split(r'=|>|<| |\n', spec)[0]
        try:
            latest = versions_dict[package]
        except KeyError:
            # Not a package we track; leave the line as-is.
            continue
        spec = spec.strip('\n')
        new_spec = f'{package}>={latest}'
        if new_spec == spec:
            print(f'Will keep existing {package} spec {spec}')
        else:
            print(f'Will change {package} spec from {spec} to {new_spec}')
            specs[i] = new_spec + '\n'
            changed_spec = True
    if changed_spec:
        print('Writing changes for package specs')
        if dry_run:
            print('Skip write because this is a dry run')
        else:
            with path.open('w') as fd:
                fd.writelines(specs)
    else:
        print('No changes found')
def main(args):
    """Update pinned specs for every package listed in keep-updated.txt.

    ``args`` comes from argparse: ``env`` (environment name), ``debug``
    and ``dryrun`` flags.
    """
    env = args.env
    here = Path(__file__).resolve().parent
    env_folder = here.parent / 'envs' / env
    conda_packages = env_folder / 'conda-packages.txt'
    pip_packages = env_folder / 'pip-packages.txt'
    keep_updated = env_folder / 'keep-updated.txt'
    packages = []
    if keep_updated.exists():
        with keep_updated.open('r') as fd:
            packages = fd.readlines()
    else:
        print(f'{keep_updated} does not exist')
    if not packages:
        print(f'Found no packages in {keep_updated}, nothing to do')
        return
    if args.debug:
        # Dump full conda diagnostics to help debug search failures.
        conda_info = subprocess.check_output(['conda', 'info', '-a'],
                                             universal_newlines=True)
        print(conda_info)
    versions_dict = {}
    for package in packages:
        package = package.strip('\n')
        try:
            latest = latest_version(package)
        except Exception:
            # Fall back to scraping PyPI when conda search fails.
            latest = pypi_latest_version_no_search(package)
        versions_dict[package] = latest
        print(f'Latest version of {package} is {latest}')
    print('Updating specs. Make sure to verify and commit')
    update_specs(conda_packages, versions_dict, dry_run=args.dryrun)
    update_specs(pip_packages, versions_dict, dry_run=args.dryrun)
    print('Done')
# Command-line entry point.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('env')
    parser.add_argument('--dryrun', action='store_true')
    parser.add_argument('--debug', action='store_true')
    main(parser.parse_args())
|
ZLLentz/pcds-envs
|
scripts/check_master_tags.py
|
# Helper script to check which packages we need to tag
import os
import pathlib
import shutil
import subprocess
import sys
import time
def get_master_tag(repo):
    """Shallow-clone *repo* and return its ``git describe --tags`` output.

    Returns an empty string when the repo has no reachable tag. Retries
    the clone up to 10 times before giving up; the temp clone directory
    is always removed.
    """
    tmp_dir = 'check_tag_tmp'
    shutil.rmtree(tmp_dir, ignore_errors=True)
    clone_tries = 10
    while clone_tries > 0:
        try:
            # NOTE(review): '<EMAIL>' looks like a scrubbed git SSH prefix
            # (likely 'git@github.com') -- confirm before running.
            subprocess.run(['git', 'clone', '--depth', '1',
                            f'<EMAIL>:{repo}', tmp_dir], check=True)
            break
        except subprocess.CalledProcessError:
            time.sleep(5)
            clone_tries -= 1
            if clone_tries <= 0:
                raise
    try:
        # Run describe inside the clone via cwd= instead of os.chdir, so
        # the process working directory can never be left dangling.
        try:
            tag = subprocess.check_output(['git', 'describe', '--tags'],
                                          cwd=tmp_dir,
                                          universal_newlines=True)
            tag = tag.strip()
        except subprocess.CalledProcessError:
            tag = ''
    finally:
        shutil.rmtree(tmp_dir)
    return tag
def collect_repos(filename):
    """Return the repo names listed one-per-line in *filename*."""
    with open(filename, 'r') as handle:
        contents = handle.read()
    return contents.splitlines()
def main():
    """Print the master-branch tag status of every test repo for an env."""
    try:
        env = sys.argv[1]
    except Exception:
        env = 'pcds'
    here = pathlib.Path(__file__).resolve().parent
    test_repos_file = here.parent / 'envs' / env / 'package-tests.txt'
    tagged = {}
    untagged = []
    for repo in collect_repos(test_repos_file):
        tag = get_master_tag(repo)
        if tag:
            tagged[repo] = tag
        else:
            untagged.append(repo)
    print()
    for repo, tag in tagged.items():
        print(f'{repo} is tagged at {tag}')
    print()
    for repo in untagged:
        print(f'{repo} is not tagged')
# Script entry point.
if __name__ == '__main__':
    main()
|
ZLLentz/pcds-envs
|
scripts/release_notes_table.py
|
<gh_stars>0
import dataclasses
import pathlib
import re
import subprocess
import sys
import typing
import prettytable
# How much of a change is enough to include in the table?
# Values are ver_depth thresholds: 1=major, 2=minor, 3=bugfix.
VER_DEPTH = {
    'pcds': 3,
    'slac': 3,
    'lab': 3,
    'community': 2,
    'other': 1,
}
# Section headers
HEADERS = {
    'pcds': 'PCDS Package Updates',
    'slac': 'SLAC Package Updates',
    'lab': 'Lab Community Package Updates',
    'community': 'Python Community Core Package Updates',
    'other': 'Other Python Community Major Updates',
}
# List of packages to include in PCDS table
PCDS_PACKAGES = [
    'ads-async',
    'blark',
    'happi',
    'hutch-python',
    'hxrsnd',
    'lightpath',
    'lucid',
    'nabs',
    'pcdscalc',
    'pcdsdaq',
    'pcdsdevices',
    'pcdsutils',
    'pcdswidgets',
    'pmgr',
    'pswalker',
    'pyca',
    'pytmc',
    'tc_release',
    'transfocate',
    'typhos',
    'whatrecord',
]
# List of packages to include in SLAC table
SLAC_PACKAGES = [
    'elog',
    'psdaq-control-minimal',
    'psdm_qs_cli',
    'pydm',
    'timechart',
]
# List of packages to include in LAB table
LAB_PACKAGES = [
    'bluesky',
    'bluesky-live',
    'caproto',
    'databroker',
    'epicscorelibs',
    'ophyd',
    'pcaspy',
    'pyepics',
]
# List of packages to include in (notable) COMMUNITY table
COMMUNITY_PACKAGES = [
    'doctr',
    'flake8',
    'ipython',
    'jupyter',
    'numpy',
    'opencv',
    'pandas',
    'pytest',
    'scikit-image',
    'scikit-learn',
    'scipy',
    'sphinx',
    'xarray',
]
# If missing from all above, belongs in OTHER table
# TODO if any of the above strings are not found in the env, error
PACKAGES = {
    'pcds': PCDS_PACKAGES,
    'slac': SLAC_PACKAGES,
    'lab': LAB_PACKAGES,
    'community': COMMUNITY_PACKAGES,
}
# For looking through a git diff
# First capture group is + or - (new version or old version)
# Second capture group is package name
# Third capture group is version string
ver_change_regex = re.compile(r'^(\+|\-)\s+\- ([^=\n]*)=+([^=\n]*)$', flags=re.M)
# For looking through the normal file
# Capture group is the package name
package_name_regex = re.compile(r'\s+\- ([^=\n]*)=+[^=\n]*$', flags=re.M)
@dataclasses.dataclass
class Update:
    """One package's version change between two environment files."""

    package_name: str
    old_version: typing.Optional[str] = None
    new_version: typing.Optional[str] = None

    def ver_depth(self) -> int:
        """
        Return a number indicating how big of an update it was.
        -1 = removed package
        0 = new package
        1 = Major release
        2 = Minor release
        3 = Bugfix
        4 or higher: non-semantic nonsense
        """
        if self.new_version is None:
            return -1
        if self.old_version is None:
            return 0
        version_pairs = zip(self.old_version.split('.'),
                            self.new_version.split('.'))
        for position, (old_part, new_part) in enumerate(version_pairs, 1):
            if old_part != new_part:
                return position
        raise RuntimeError(f'No update for package {self.package_name}')

    def get_row(self) -> list[str]:
        """Row cells for the release-notes table."""
        return [self.package_name, self.old_version, self.new_version]

    def release_link(self) -> str:
        """GitHub release URL for the new version."""
        base = 'https://github.com/pcdshub'
        return f'{base}/{self.package_name}/releases/tag/v{self.new_version}'

    @property
    def added(self) -> bool:
        return self.new_version is not None and self.old_version is None

    @property
    def removed(self) -> bool:
        return self.old_version is not None and self.new_version is None
def get_package_updates(
    path: typing.Union[str, pathlib.Path],
) -> dict[str, Update]:
    """Scans a git diff of the env.yaml file for changes.

    Each '+' line contributes a new version and each '-' line an old
    version to the Update entry for that package.
    """
    diff_output = subprocess.check_output(
        ['git', 'diff', 'master', str(path)],
        universal_newlines=True,
    )
    updates: dict[str, Update] = {}
    for diff_type, package_name, version_str in ver_change_regex.findall(
        diff_output
    ):
        entry = updates.setdefault(
            package_name, Update(package_name=package_name)
        )
        if diff_type == '+':
            entry.new_version = version_str
        elif diff_type == '-':
            entry.old_version = version_str
        else:
            raise RuntimeError(
                f'Unexpected diff_type {diff_type} for {package_name}'
            )
    return updates
def build_tables(
    updates: dict[str, Update]
) -> dict[str, prettytable.PrettyTable]:
    """Makes the tables that we'd like to display in the update notes.

    Each update lands in the table of the first PACKAGES group that
    lists it, provided its ver_depth passes that group's VER_DEPTH
    threshold; ungrouped (or filtered-out) updates fall through to the
    'other' table when they are at least a major release.
    """
    headers = ('Package', 'Old', 'New')
    table_names = ('pcds', 'slac', 'lab', 'community', 'other')
    tables = {name: prettytable.PrettyTable() for name in table_names}
    # The pcds table gets an extra column linking to the release notes.
    tables['pcds'].field_names = list(headers) + ['Release_Notes']
    for name in table_names[1:]:
        tables[name].field_names = headers
    for update in updates.values():
        # Added/removed packages are reported separately, not in tables.
        if update.added or update.removed:
            continue
        row_added = False
        for group, package_list in PACKAGES.items():
            if update.package_name in package_list:
                if update.ver_depth() <= VER_DEPTH[group]:
                    # Include this in the table
                    row = update.get_row()
                    if group == 'pcds':
                        row += [update.release_link()]
                    tables[group].add_row(row)
                    row_added = True
                # Package belongs to this group either way; stop searching.
                break
        if not row_added and update.ver_depth() <= VER_DEPTH['other']:
            tables['other'].add_row(update.get_row())
    return tables
def audit_package_lists(path):
    """Find typos in the package list globals."""
    with open(path, 'r') as fd:
        installed = set(package_name_regex.findall(fd.read()))
    err = [
        package_name
        for package_list in PACKAGES.values()
        for package_name in package_list
        if package_name not in installed
    ]
    if err:
        raise RuntimeError(
            'Found package names that are not installed! '
            'Check your spelling and environment! '
            f'{err}'
        )
def main(args):
    """Print release-notes sections for the pcds environment diff.

    ``args`` is accepted for CLI compatibility but unused.
    NOTE(review): the original bound ``args[0]`` (the *script name*,
    since sys.argv is passed) to an unused ``env_name`` local; removed.
    """
    path = '../envs/pcds/env.yaml'
    audit_package_lists(path)
    updates = get_package_updates(path)
    # First, added/removed packages
    added_pkgs = []
    removed_pkgs = []
    for update in updates.values():
        if update.added:
            added_pkgs.append(update.package_name)
        elif update.removed:
            removed_pkgs.append(update.package_name)
    if added_pkgs:
        header = 'Added the Following Packages'
        print(header)
        print('-' * len(header))
        print()
        for pkg in sorted(added_pkgs):
            print(f'- {pkg}')
        print()
    if removed_pkgs:
        header = 'Removed the Following Packages'
        print(header)
        print('-' * len(header))
        print()
        for pkg in sorted(removed_pkgs):
            print(f'- {pkg}')
        print()
    # Next, updates by category
    tables = build_tables(updates)
    for name, table in tables.items():
        # Only print sections that have at least one row.
        if len(list(table)) > 0:
            print(HEADERS[name])
            divider = '-' * len(HEADERS[name])
            print(divider)
            print()
            table.set_style(prettytable.MARKDOWN)
            print(table)
            print()


if __name__ == '__main__':
    main(sys.argv)
|
ZLLentz/pcds-envs
|
scripts/test_setup.py
|
import argparse
import configparser
import contextlib
import json
import os
import requests
import subprocess
from pathlib import Path
# Template for turning an "org/repo" entry into a clone URL.
URL_BASE = 'https://github.com/{}.git'
parser = argparse.ArgumentParser()
parser.add_argument('env')
# --tag: check each clone out at the version installed in the current env.
parser.add_argument('--tag', action='store_true')
def version_info():
    """Return a mapping of installed conda package name -> version."""
    raw = subprocess.check_output(['conda', 'list', '--json'],
                                  universal_newlines=True)
    return {entry['name']: entry['version'] for entry in json.loads(raw)}
def setup_all_tests(repo_file, tags=None):
    """Clone every repo listed in *repo_file* for testing.

    Parameters
    ----------
    repo_file : str or Path
        Text file with one GitHub "org/repo" entry per line.
    tags : dict or None
        Mapping of package name to installed version. When given, each
        clone is checked out at its tag; a package missing from the
        mapping raises RuntimeError.
    """
    # NOTE(review): removed the original's unused local `url_base`, which
    # shadowed the module-level URL_BASE actually used by setup_one_test.
    repo_file = Path(repo_file)
    with repo_file.open('r') as fd:
        repos = fd.read().strip().splitlines()
    for repo in repos:
        pkg = repo.split('/')[-1]
        if tags is None:
            setup_one_test(repo, pkg)
        else:
            try:
                tag = tags[pkg]
            except KeyError as err:
                msg = f'Did not find package {pkg} in environment'
                raise RuntimeError(msg) from err
            # Pass the tag we already looked up (the original re-indexed).
            setup_one_test(repo, pkg, tag=tag)
def setup_one_test(repo, pkg, tag=None):
    """Shallow-clone one repo; optionally check out *tag*.

    The versioneer ``tag_prefix`` from the package's setup.cfg, if any,
    is prepended to *tag* before checkout.
    """
    url = URL_BASE.format(repo)
    try:
        subprocess.run(['git', 'clone', '--recursive', url, '--depth', '1'],
                       check=True)
    except subprocess.CalledProcessError as err:
        raise RuntimeError(f'Error cloning from {url}') from err
    if tag is None:
        return
    print('Checking out package tag')
    with pushd(pkg):
        config = configparser.ConfigParser()
        config.read('setup.cfg')
        try:
            tag_prefix = config['versioneer']['tag_prefix']
        except KeyError:
            tag_prefix = ''
        try:
            subprocess.run(['git', 'fetch', '--tags'], check=True)
            subprocess.run(['git', 'checkout', tag_prefix + tag],
                           check=True)
        except subprocess.CalledProcessError as err:
            # NOTE(review): the original also caught KeyError here, but
            # nothing inside this try can raise it -- dead branch removed.
            raise RuntimeError('Error checking out tag') from err
@contextlib.contextmanager
def pushd(new_dir):
    """Temporarily chdir into *new_dir*, restoring the old cwd on exit.

    Bug fix: the original did not use try/finally, so an exception in
    the ``with`` body left the process stranded in *new_dir*.
    """
    previous_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(previous_dir)
def main(args):
    """Clone (and optionally tag-checkout) all test repos into ./tests."""
    print('Running pcds-envs test setup')
    pcds_envs = Path(__file__).resolve().parent.parent
    repo_file = pcds_envs / 'envs' / args.env / 'package-tests.txt'
    tags = None
    if args.tag:
        tags = version_info()
        if len(tags) == 0:
            print('No packages in current environment to test, quitting')
            return
    os.mkdir('tests')
    with pushd('tests'):
        setup_all_tests(repo_file, tags=tags)
# Command-line entry point.
if __name__ == '__main__':
    args = parser.parse_args()
    main(args)
|
kc611/aeppl
|
tests/test_transforms.py
|
import aesara
import aesara.tensor as at
import numpy as np
import pytest
import scipy as sp
from aesara.graph.fg import FunctionGraph
from numdifftools import Jacobian
from aeppl.joint_logprob import joint_logprob
from aeppl.transforms import (
DEFAULT_TRANSFORM,
LogOddsTransform,
LogTransform,
RVTransform,
TransformValuesMapping,
TransformValuesOpt,
_default_transformed_rv,
)
from tests.utils import assert_no_rvs
@pytest.mark.parametrize(
"at_dist, dist_params, sp_dist, size",
[
(at.random.uniform, (0, 1), sp.stats.uniform, ()),
(
at.random.pareto,
(1.5, 10.5),
lambda b, scale: sp.stats.pareto(b, scale=scale),
(),
),
(
at.random.triangular,
(1.5, 3.0, 10.5),
lambda lower, mode, upper: sp.stats.triang(
(mode - lower) / (upper - lower), loc=lower, scale=upper - lower
),
(),
),
(
at.random.halfnormal,
(0, 1),
sp.stats.halfnorm,
(),
),
(
at.random.wald,
(1.5, 10.5),
lambda mean, scale: sp.stats.invgauss(mean / scale, scale=scale),
(),
),
(
at.random.exponential,
(1.5,),
lambda mu: sp.stats.expon(scale=mu),
(),
),
pytest.param(
at.random.lognormal,
(-1.5, 10.5),
lambda mu, sigma: sp.stats.lognorm(s=sigma, loc=0, scale=np.exp(mu)),
(),
),
(
at.random.lognormal,
(-1.5, 1.5),
lambda mu, sigma: sp.stats.lognorm(s=sigma, scale=np.exp(mu)),
(),
),
(
at.random.halfcauchy,
(1.5, 10.5),
lambda alpha, beta: sp.stats.halfcauchy(loc=alpha, scale=beta),
(),
),
(
at.random.gamma,
(1.5, 10.5),
lambda alpha, inv_beta: sp.stats.gamma(alpha, scale=1.0 / inv_beta),
(),
),
(
at.random.invgamma,
(1.5, 10.5),
lambda alpha, beta: sp.stats.invgamma(alpha, scale=beta),
(),
),
(
at.random.chisquare,
(1.5,),
lambda df: sp.stats.chi2(df),
(),
),
(
at.random.weibull,
(1.5, 10.5),
lambda alpha, beta: sp.stats.weibull_min(alpha, scale=beta),
(),
),
(
at.random.beta,
(1.5, 1.5),
lambda alpha, beta: sp.stats.beta(alpha, beta),
(),
),
(
at.random.vonmises,
(1.5, 10.5),
lambda mu, kappa: sp.stats.vonmises(kappa, loc=mu),
(),
),
(
at.random.dirichlet,
(np.array([0.5, 0.5]),),
lambda alpha: sp.stats.dirichlet(alpha),
(),
),
pytest.param(
at.random.dirichlet,
(np.array([0.5, 0.5]),),
lambda alpha: sp.stats.dirichlet(alpha),
(3, 2),
marks=pytest.mark.xfail(
reason="Need to make the test framework work for arbitrary sizes"
),
),
],
)
def test_transformed_logprob(at_dist, dist_params, sp_dist, size):
"""
This test takes a `RandomVariable` type, plus parameters, and uses it to
construct a variable ``a`` that's used in the graph ``b =
at.random.normal(a, 1.0)``. The transformed log-probability is then
computed for ``b``. We then test that the log-probability of ``a`` is
properly transformed, as well as any instances of ``a`` that are used
elsewhere in the graph (i.e. in ``b``), by comparing the graph for the
transformed log-probability with the SciPy-derived log-probability--using a
numeric approximation to the Jacobian term.
"""
a = at_dist(*dist_params, size=size)
a.name = "a"
a_value_var = a.clone()
a_value_var.name = "a_value"
b = at.random.normal(a, 1.0)
b.name = "b"
b_value_var = b.clone()
b_value_var.name = "b_value"
transform_opt = TransformValuesOpt({a_value_var: DEFAULT_TRANSFORM})
res = joint_logprob({a: a_value_var, b: b_value_var}, extra_rewrites=transform_opt)
test_val_rng = np.random.RandomState(3238)
decimals = 6 if aesara.config.floatX == "float64" else 4
logp_vals_fn = aesara.function([a_value_var, b_value_var], res)
a_trans_op = _default_transformed_rv(a.owner.op, a.owner).op
transform = a_trans_op.transform
a_forward_fn = aesara.function(
[a_value_var], transform.forward(a_value_var, *a.owner.inputs)
)
a_backward_fn = aesara.function(
[a_value_var], transform.backward(a_value_var, *a.owner.inputs)
)
for i in range(10):
a_dist = sp_dist(*dist_params)
a_val = a_dist.rvs(size=size, random_state=test_val_rng).astype(
a_value_var.dtype
)
b_dist = sp.stats.norm(a_val, 1.0)
b_val = b_dist.rvs(random_state=test_val_rng).astype(b_value_var.dtype)
exp_logprob_val = a_dist.logpdf(a_val)
a_trans_value = a_forward_fn(a_val)
if a_val.ndim > 0:
# exp_logprob_val = np.vectorize(a_dist.logpdf, signature="(n)->()")(a_val)
jacobian_val = Jacobian(a_backward_fn)(a_trans_value)[:-1]
else:
jacobian_val = np.atleast_2d(
sp.misc.derivative(a_backward_fn, a_trans_value, dx=1e-6)
)
exp_logprob_val += np.log(np.linalg.det(jacobian_val))
exp_logprob_val += b_dist.logpdf(b_val).sum()
logprob_val = logp_vals_fn(a_trans_value, b_val)
np.testing.assert_almost_equal(exp_logprob_val, logprob_val, decimal=decimals)
@pytest.mark.parametrize("use_jacobian", [True, False])
def test_simple_transformed_logprob_nojac(use_jacobian):
    """The log-transform Jacobian term is included iff ``use_jacobian`` is set."""
    halfnorm_rv = at.random.halfnormal(0, 3, name="x_rv")
    halfnorm_vv = halfnorm_rv.clone()

    tr = TransformValuesOpt({halfnorm_vv: DEFAULT_TRANSFORM})
    tr_logp = joint_logprob(
        {halfnorm_rv: halfnorm_vv}, extra_rewrites=tr, use_jacobian=use_jacobian
    )

    # The value variable lives on the log scale; the Jacobian of exp at
    # log(2.5) contributes log(2.5) when enabled.
    expected = sp.stats.halfnorm(0, 3).logpdf(2.5)
    if use_jacobian:
        expected += np.log(2.5)

    assert np.isclose(tr_logp.eval({halfnorm_vv: np.log(2.5)}), expected)
def test_fallback_log_jac_det():
"""
Test fallback log_jac_det in RVTransform produces correct the graph for a
simple transformation: x**2 -> -log(2*x)
"""
class SquareTransform(RVTransform):
name = "square"
def forward(self, value, *inputs):
return at.power(value, 2)
def backward(self, value, *inputs):
return at.sqrt(value)
square_tr = SquareTransform()
value = at.scalar("value")
value_tr = square_tr.forward(value)
log_jac_det = square_tr.log_jac_det(value_tr)
assert np.isclose(log_jac_det.eval({value: 3}), -np.log(6))
def test_hierarchical_uniform_transform():
"""
This model requires rv-value replacements in the backward transformation of
the value var `x`
"""
lower_rv = at.random.uniform(0, 1, name="lower")
upper_rv = at.random.uniform(9, 10, name="upper")
x_rv = at.random.uniform(lower_rv, upper_rv, name="x")
lower = lower_rv.clone()
upper = upper_rv.clone()
x = x_rv.clone()
transform_opt = TransformValuesOpt(
{
lower: DEFAULT_TRANSFORM,
upper: DEFAULT_TRANSFORM,
x: DEFAULT_TRANSFORM,
}
)
logp = joint_logprob(
{lower_rv: lower, upper_rv: upper, x_rv: x},
extra_rewrites=transform_opt,
)
assert_no_rvs(logp)
assert not np.isinf(logp.eval({lower: -10, upper: 20, x: -20}))
def test_nondefault_transforms():
loc_rv = at.random.uniform(-10, 10, name="loc")
scale_rv = at.random.uniform(-1, 1, name="scale")
x_rv = at.random.normal(loc_rv, scale_rv, name="x")
loc = loc_rv.clone()
scale = scale_rv.clone()
x = x_rv.clone()
transform_opt = TransformValuesOpt(
{
loc: None,
scale: LogOddsTransform(),
x: LogTransform(),
}
)
logp = joint_logprob(
{loc_rv: loc, scale_rv: scale, x_rv: x},
extra_rewrites=transform_opt,
)
# Check numerical evaluation matches with expected transforms
loc_val = 0
scale_val_tr = -1
x_val_tr = -1
scale_val = sp.special.expit(scale_val_tr)
x_val = np.exp(x_val_tr)
exp_logp = 0
exp_logp += sp.stats.uniform(-10, 20).logpdf(loc_val)
exp_logp += sp.stats.uniform(-1, 2).logpdf(scale_val)
exp_logp += np.log(scale_val) + np.log1p(-scale_val) # logodds log_jac_det
exp_logp += sp.stats.norm(loc_val, scale_val).logpdf(x_val)
exp_logp += x_val_tr # log log_jac_det
assert np.isclose(
logp.eval({loc: loc_val, scale: scale_val_tr, x: x_val_tr}),
exp_logp,
)
def test_default_transform_multiout():
r"""Make sure that `Op`\s with multiple outputs are handled correctly."""
# This SVD value is necessarily `1`, but it's generated by an `Op` with
# multiple outputs and no default output.
sd = at.linalg.svd(at.eye(1))[1][0]
x_rv = at.random.normal(0, sd, name="x")
x = x_rv.clone()
transform_opt = TransformValuesOpt({x: DEFAULT_TRANSFORM})
logp = joint_logprob(
{x_rv: x},
extra_rewrites=transform_opt,
)
assert np.isclose(
logp.eval({x: 1}),
sp.stats.norm(0, 1).logpdf(1),
)
def test_nonexistent_default_transform():
"""
Test that setting `DEFAULT_TRANSFORM` to a variable that has no default
transform does not fail
"""
x_rv = at.random.normal(name="x")
x = x_rv.clone()
transform_opt = TransformValuesOpt({x: DEFAULT_TRANSFORM})
logp = joint_logprob(
{x_rv: x},
extra_rewrites=transform_opt,
)
assert np.isclose(
logp.eval({x: 1}),
sp.stats.norm(0, 1).logpdf(1),
)
def test_TransformValuesMapping():
x = at.vector()
fg = FunctionGraph(outputs=[x])
tvm = TransformValuesMapping({})
fg.attach_feature(tvm)
tvm2 = TransformValuesMapping({})
fg.attach_feature(tvm2)
assert fg._features[-1] is tvm
|
kc611/aeppl
|
aeppl/transforms.py
|
import abc
from functools import partial, singledispatch
from typing import Dict, List, Optional, Type, Union
import aesara.tensor as at
from aesara.gradient import jacobian
from aesara.graph.basic import Node, Variable
from aesara.graph.features import AlreadyThere, Feature
from aesara.graph.fg import FunctionGraph
from aesara.graph.op import Op
from aesara.graph.opt import GlobalOptimizer, in2out, local_optimizer
from aesara.graph.utils import MetaType
from aesara.tensor.random.op import RandomVariable
from aesara.tensor.var import TensorVariable
from aeppl.logprob import _logprob
@singledispatch
def _default_transformed_rv(
    op: Op,
    node: Node,
) -> Optional[TensorVariable]:
    """Create a graph for a transformed log-probability of a ``RandomVariable``.

    This function dispatches on the type of ``op``, which should be a subclass
    of ``RandomVariable``. If you want to implement new transforms for a
    ``RandomVariable``, register a function on this dispatcher.

    The base case returns ``None``, meaning no default transform is registered
    for ``type(op)``.  NOTE(review): the registered implementations (see
    ``DistributionMeta.__new__``) return the new ``Apply`` node rather than a
    ``TensorVariable``; the return annotation looks stale — confirm.
    """
    return None
class DistributionMeta(MetaType):
def __new__(cls, name, bases, clsdict):
cls_res = super().__new__(cls, name, bases, clsdict)
base_op = clsdict.get("base_op", None)
if base_op is not None and clsdict.get("default", False):
# Create dispatch functions
@_default_transformed_rv.register(type(base_op))
def class_transformed_rv(op, node):
new_op = cls_res()
res = new_op.make_node(*node.inputs)
res.outputs[1].name = node.outputs[1].name
return res
return cls_res
class RVTransform(abc.ABC):
@abc.abstractmethod
def forward(self, value: TensorVariable, *inputs: Variable) -> TensorVariable:
"""Apply the transformation."""
@abc.abstractmethod
def backward(self, value: TensorVariable, *inputs: Variable) -> TensorVariable:
"""Invert the transformation."""
def log_jac_det(self, value: TensorVariable, *inputs) -> TensorVariable:
"""Construct the log of the absolute value of the Jacobian determinant."""
# jac = at.reshape(
# gradient(at.sum(self.backward(value, *inputs)), [value]), value.shape
# )
# return at.log(at.abs_(jac))
phi_inv = self.backward(value, *inputs)
return at.log(at.nlinalg.det(at.atleast_2d(jacobian(phi_inv, [value]))))
class TransformedRV(RandomVariable, metaclass=DistributionMeta):
r"""A base class for transformed `RandomVariable`\s."""
@_logprob.register(TransformedRV)
def transformed_logprob(op, values, *inputs, use_jacobian=True, **kwargs):
    """Compute the log-likelihood graph for a `TransformedRV`.

    We assume that the value variable was back-transformed to be on the natural
    support of the respective random variable.

    Parameters
    ==========
    op
        The `TransformedRV` whose log-probability is being constructed.
    values
        A single-element sequence holding the (back-transformed) value variable.
    use_jacobian
        When ``True``, add the log-determinant of the transform's Jacobian so
        the density is correct on the transformed (unconstrained) space.
    """
    (value,) = values

    logprob = _logprob(op.base_op, values, *inputs, **kwargs)

    if use_jacobian:
        # Map the value back onto the transformed space, where the Jacobian
        # correction is evaluated.
        original_forward_value = op.transform.forward(value, *inputs)
        # Renamed from `jacobian`: that name shadowed
        # `aesara.gradient.jacobian` imported at module level.
        log_jac_det = op.transform.log_jac_det(original_forward_value, *inputs)
        logprob += log_jac_det

    return logprob
class DefaultTransformSentinel:
    # Marker type: mapping a value variable to `DEFAULT_TRANSFORM` in
    # `TransformValuesOpt` requests the RV's registered default transform.
    pass


# Singleton sentinel instance used in value-to-transform mappings.
DEFAULT_TRANSFORM = DefaultTransformSentinel()
@local_optimizer(tracks=None)
def transform_values(fgraph: FunctionGraph, node: Node) -> Optional[List[Node]]:
    """Apply transforms to value variables.

    It is assumed that the input value variables correspond to forward
    transformations, usually chosen in such a way that the values are
    unconstrained on the real line.

    For example, if ``Y = halfnormal(...)``, we assume the respective value
    variable is specified on the log scale and back-transform it to obtain
    ``Y`` on the natural scale.
    """
    rv_map_feature = getattr(fgraph, "preserve_rv_mappings", None)
    values_to_transforms = getattr(fgraph, "values_to_transforms", None)

    # Both features must be attached to the graph for this rewrite to apply.
    if rv_map_feature is None or values_to_transforms is None:
        return None  # pragma: no cover

    try:
        rv_var = node.default_output()
    except ValueError:
        # Multi-output nodes without a default output are not transformable here.
        return None

    value_var = rv_map_feature.rv_values.get(rv_var, None)
    if value_var is None:
        return None

    transform = values_to_transforms.get(value_var, None)

    if transform is None:
        return None
    elif transform is DEFAULT_TRANSFORM:
        # Look up the per-RV-type default transform via the dispatcher.
        trans_node = _default_transformed_rv(node.op, node)
        if trans_node is None:
            return None
        transform = trans_node.op.transform
    else:
        # Build a one-off `TransformedRV` op for the explicitly requested
        # transform.
        new_op = _create_transformed_rv_op(node.op, transform)()
        trans_node = new_op.make_node(*node.inputs)
        trans_node.outputs[1].name = node.outputs[1].name

    # We now assume that the old value variable represents the *transformed space*.
    # This means that we need to replace all instance of the old value variable
    # with "inversely/un-" transformed versions of itself.
    new_value_var = transform.backward(value_var, *trans_node.inputs)
    if value_var.name and getattr(transform, "name", None):
        new_value_var.name = f"{value_var.name}_{transform.name}"

    # Map TransformedRV to new value var and delete old mapping
    new_rv_var = trans_node.outputs[1]
    rv_map_feature.rv_values[new_rv_var] = new_value_var
    del rv_map_feature.rv_values[rv_var]

    return trans_node.outputs
class TransformValuesMapping(Feature):
    r"""A `Feature` that maintains a map between value variables and their transforms."""

    def __init__(self, values_to_transforms):
        # Mapping of value variable -> RVTransform | DEFAULT_TRANSFORM | None.
        self.values_to_transforms = values_to_transforms

    def on_attach(self, fgraph):
        # A graph carries at most one such mapping; raising `AlreadyThere`
        # makes a second `attach_feature` a no-op (see
        # `test_TransformValuesMapping`).
        if hasattr(fgraph, "values_to_transforms"):
            raise AlreadyThere()
        fgraph.values_to_transforms = self.values_to_transforms
class TransformValuesOpt(GlobalOptimizer):
r"""Transforms value variables according to a map and/or per-`RandomVariable` defaults."""
default_transform_opt = in2out(transform_values, ignore_newtrees=True)
def __init__(
self,
values_to_transforms: Dict[
TensorVariable, Union[RVTransform, DefaultTransformSentinel, None]
],
):
"""
Parameters
==========
values_to_transforms
Mapping between value variables and their transformations. Each
value variable can be assigned one of `RVTransform`,
``DEFAULT_TRANSFORM``, or ``None``. If a transform is not specified
for a specific value variable it will not be transformed.
"""
self.values_to_transforms = values_to_transforms
def add_requirements(self, fgraph):
values_transforms_feature = TransformValuesMapping(self.values_to_transforms)
fgraph.attach_feature(values_transforms_feature)
def apply(self, fgraph: FunctionGraph):
return self.default_transform_opt.optimize(fgraph)
class LogTransform(RVTransform):
    """Map a positive-valued variable onto the real line via ``log``."""

    name = "log"

    def forward(self, value, *inputs):
        """Natural space -> unconstrained space."""
        return at.log(value)

    def backward(self, value, *inputs):
        """Unconstrained space -> natural (positive) space."""
        return at.exp(value)

    def log_jac_det(self, value, *inputs):
        # d/dv exp(v) == exp(v), so log|J| is just the value itself.
        return value
class IntervalTransform(RVTransform):
    """Map an interval-constrained variable onto the real line.

    Handles intervals bounded on both sides, only below, or only above; the
    bounds are derived from the RV node's inputs via ``args_fn``.
    """

    name = "interval"

    def __init__(self, args_fn):
        # `args_fn(*inputs)` returns the `(lower, upper)` bounds; either may
        # be `None` for a one-sided interval.
        self.args_fn = args_fn

    def forward(self, value, *inputs):
        a, b = self.args_fn(*inputs)
        if a is not None and b is not None:
            # Log-odds of the position of `value` within (a, b).
            return at.log(value - a) - at.log(b - value)
        elif a is not None:
            return at.log(value - a)
        elif b is not None:
            return at.log(b - value)
        # NOTE(review): both bounds being `None` falls through and returns
        # `None`; presumably unreachable for the registered transforms — confirm.

    def backward(self, value, *inputs):
        a, b = self.args_fn(*inputs)
        if a is not None and b is not None:
            # Convex combination of the bounds weighted by sigmoid(value).
            sigmoid_x = at.sigmoid(value)
            return sigmoid_x * b + (1 - sigmoid_x) * a
        elif a is not None:
            return at.exp(value) + a
        elif b is not None:
            return b - at.exp(value)

    def log_jac_det(self, value, *inputs):
        a, b = self.args_fn(*inputs)
        if a is not None and b is not None:
            s = at.softplus(-value)
            return at.log(b - a) - 2 * s - value
        else:
            # One-sided intervals use an exp/log map, whose log-Jacobian is
            # the value itself.
            return value
class LogOddsTransform(RVTransform):
    """Map a (0, 1)-valued variable onto the real line via the log-odds."""

    name = "logodds"

    def forward(self, value, *inputs):
        """Natural (0, 1) space -> unconstrained log-odds space."""
        return at.log(value / (1 - value))

    def backward(self, value, *inputs):
        """Unconstrained space -> (0, 1) via the logistic sigmoid."""
        return at.expit(value)

    def log_jac_det(self, value, *inputs):
        # d/dv expit(v) == expit(v) * (1 - expit(v)).
        s = at.sigmoid(value)
        return at.log(s) + at.log1p(-s)
class StickBreaking(RVTransform):
    """Map a simplex-valued variable to an unconstrained space.

    Used as the default transform for Dirichlet RVs (see below).  The forward
    map drops one degree of freedom — the output has one fewer element along
    the last axis — matching the sum-to-one constraint.
    """

    name = "stickbreaking"

    def forward(self, value, *inputs):
        log_value = at.log(value)
        # Center the log-values so the dropped last coordinate is recoverable.
        shift = at.sum(log_value, -1, keepdims=True) / value.shape[-1]
        return log_value[..., :-1] - shift

    def backward(self, value, *inputs):
        # Re-append the implicit last coordinate (negative sum of the rest),
        # then softmax back onto the simplex; max-subtraction keeps the
        # exponentials numerically stable.
        value = at.concatenate([value, -at.sum(value, -1, keepdims=True)])
        exp_value_max = at.exp(value - at.max(value, -1, keepdims=True))
        return exp_value_max / at.sum(exp_value_max, -1, keepdims=True)

    def log_jac_det(self, value, *inputs):
        # N is the simplex dimension (one more than the unconstrained value).
        N = value.shape[-1] + 1
        sum_value = at.sum(value, -1, keepdims=True)
        value_sum_expanded = value + sum_value
        value_sum_expanded = at.concatenate(
            [value_sum_expanded, at.zeros(sum_value.shape)], -1
        )
        logsumexp_value_expanded = at.logsumexp(value_sum_expanded, -1, keepdims=True)
        res = at.log(N) + (N * sum_value) - (N * logsumexp_value_expanded)
        return at.sum(res, -1)
class CircularTransform(RVTransform):
    """Transform for angular variables; used as the von Mises default."""

    name = "circular"

    def forward(self, value, *inputs):
        # Forward is the identity on tensors.
        return at.as_tensor_variable(value)

    def backward(self, value, *inputs):
        # Wrap onto the circle via atan2 of the sine/cosine pair.
        return at.arctan2(at.sin(value), at.cos(value))

    def log_jac_det(self, value, *inputs):
        # The wrap is volume-preserving, so the log-Jacobian is zero.
        return at.zeros(value.shape)
def _create_transformed_rv_op(
    rv_op: Op,
    transform: RVTransform,
    *,
    default: bool = False,
    cls_dict_extra: Optional[Dict] = None,
) -> Type[TransformedRV]:
    """Create a new `TransformedRV` given a base `RandomVariable` `Op`

    Parameters
    ==========
    rv_op
        The `RandomVariable` for which we want to construct a `TransformedRV`.
    transform
        The `RVTransform` for `rv_op`.
    default
        If ``False`` do not make `transform` the default transform for `rv_op`.
    cls_dict_extra
        Additional class members to add to the constructed `TransformedRV`.
    """
    trans_name = getattr(transform, "name", "transformed")
    rv_type_name = type(rv_op).__name__
    # Start from the base op's class dict so the new op inherits its members.
    cls_dict = type(rv_op).__dict__.copy()
    rv_name = cls_dict.get("name", "")
    if rv_name:
        cls_dict["name"] = f"{rv_name}_{trans_name}"
    # These three entries are consumed by `DistributionMeta.__new__`, which
    # also registers a default-transform dispatch when `default` is true.
    cls_dict["base_op"] = rv_op
    cls_dict["transform"] = transform
    cls_dict["default"] = default

    if cls_dict_extra is not None:
        cls_dict.update(cls_dict_extra)

    new_op_type = type(f"Transformed{rv_type_name}", (TransformedRV,), cls_dict)

    return new_op_type
create_default_transformed_rv_op = partial(_create_transformed_rv_op, default=True)
TransformedUniformRV = create_default_transformed_rv_op(
at.random.uniform,
# inputs[3] = lower; inputs[4] = upper
IntervalTransform(lambda *inputs: (inputs[3], inputs[4])),
)
TransformedParetoRV = create_default_transformed_rv_op(
at.random.pareto,
# inputs[3] = alpha
IntervalTransform(lambda *inputs: (inputs[3], None)),
)
TransformedTriangularRV = create_default_transformed_rv_op(
at.random.triangular,
# inputs[3] = lower; inputs[5] = upper
IntervalTransform(lambda *inputs: (inputs[3], inputs[5])),
)
TransformedHalfNormalRV = create_default_transformed_rv_op(
at.random.halfnormal,
# inputs[3] = loc
IntervalTransform(lambda *inputs: (inputs[3], None)),
)
TransformedWaldRV = create_default_transformed_rv_op(
at.random.wald,
LogTransform(),
)
TransformedExponentialRV = create_default_transformed_rv_op(
at.random.exponential,
LogTransform(),
)
TransformedLognormalRV = create_default_transformed_rv_op(
at.random.lognormal,
LogTransform(),
)
TransformedHalfCauchyRV = create_default_transformed_rv_op(
at.random.halfcauchy,
LogTransform(),
)
TransformedGammaRV = create_default_transformed_rv_op(
at.random.gamma,
LogTransform(),
)
TransformedInvGammaRV = create_default_transformed_rv_op(
at.random.invgamma,
LogTransform(),
)
TransformedChiSquareRV = create_default_transformed_rv_op(
at.random.chisquare,
LogTransform(),
)
TransformedWeibullRV = create_default_transformed_rv_op(
at.random.weibull,
LogTransform(),
)
TransformedBetaRV = create_default_transformed_rv_op(
at.random.beta,
LogOddsTransform(),
)
TransformedVonMisesRV = create_default_transformed_rv_op(
at.random.vonmises,
CircularTransform(),
)
TransformedDirichletRV = create_default_transformed_rv_op(
at.random.dirichlet,
StickBreaking(),
)
|
kc611/aeppl
|
aeppl/mixture.py
|
from typing import List, Optional
import aesara.tensor as at
import numpy as np
from aesara.compile.builders import OpFromGraph
from aesara.graph.basic import Apply
from aesara.graph.fg import FunctionGraph
from aesara.graph.opt import local_optimizer, pre_greedy_local_optimizer
from aesara.ifelse import ifelse
from aesara.tensor.basic import Join, MakeVector
from aesara.tensor.random.op import RandomVariable
from aesara.tensor.random.opt import local_dimshuffle_rv_lift, local_subtensor_rv_lift
from aesara.tensor.shape import shape_tuple
from aesara.tensor.var import TensorVariable
from aeppl.abstract import MeasurableVariable
from aeppl.logprob import _logprob, logprob
from aeppl.opt import naive_bcast_rv_lift, rv_sinking_db, subtensor_ops
from aeppl.utils import get_constant_value, indices_from_subtensor
def rv_pull_down(x: TensorVariable, dont_touch_vars=None) -> TensorVariable:
    """Pull a ``RandomVariable`` ``Op`` down through a graph, when possible.

    Greedily applies the dimshuffle/subtensor/broadcast "lift" rewrites to the
    graph of ``x``.

    Parameters
    ==========
    x
        The variable whose graph should be rewritten.
    dont_touch_vars
        Variables that the rewrites must leave intact (passed as the
        `FunctionGraph` outputs).
    """
    fgraph = FunctionGraph(outputs=dont_touch_vars or [], clone=False)

    return pre_greedy_local_optimizer(
        fgraph,
        [
            local_dimshuffle_rv_lift,
            local_subtensor_rv_lift,
            naive_bcast_rv_lift,
        ],
        x,
    )
class MixtureRV(OpFromGraph):
    """A placeholder used to specify a log-likelihood for a mixture sub-graph."""

    @classmethod
    def create_node(cls, node, indices, mixture_rvs):
        """Wrap a subtensor-of-stacked-RVs ``node`` into a `MixtureRV` node.

        ``indices`` are the subtensor's index inputs and ``mixture_rvs`` the
        stacked component RVs; both become explicit inputs of the new op.
        """
        out_var = node.default_output()

        inputs = list(indices) + list(mixture_rvs)

        mixture_op = cls(
            inputs,
            [out_var],
            inline=True,
            on_unused_input="ignore",
        )

        mixture_op.name = f"{out_var.name if out_var.name else ''}-mixture"

        # new_node = mixture_op.make_node(None, None, None, *inputs)
        new_node = mixture_op(*inputs)

        return new_node.owner

    def get_non_shared_inputs(self, inputs):
        # NOTE(review): this slices by `len(self.shared_inputs)`, which only
        # drops the right tail when the explicit and shared input counts
        # happen to coincide — confirm against `OpFromGraph`'s input layout.
        return inputs[: len(self.shared_inputs)]


# Declare `MixtureRV` measurable so the logprob machinery dispatches on it.
MeasurableVariable.register(MixtureRV)
def get_stack_mixture_vars(
    node: Apply,
) -> Optional[List[TensorVariable]]:
    r"""Extract the mixture terms from a `*Subtensor*` applied to stacked `RandomVariable`\s.

    Returns the list of component variables when ``node`` indexes into a
    ``MakeVector``/``Join`` of `RandomVariable` outputs, and ``None`` otherwise.

    Raises
    ======
    NotImplementedError
        If the `Join` axis is symbolic or different from 0.
    """
    if not isinstance(node.op, subtensor_ops):
        return None  # pragma: no cover

    joined_rvs = node.inputs[0]

    # First, make sure that it's some sort of concatenation
    if not (joined_rvs.owner and isinstance(joined_rvs.owner.op, (MakeVector, Join))):
        # Node is not a compatible join `Op`
        return None  # pragma: no cover

    if isinstance(joined_rvs.owner.op, MakeVector):
        mixture_rvs = joined_rvs.owner.inputs
    elif isinstance(joined_rvs.owner.op, Join):
        # `Join`'s first input is the axis; the remaining inputs are the
        # joined tensors.
        mixture_rvs = joined_rvs.owner.inputs[1:]
        join_axis = joined_rvs.owner.inputs[0]
        try:
            join_axis = int(get_constant_value(join_axis))
        except ValueError:
            # TODO: Support symbolic join axes
            raise NotImplementedError(
                "Symbolic `Join` axes are not supported in mixtures"
            )

        if join_axis != 0:
            # TODO: Support other join axes
            raise NotImplementedError("Only `Join` axis 0 is supported in mixtures")

    if not all(
        rv.owner and isinstance(rv.owner.op, RandomVariable) for rv in mixture_rvs
    ):
        # Currently, all mixture components must be `RandomVariable` outputs
        # TODO: Allow constants and make them Dirac-deltas
        # raise NotImplementedError(
        #     "All mixture components must be `RandomVariable` outputs"
        # )
        return None

    return mixture_rvs
@local_optimizer(subtensor_ops)
def mixture_replace(fgraph, node):
    r"""Identify mixture sub-graphs and replace them with a place-holder `Op`.

    The basic idea is to find ``stack(mixture_comps)[I_rv]``, where
    ``mixture_comps`` is a ``list`` of `RandomVariable`\s and ``I_rv`` is a
    `RandomVariable` with a discrete and finite support.

    From these terms, new terms ``Z_rv[i] = mixture_comps[i][i == I_rv]`` are
    created for each ``i`` in ``enumerate(mixture_comps)``.
    """
    rv_map_feature = getattr(fgraph, "preserve_rv_mappings", None)

    if rv_map_feature is None:
        return None  # pragma: no cover

    out_var = node.default_output()

    # Only rewrite subtensors that actually have a value variable assigned.
    if out_var not in rv_map_feature.rv_values:
        return None  # pragma: no cover

    mixture_res = get_stack_mixture_vars(node)

    if mixture_res is None:
        return None  # pragma: no cover

    mixture_rvs = mixture_res

    mixture_value_var = rv_map_feature.rv_values.pop(out_var, None)

    # We loop through mixture components and collect all the array elements
    # that belong to each one (by way of their indices).
    for i, component_rv in enumerate(mixture_rvs):
        if component_rv in rv_map_feature.rv_values:
            raise ValueError("A value variable was specified for a mixture component")
        # Components contribute only through the mixture; suppress their
        # individual logprob factors.
        component_rv.tag.ignore_logprob = True

    # Replace this sub-graph with a `MixtureRV`
    new_node = MixtureRV.create_node(node, node.inputs[1:], mixture_rvs)

    new_mixture_rv = new_node.default_output()
    new_mixture_rv.name = "mixture"

    # The old subtensor's value variable is transferred to the new mixture RV.
    rv_map_feature.rv_values[new_mixture_rv] = mixture_value_var

    # FIXME: This is pretty hackish
    fgraph.import_node(new_node, import_missing=True, reason="mixture_rv")

    return [new_mixture_rv]
@_logprob.register(MixtureRV)
def logprob_MixtureRV(op, values, *inputs, name=None, **kwargs):
(value,) = values
inputs = op.get_non_shared_inputs(inputs)
subtensor_node = op.outputs[0].owner
num_indices = len(subtensor_node.inputs) - 1
indices = inputs[:num_indices]
indices = indices_from_subtensor(
getattr(subtensor_node.op, "idx_list", None), indices
)
comp_rvs = inputs[num_indices:]
if value.ndim > 0:
# TODO: Make the join axis to the left-most dimension (or transpose the
# problem)
join_axis = 0 # op.join_axis
value_shape = shape_tuple(value)
logp_val = at.full(value_shape, -np.inf, dtype=value.dtype)
for i, comp_rv in enumerate(comp_rvs):
I_0 = indices[join_axis]
join_indices = at.nonzero(at.eq(I_0, i))
#
# pre_index = (
# tuple(at.ogrid[tuple(slice(None, s) for s in at.shape(join_indices))])
# if I_0 is not None
# else (slice(None),)
# )
#
# non_join_indices = pre_index + indices[1:]
#
# obs_i = value[join_indices][non_join_indices]
obs_i = value[join_indices]
comp_shape = shape_tuple(comp_rv)
bcast_shape = at.broadcast_shape(
value_shape, comp_shape, arrays_are_shapes=True
)
bcasted_comp_rv = at.broadcast_to(comp_rv, bcast_shape)
zz = bcasted_comp_rv[join_indices]
indexed_comp_rv = rv_pull_down(zz, inputs)
# indexed_comp_rv = rv_pull_down(indexed_comp_rv[non_join_indices], inputs)
logp_val = at.set_subtensor(
# logp_val[join_indices][non_join_indices],
logp_val[join_indices],
logprob(indexed_comp_rv, obs_i),
)
else:
logp_val = 0.0
for i, comp_rv in enumerate(comp_rvs):
comp_logp = logprob(comp_rv, value)
logp_val += ifelse(
at.eq(indices[0], i),
comp_logp,
at.as_tensor(0.0, dtype=comp_logp.dtype),
)
return logp_val
rv_sinking_db.register("mixture_replace", mixture_replace, -5, "basic")
|
kc611/aeppl
|
tests/test_joint_logprob.py
|
import aesara
import aesara.tensor as at
import numpy as np
import pytest
import scipy.stats.distributions as sp
from aesara.graph.basic import Apply, ancestors, equal_computations
from aesara.graph.op import Op
from aesara.tensor.subtensor import (
AdvancedIncSubtensor,
AdvancedIncSubtensor1,
AdvancedSubtensor,
AdvancedSubtensor1,
IncSubtensor,
Subtensor,
)
from aeppl.abstract import MeasurableVariable
from aeppl.joint_logprob import joint_logprob
from aeppl.logprob import _logprob, logprob
from aeppl.utils import rvs_to_value_vars, walk_model
from tests.utils import assert_no_rvs
def test_joint_logprob_basic():
# A simple check for when `joint_logprob` is the same as `logprob`
a = at.random.uniform(0.0, 1.0)
a.name = "a"
a_value_var = a.clone()
a_logp = joint_logprob({a: a_value_var}, sum=False)
a_logp_exp = logprob(a, a_value_var)
assert equal_computations([a_logp], [a_logp_exp])
# Let's try a hierarchical model
sigma = at.random.invgamma(0.5, 0.5)
Y = at.random.normal(0.0, sigma)
sigma_value_var = sigma.clone()
y_value_var = Y.clone()
total_ll = joint_logprob({Y: y_value_var, sigma: sigma_value_var}, sum=False)
# We need to replace the reference to `sigma` in `Y` with its value
# variable
ll_Y = logprob(Y, y_value_var)
(ll_Y,), _ = rvs_to_value_vars(
[ll_Y],
initial_replacements={sigma: sigma_value_var},
)
total_ll_exp = logprob(sigma, sigma_value_var) + ll_Y
assert equal_computations([total_ll], [total_ll_exp])
# Now, make sure we can compute a joint log-probability for a hierarchical
# model with some non-`RandomVariable` nodes
c = at.random.normal()
c.name = "c"
b_l = c * a + 2.0
b = at.random.uniform(b_l, b_l + 1.0)
b.name = "b"
b_value_var = b.clone()
c_value_var = c.clone()
b_logp = joint_logprob({a: a_value_var, b: b_value_var, c: c_value_var})
# There shouldn't be any `RandomVariable`s in the resulting graph
assert_no_rvs(b_logp)
res_ancestors = list(walk_model((b_logp,), walk_past_rvs=True))
assert b_value_var in res_ancestors
assert c_value_var in res_ancestors
assert a_value_var in res_ancestors
def test_joint_logprob_multi_obs():
    """Joint log-probability over multiple observed RVs factorizes additively."""
    a = at.random.uniform(0.0, 1.0)
    b = at.random.normal(0.0, 1.0)

    a_val = a.clone()
    b_val = b.clone()

    logp = joint_logprob({a: a_val, b: b_val}, sum=False)
    logp_exp = logprob(a, a_val) + logprob(b, b_val)

    assert equal_computations([logp], [logp_exp])

    x = at.random.normal(0, 1)
    y = at.random.normal(x, 1)

    x_val = x.clone()
    y_val = y.clone()

    # NOTE(review): both graphs below are produced by the exact same call, so
    # this only checks that `joint_logprob` builds a deterministic graph
    # across invocations — confirm whether a stronger reference was intended.
    logp = joint_logprob({x: x_val, y: y_val})
    exp_logp = joint_logprob({x: x_val, y: y_val})

    assert equal_computations([logp], [exp_logp])
def test_joint_logprob_diff_dims():
M = at.matrix("M")
x = at.random.normal(0, 1, size=M.shape[1], name="X")
y = at.random.normal(M.dot(x), 1, name="Y")
x_vv = x.clone()
x_vv.name = "x"
y_vv = y.clone()
y_vv.name = "y"
logp = joint_logprob({x: x_vv, y: y_vv})
M_val = np.random.normal(size=(10, 3))
x_val = np.random.normal(size=(3,))
y_val = np.random.normal(size=(10,))
point = {M: M_val, x_vv: x_val, y_vv: y_val}
logp_val = logp.eval(point)
exp_logp_val = (
sp.norm.logpdf(x_val, 0, 1).sum()
+ sp.norm.logpdf(y_val, M_val.dot(x_val), 1).sum()
)
assert exp_logp_val == pytest.approx(logp_val)
@pytest.mark.parametrize(
"indices, size",
[
(slice(0, 2), 5),
(np.r_[True, True, False, False, True], 5),
(np.r_[0, 1, 4], 5),
((np.array([0, 1, 4]), np.array([0, 1, 4])), (5, 5)),
],
)
def test_joint_logprob_incsubtensor(indices, size):
"""Make sure we can compute a joint log-probability for ``Y[idx] = data`` where ``Y`` is univariate."""
rng = np.random.RandomState(232)
mu = np.power(10, np.arange(np.prod(size))).reshape(size)
sigma = 0.001
data = rng.normal(mu[indices], 1.0)
y_val = rng.normal(mu, sigma, size=size)
Y_rv = at.random.normal(mu, sigma, size=size)
Y_rv.name = "Y"
y_value_var = Y_rv.clone()
y_value_var.name = "y"
Y_sst = at.set_subtensor(Y_rv[indices], data)
assert isinstance(
Y_sst.owner.op, (IncSubtensor, AdvancedIncSubtensor, AdvancedIncSubtensor1)
)
Y_sst_logp = joint_logprob({Y_rv: y_value_var, Y_sst: None}, sum=False)
obs_logps = Y_sst_logp.eval({y_value_var: y_val})
y_val_idx = y_val.copy()
y_val_idx[indices] = data
exp_obs_logps = sp.norm.logpdf(y_val_idx, mu, sigma)
np.testing.assert_almost_equal(obs_logps, exp_obs_logps)
def test_joint_logprob_subtensor():
"""Make sure we can compute a joint log-probability for ``Y[I]`` where ``Y`` and ``I`` are random variables."""
size = 5
mu_base = np.power(10, np.arange(np.prod(size))).reshape(size)
mu = np.stack([mu_base, -mu_base])
sigma = 0.001
rng = aesara.shared(np.random.RandomState(232), borrow=True)
A_rv = at.random.normal(mu, sigma, rng=rng)
A_rv.name = "A"
p = 0.5
I_rv = at.random.bernoulli(p, size=size, rng=rng)
I_rv.name = "I"
A_idx = A_rv[I_rv, at.ogrid[A_rv.shape[-1] :]]
assert isinstance(
A_idx.owner.op, (Subtensor, AdvancedSubtensor, AdvancedSubtensor1)
)
A_idx_value_var = A_idx.type()
A_idx_value_var.name = "A_idx_value"
I_value_var = I_rv.type()
I_value_var.name = "I_value"
A_idx_logp = joint_logprob({A_idx: A_idx_value_var, I_rv: I_value_var}, sum=False)
logp_vals_fn = aesara.function([A_idx_value_var, I_value_var], A_idx_logp)
# The compiled graph should not contain any `RandomVariables`
assert_no_rvs(logp_vals_fn.maker.fgraph.outputs[0])
decimals = 6 if aesara.config.floatX == "float64" else 4
test_val_rng = np.random.RandomState(3238)
for i in range(10):
bern_sp = sp.bernoulli(p)
I_value = bern_sp.rvs(size=size, random_state=test_val_rng).astype(I_rv.dtype)
norm_sp = sp.norm(mu[I_value, np.ogrid[mu.shape[1] :]], sigma)
A_idx_value = norm_sp.rvs(random_state=test_val_rng).astype(A_idx.dtype)
exp_obs_logps = norm_sp.logpdf(A_idx_value)
exp_obs_logps += bern_sp.logpmf(I_value)
logp_vals = logp_vals_fn(A_idx_value, I_value)
np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals)
def test_persist_inputs():
"""Make sure we don't unnecessarily clone variables."""
x = at.scalar("x")
beta_rv = at.random.normal(0, 1, name="beta")
Y_rv = at.random.normal(beta_rv * x, 1, name="y")
beta_vv = beta_rv.type()
y_vv = Y_rv.clone()
logp = joint_logprob({beta_rv: beta_vv, Y_rv: y_vv})
assert x in ancestors([logp])
# Make sure we don't clone value variables when they're graphs.
y_vv_2 = y_vv * 2
logp_2 = joint_logprob({beta_rv: beta_vv, Y_rv: y_vv_2})
assert y_vv_2 in ancestors([logp_2])
def test_ignore_logprob():
x = at.scalar("x")
beta_rv = at.random.normal(0, 1, name="beta")
beta_rv.tag.ignore_logprob = True
y_rv = at.random.normal(beta_rv * x, 1, name="y")
beta = beta_rv.type()
y = y_rv.type()
logp = joint_logprob({beta_rv: beta, y_rv: y})
y_rv_2 = at.random.normal(beta * x, 1, name="y")
logp_exp = joint_logprob({y_rv_2: y})
assert equal_computations([logp], [logp_exp])
def test_ignore_logprob_multiout():
class MyMultiOut(Op):
@staticmethod
def impl(a, b):
res1 = 2 * a
res2 = 2 * b
return [res1, res2]
def make_node(self, a, b):
return Apply(self, [a, b], [a.type(), b.type()])
def perform(self, node, inputs, outputs):
res1, res2 = self.impl(inputs[0], inputs[1])
outputs[0][0] = res1
outputs[1][0] = res2
MeasurableVariable.register(MyMultiOut)
@_logprob.register(MyMultiOut)
def logprob_MyMultiOut(op, value, *inputs, name=None, **kwargs):
return at.zeros_like(value)
Y_1_rv, Y_2_rv = MyMultiOut()(at.vector(), at.vector())
Y_1_rv.tag.ignore_logprob = True
Y_2_rv.tag.ignore_logprob = True
y_1_vv = Y_1_rv.clone()
y_2_vv = Y_2_rv.clone()
logp_exp = joint_logprob({Y_1_rv: y_1_vv, Y_2_rv: y_2_vv})
assert logp_exp is None
def test_multiple_rvs_to_same_value_raises():
x_rv1 = at.random.normal(name="x1")
x_rv2 = at.random.normal(name="x2")
x = x_rv1.type()
x.name = "x"
msg = "More than one logprob factor was assigned to the value var x"
with pytest.raises(ValueError, match=msg):
joint_logprob({x_rv1: x, x_rv2: x})
|
kc611/aeppl
|
aeppl/logprob.py
|
from functools import singledispatch
from typing import Tuple
import aesara.tensor as at
import aesara.tensor.random.basic as arb
import numpy as np
from aesara.assert_op import Assert
from aesara.graph.op import Op
from aesara.tensor.slinalg import Cholesky, solve_lower_triangular
from aesara.tensor.var import TensorVariable
# from aesara.tensor.xlogx import xlogy0
cholesky = Cholesky(lower=True, on_error="nan")
def betaln(x, y):
    """Log of the Beta function, ``log B(x, y)``, via log-gamma identities."""
    log_gamma_x = at.gammaln(x)
    log_gamma_y = at.gammaln(y)
    return log_gamma_x + log_gamma_y - at.gammaln(x + y)
def binomln(n, k):
    """Log of the binomial coefficient, ``log C(n, k)``, via log-gamma."""
    log_n_fact = at.gammaln(n + 1)
    log_k_fact = at.gammaln(k + 1)
    log_nmk_fact = at.gammaln(n - k + 1)
    return log_n_fact - log_k_fact - log_nmk_fact
def xlogy0(m, x):
    """Compute ``m * log(x)`` with the convention ``0 * log(0) == 0``.

    NOTE(review): for ``x == 0`` and ``m != 0`` this returns ``-inf`` even
    when ``m < 0`` (where the limit would be ``+inf``) — presumably ``m`` is
    always nonnegative at the call sites in this module; verify.
    """
    # TODO: This should probably be a basic Aesara stabilization
    return at.switch(at.eq(x, 0), at.switch(at.eq(m, 0), 0.0, -np.inf), m * at.log(x))
def logprob(rv_var, *rv_values, **kwargs):
    """Create a graph for the log-probability of a ``RandomVariable``.

    Parameters
    ----------
    rv_var
        The random variable whose log-probability graph is constructed.
    rv_values
        The value variables at which the log-probability is evaluated.

    Returns
    -------
    The log-probability graph, named ``"<name>_logprob"`` after the last
    *named* value variable (if any).
    """
    logprob = _logprob(rv_var.owner.op, rv_values, *rv_var.owner.inputs, **kwargs)

    # Fix: the loop variable used to shadow the ``rv_var`` parameter.
    for value_var in rv_values:
        if value_var.name:
            logprob.name = f"{value_var.name}_logprob"

    return logprob
@singledispatch
def _logprob(
    op: Op,
    values: Tuple[TensorVariable],
    *inputs: TensorVariable,
    **kwargs,
):
    """Create a graph for the log-density/mass of a ``RandomVariable``.

    This function dispatches on the type of ``op``, which should be a subclass
    of ``RandomVariable``. If you want to implement new density/mass graphs
    for a ``RandomVariable``, register a new function on this dispatcher.

    ``values`` is a tuple so that implementations for multi-output ``Op``s can
    receive one value variable per measurable output.
    """
    raise NotImplementedError()
@_logprob.register(arb.UniformRV)
def uniform_logprob(op, values, *inputs, **kwargs):
    """Log-density of the continuous uniform distribution on ``[lower, upper]``."""
    (value,) = values
    lower, upper = inputs[3:]
    in_support = at.bitwise_and(at.ge(value, lower), at.le(value, upper))
    log_density = at.fill(value, -at.log(upper - lower))
    return at.switch(in_support, log_density, -np.inf)
@_logprob.register(arb.NormalRV)
def normal_logprob(op, values, *inputs, **kwargs):
    """Log-density of the normal distribution; asserts ``sigma > 0``."""
    (value,) = values
    mu, sigma = inputs[3:]
    z = (value - mu) / sigma
    logp = -0.5 * at.pow(z, 2) - at.log(np.sqrt(2.0 * np.pi)) - at.log(sigma)
    return Assert("sigma > 0")(logp, at.all(at.gt(sigma, 0.0)))
@_logprob.register(arb.HalfNormalRV)
def halfnormal_logprob(op, values, *inputs, **kwargs):
    """Log-density of the half-normal distribution, supported on ``value >= loc``."""
    (value,) = values
    loc, sigma = inputs[3:]
    z = (value - loc) / sigma
    logp = -0.5 * at.pow(z, 2) + at.log(np.sqrt(2.0 / np.pi)) - at.log(sigma)
    logp = at.switch(at.ge(value, loc), logp, -np.inf)
    return Assert("sigma > 0")(logp, at.all(at.gt(sigma, 0.0)))
@_logprob.register(arb.BetaRV)
def beta_logprob(op, values, *inputs, **kwargs):
    """Log-density of the beta distribution.

    The ``alpha == 1`` / ``beta == 1`` switches avoid ``0 * log(0)`` at the
    support boundaries.
    """
    (value,) = values
    alpha, beta = inputs[3:]
    res = (
        at.switch(at.eq(alpha, 1.0), 0.0, (alpha - 1.0) * at.log(value))
        + at.switch(at.eq(beta, 1.0), 0.0, (beta - 1.0) * at.log1p(-value))
        # Consistency fix: use the module-level ``betaln`` helper instead of
        # restating the log-Beta identity inline.
        - betaln(alpha, beta)
    )
    res = at.switch(at.bitwise_and(at.ge(value, 0.0), at.le(value, 1.0)), res, -np.inf)
    res = Assert("0 <= value <= 1, alpha > 0, beta > 0")(
        res, at.all(at.gt(alpha, 0.0)), at.all(at.gt(beta, 0.0))
    )
    return res
@_logprob.register(arb.ExponentialRV)
def exponential_logprob(op, values, *inputs, **kwargs):
    """Log-density of the exponential distribution with scale (mean) ``mu``."""
    (value,) = values
    (mu,) = inputs[3:]
    logp = -at.log(mu) - value / mu
    logp = at.switch(at.ge(value, 0.0), logp, -np.inf)
    return Assert("mu > 0")(logp, at.all(at.gt(mu, 0.0)))
@_logprob.register(arb.LaplaceRV)
def laplace_logprob(op, values, *inputs, **kwargs):
    """Log-density of the Laplace distribution (location ``mu``, scale ``b``)."""
    (value,) = values
    mu, b = inputs[3:]
    abs_deviation = at.abs_(value - mu)
    return -at.log(2 * b) - abs_deviation / b
@_logprob.register(arb.LogNormalRV)
def lognormal_logprob(op, values, *inputs, **kwargs):
    """Log-density of the log-normal distribution, supported on ``value > 0``."""
    (value,) = values
    mu, sigma = inputs[3:]
    z = (at.log(value) - mu) / sigma
    logp = (
        -0.5 * at.pow(z, 2)
        - 0.5 * at.log(2.0 * np.pi)
        - at.log(sigma)
        - at.log(value)
    )
    logp = at.switch(at.gt(value, 0.0), logp, -np.inf)
    return Assert("sigma > 0")(logp, at.all(at.gt(sigma, 0)))
@_logprob.register(arb.ParetoRV)
def pareto_logprob(op, values, *inputs, **kwargs):
    """Log-density of the Pareto distribution (shape ``alpha``, scale ``m``)."""
    (value,) = values
    alpha, m = inputs[3:]
    logp = at.log(alpha) + xlogy0(alpha, m) - xlogy0(alpha + 1.0, value)
    logp = at.switch(at.ge(value, m), logp, -np.inf)
    return Assert("alpha > 0, m > 0")(
        logp, at.all(at.gt(alpha, 0.0)), at.all(at.gt(m, 0.0))
    )
@_logprob.register(arb.CauchyRV)
def cauchy_logprob(op, values, *inputs, **kwargs):
    """Log-density of the Cauchy distribution (location ``alpha``, scale ``beta``)."""
    (value,) = values
    alpha, beta = inputs[3:]
    z = (value - alpha) / beta
    logp = -at.log(np.pi) - at.log(beta) - at.log1p(at.pow(z, 2))
    return Assert("beta > 0")(logp, at.all(at.gt(beta, 0.0)))
@_logprob.register(arb.HalfCauchyRV)
def halfcauchy_logprob(op, values, *inputs, **kwargs):
    """Log-density of the half-Cauchy: twice the Cauchy density on ``value >= loc``."""
    (value,) = values
    loc, _ = inputs[3:]
    logp = at.log(2) + cauchy_logprob(op, values, *inputs, **kwargs)
    return at.switch(at.ge(value, loc), logp, -np.inf)
@_logprob.register(arb.GammaRV)
def gamma_logprob(op, values, *inputs, **kwargs):
    """Log-density of the gamma distribution.

    The second distribution parameter is the *inverse* rate (i.e. the scale),
    so it is inverted before applying the shape/rate density formula.
    """
    (value,) = values
    alpha, inv_beta = inputs[3:]
    beta = at.reciprocal(inv_beta)
    logp = (
        -at.gammaln(alpha)
        + xlogy0(alpha, beta)
        - beta * value
        + xlogy0(alpha - 1, value)
    )
    logp = at.switch(at.ge(value, 0.0), logp, -np.inf)
    return Assert("alpha > 0, beta > 0")(
        logp, at.all(at.gt(alpha, 0.0)), at.all(at.gt(beta, 0.0))
    )
@_logprob.register(arb.InvGammaRV)
def invgamma_logprob(op, values, *inputs, **kwargs):
    """Log-density of the inverse-gamma distribution (shape ``alpha``, scale ``beta``).

    Bug fix: removed a dead first assignment to ``res`` that was immediately
    overwritten (and which applied ``np.log`` to a symbolic value).
    """
    (value,) = values
    alpha, beta = inputs[3:]
    res = (
        -at.gammaln(alpha)
        + xlogy0(alpha, beta)
        - beta / value
        + xlogy0(-alpha - 1, value)
    )
    res = at.switch(at.ge(value, 0.0), res, -np.inf)
    res = Assert("alpha > 0, beta > 0")(
        res, at.all(at.gt(alpha, 0.0)), at.all(at.gt(beta, 0.0))
    )
    return res
@_logprob.register(arb.ChiSquareRV)
def chisquare_logprob(op, values, *inputs, **kwargs):
    """Log-density of the chi-square distribution via Gamma(nu / 2, scale=2)."""
    (value,) = values
    (nu,) = inputs[3:]
    # Reuse the gamma density with shape ``nu / 2`` and scale ``2``.
    return gamma_logprob(op, values, *inputs[:3], nu / 2, 2)
@_logprob.register(arb.WaldRV)
def wald_logprob(op, values, *inputs, **kwargs):
    """Log-density of the Wald (inverse Gaussian) distribution."""
    (value,) = values
    mu, scale = inputs[3:]
    rel_dev = (value - mu) / mu
    logp = (
        0.5 * at.log(scale / (2.0 * np.pi))
        - 1.5 * at.log(value)
        - 0.5 * scale / value * rel_dev ** 2
    )
    logp = at.switch(at.gt(value, 0.0), logp, -np.inf)
    return Assert("mu > 0, scale > 0")(
        logp, at.all(at.gt(mu, 0.0)), at.all(at.gt(scale, 0.0))
    )
@_logprob.register(arb.WeibullRV)
def weibull_logprob(op, values, *inputs, **kwargs):
    """Log-density of the Weibull distribution (shape ``alpha``, scale ``beta``)."""
    (value,) = values
    alpha, beta = inputs[3:]
    scaled = value / beta
    logp = (
        at.log(alpha)
        - at.log(beta)
        + (alpha - 1.0) * at.log(scaled)
        - at.pow(scaled, alpha)
    )
    logp = at.switch(at.ge(value, 0.0), logp, -np.inf)
    return Assert("alpha > 0, beta > 0")(
        logp, at.all(at.gt(alpha, 0.0)), at.all(at.gt(beta, 0.0))
    )
@_logprob.register(arb.VonMisesRV)
def vonmises_logprob(op, values, *inputs, **kwargs):
    """Log-density of the von Mises distribution, supported on ``[-pi, pi]``."""
    (value,) = values
    mu, kappa = inputs[3:]
    logp = kappa * at.cos(mu - value) - at.log(2 * np.pi) - at.log(at.i0(kappa))
    in_support = at.bitwise_and(at.ge(value, -np.pi), at.le(value, np.pi))
    logp = at.switch(in_support, logp, -np.inf)
    return Assert("kappa > 0")(logp, at.all(at.gt(kappa, 0.0)))
@_logprob.register(arb.TriangularRV)
def triangular_logprob(op, values, *inputs, **kwargs):
    """Log-density of the triangular distribution with mode ``c`` on ``[lower, upper]``."""
    (value,) = values
    lower, c, upper = inputs[3:]
    # Rising branch below the mode, falling branch above it.
    rising = at.log(2 * (value - lower) / ((upper - lower) * (c - lower)))
    falling = at.log(2 * (upper - value) / ((upper - lower) * (upper - c)))
    logp = at.switch(at.lt(value, c), rising, falling)
    in_support = at.bitwise_and(at.le(lower, value), at.le(value, upper))
    logp = at.switch(in_support, logp, -np.inf)
    return Assert("lower <= c, c <= upper")(
        logp, at.all(at.le(lower, c)), at.all(at.le(c, upper))
    )
@_logprob.register(arb.GumbelRV)
def gumbel_logprob(op, values, *inputs, **kwargs):
    """Log-density of the Gumbel (type-I extreme value) distribution."""
    (value,) = values
    mu, beta = inputs[3:]
    z = (value - mu) / beta
    logp = -z - at.exp(-z) - at.log(beta)
    return Assert("0 < beta")(logp, at.all(at.lt(0.0, beta)))
@_logprob.register(arb.LogisticRV)
def logistic_logprob(op, values, *inputs, **kwargs):
    """Log-density of the logistic distribution (location ``mu``, scale ``s``)."""
    (value,) = values
    mu, s = inputs[3:]
    z = (value - mu) / s
    logp = -z - at.log(s) - 2.0 * at.log1p(at.exp(-z))
    return Assert("0 < s")(logp, at.all(at.lt(0.0, s)))
@_logprob.register(arb.BinomialRV)
def binomial_logprob(op, values, *inputs, **kwargs):
    """Log-mass of the binomial distribution (``n`` trials, success probability ``p``)."""
    (value,) = values
    n, p = inputs[3:]
    logp = binomln(n, value) + xlogy0(value, p) + xlogy0(n - value, 1.0 - p)
    in_support = at.bitwise_and(at.le(0, value), at.le(value, n))
    logp = at.switch(in_support, logp, -np.inf)
    return Assert("0 <= p, p <= 1")(logp, at.all(at.le(0.0, p)), at.all(at.le(p, 1.0)))
@_logprob.register(arb.BetaBinomialRV)
def betabinomial_logprob(op, values, *inputs, **kwargs):
    """Log-mass of the beta-binomial distribution."""
    (value,) = values
    n, alpha, beta = inputs[3:]
    logp = (
        binomln(n, value)
        + betaln(value + alpha, n - value + beta)
        - betaln(alpha, beta)
    )
    in_support = at.bitwise_and(at.le(0, value), at.le(value, n))
    logp = at.switch(in_support, logp, -np.inf)
    return Assert("0 < alpha, 0 < beta")(
        logp, at.all(at.lt(0.0, alpha)), at.all(at.lt(0.0, beta))
    )
@_logprob.register(arb.BernoulliRV)
def bernoulli_logprob(op, values, *inputs, **kwargs):
    """Log-mass of the Bernoulli distribution with success probability ``p``."""
    (value,) = values
    (p,) = inputs[3:]
    logp = at.switch(value, at.log(p), at.log(1.0 - p))
    in_support = at.bitwise_and(at.le(0, value), at.le(value, 1))
    logp = at.switch(in_support, logp, -np.inf)
    return Assert("0 <= p <= 1")(logp, at.all(at.le(0.0, p)), at.all(at.le(p, 1.0)))
@_logprob.register(arb.PoissonRV)
def poisson_logprob(op, values, *inputs, **kwargs):
    """Log-mass of the Poisson distribution with rate ``mu``."""
    (value,) = values
    (mu,) = inputs[3:]
    logp = xlogy0(value, mu) - at.gammaln(value + 1) - mu
    logp = at.switch(at.le(0, value), logp, -np.inf)
    logp = Assert("0 <= mu")(logp, at.all(at.le(0.0, mu)))
    # The degenerate rate ``mu == 0`` puts all mass on ``value == 0``.
    degenerate_hit = at.bitwise_and(at.eq(mu, 0.0), at.eq(value, 0.0))
    return at.switch(degenerate_hit, 0.0, logp)
@_logprob.register(arb.NegBinomialRV)
def nbinom_logprob(op, values, *inputs, **kwargs):
    """Log-mass of the negative-binomial distribution parameterized by ``n`` and ``p``."""
    (value,) = values
    n, p = inputs[3:]
    # Convert to the (n, mu) parameterization, where ``mu`` is the mean.
    mu = n * (1 - p) / p
    res = (
        binomln(value + n - 1, value)
        + xlogy0(value, mu / (mu + n))
        + xlogy0(n, n / (mu + n))
    )
    res = at.switch(at.le(0, value), res, -np.inf)
    res = Assert("0 < mu, 0 < n")(res, at.all(at.lt(0.0, mu)), at.all(at.lt(0.0, n)))
    # For very large ``n`` switch to the Poisson limit with rate ``mu``.  Note
    # the argument packing: ``poisson_logprob`` reads its rate parameter from
    # ``inputs[3:]``, so ``mu`` is appended after the first three inputs.
    res = at.switch(at.gt(n, 1e10), poisson_logprob(op, values, *inputs[:3], mu), res)
    return res
@_logprob.register(arb.GeometricRV)
def geometric_logprob(op, values, *inputs, **kwargs):
    """Log-mass of the geometric distribution (number of trials to first success)."""
    (value,) = values
    (p,) = inputs[3:]
    logp = at.log(p) + xlogy0(value - 1, 1 - p)
    logp = at.switch(at.le(1, value), logp, -np.inf)
    return Assert("0 <= p <= 1")(logp, at.all(at.le(0.0, p)), at.all(at.ge(1.0, p)))
@_logprob.register(arb.HyperGeometricRV)
def hypergeometric_logprob(op, values, *inputs, **kwargs):
    """Log-mass of the hypergeometric distribution.

    ``good``/``bad`` are the counts of success/failure states in the
    population and ``n`` is the number of draws.
    """
    (value,) = values
    good, bad, n = inputs[3:]
    total = good + bad
    # The three binomial coefficients of the hypergeometric pmf, expressed
    # through ``betaln``.
    res = (
        betaln(good + 1, 1)
        + betaln(bad + 1, 1)
        + betaln(total - n + 1, n + 1)
        - betaln(value + 1, good - value + 1)
        - betaln(n - value + 1, bad - n + value + 1)
        - betaln(total + 1, 1)
    )
    # Support is max(0, n - bad) <= value <= min(good, n).
    lower = at.switch(at.gt(n - total + good, 0), n - total + good, 0)
    upper = at.switch(at.lt(good, n), good, n)
    res = at.switch(
        at.bitwise_and(at.le(lower, value), at.le(value, upper)), res, -np.inf
    )
    return res
@_logprob.register(arb.CategoricalRV)
def categorical_logprob(op, values, *inputs, **kwargs):
    """Log-mass of the categorical distribution with (unnormalized) weights ``p``."""
    (value,) = values
    (p,) = inputs[3:]
    # Normalize the weights along the category (last) axis.
    p = p / at.sum(p, axis=-1, keepdims=True)
    if p.ndim > 1:
        # Pad `value`/`p` on the left so both have the same rank before indexing.
        if p.ndim > value.ndim:
            value = at.shape_padleft(value, p.ndim - value.ndim)
        elif p.ndim < value.ndim:
            p = at.shape_padleft(p, value.ndim - p.ndim)
        # Move the category axis to the front, then pick out the probability
        # of each observed category.
        pattern = (p.ndim - 1,) + tuple(range(p.ndim - 1))
        res = at.log(
            at.take_along_axis(
                p.dimshuffle(pattern),
                value,
            )
        )
        # FIXME: `take_along_axis` drops a broadcastable dimension
        # when `value.broadcastable == p.broadcastable == (True, True, False)`.
    else:
        res = at.log(p[value])
    # Out-of-range category indices get zero probability.
    res = at.switch(
        at.bitwise_and(at.le(0, value), at.lt(value, at.shape(p)[-1])), res, -np.inf
    )
    res = Assert("0 <= p <= 1")(res, at.all(at.ge(p, 0.0)), at.all(at.le(p, 1.0)))
    return res
@_logprob.register(arb.MvNormalRV)
def mvnormal_logprob(op, values, *inputs, **kwargs):
    """Log-density of the multivariate normal distribution.

    The quadratic form is evaluated through a Cholesky factor of the
    covariance; a non-positive-definite covariance (NaN/non-positive diagonal
    in the factor) triggers the trailing `Assert`.
    """
    (value,) = values
    mu, cov = inputs[3:]
    r = value - mu
    cov_chol = cholesky(cov)
    cov_chol_diag = at.diag(cov_chol)
    # TODO: Tag these matrices as positive definite when they're created
    # Use pseudo-determinant instead. E.g. from SciPy,
    # s, u = eigh(cov)
    # factor = {'f': 1E3, 'd': 1E6}
    # t = s.numpy_dtype.char.lower()
    # cond = factor[t] * np.finfo(t).eps
    # eps = cond * at.max(at.abs_(s))
    # n = s[at.gt(s, eps)]
    all_pos_definite = at.all(at.gt(cov_chol_diag, 0))
    # Replace an invalid factor with 1 so the solve below stays finite; the
    # `Assert` at the end still reports the failure.
    cov_chol = at.switch(all_pos_definite, cov_chol, 1)
    z_T = solve_lower_triangular(cov_chol, r.T).T
    quaddist = at.pow(z_T, 2).sum(axis=-1)
    # log|Sigma|^(1/2) from the Cholesky diagonal.
    logdet = at.sum(at.log(cov_chol_diag))
    n = value.shape[-1]
    res = -0.5 * n * np.log(2 * np.pi) - 0.5 * quaddist - logdet
    res = Assert("0 < diag(Sigma)")(res, all_pos_definite)
    return res
@_logprob.register(arb.DirichletRV)
def dirichlet_logprob(op, values, *inputs, **kwargs):
    """Log-density of the Dirichlet distribution with concentration ``alpha``.

    NOTE(review): the normalizing constant uses unqualified ``at.sum`` (all
    axes); presumably ``alpha`` is 1-D here — verify for batched
    concentrations.
    """
    (value,) = values
    (alpha,) = inputs[3:]
    # log B(alpha) = sum(gammaln(alpha)) - gammaln(sum(alpha))
    res = at.sum(at.gammaln(alpha)) - at.gammaln(at.sum(alpha))
    res = -res + at.sum((xlogy0(alpha - 1, value.T)).T, axis=0)
    # Support: each component in [0, 1].
    res = at.switch(
        at.bitwise_and(
            at.all(at.le(0.0, value), axis=-1), at.all(at.le(value, 1.0), axis=-1)
        ),
        res,
        -np.inf,
    )
    res = Assert("0 < alpha")(res, at.all(at.lt(0.0, alpha)))
    return res
@_logprob.register(arb.MultinomialRV)
def multinomial_logprob(op, values, *inputs, **kwargs):
    """Log-mass of the multinomial distribution over counts summing to ``n``."""
    (value,) = values
    n, p = inputs[3:]
    logp = at.gammaln(n + 1) + at.sum(
        -at.gammaln(value + 1) + xlogy0(value, p), axis=-1
    )
    # Support: nonnegative counts whose total equals ``n``.
    in_support = at.bitwise_and(
        at.all(at.le(0.0, value), axis=-1), at.eq(at.sum(value, axis=-1), n)
    )
    logp = at.switch(in_support, logp, -np.inf)
    return Assert("p <= 1, sum(p) == 1, n >= 0")(
        logp,
        at.all(at.le(p, 1)),
        at.all(at.eq(at.sum(p, axis=-1), 1)),
        at.all(at.ge(n, 0)),
    )
|
kc611/aeppl
|
aeppl/joint_logprob.py
|
<gh_stars>0
import warnings
from collections import deque
from typing import Dict, Optional, Union
import aesara.tensor as at
from aesara import config
from aesara.graph.basic import graph_inputs, io_toposort
from aesara.graph.fg import FunctionGraph
from aesara.graph.op import compute_test_value
from aesara.graph.opt import GlobalOptimizer, LocalOptimizer
from aesara.graph.optdb import OptimizationQuery
from aesara.tensor.basic_opt import ShapeFeature
from aesara.tensor.var import TensorVariable
from aeppl.abstract import get_measurable_outputs
from aeppl.logprob import _logprob
from aeppl.opt import PreserveRVMappings, logprob_rewrites_db
from aeppl.utils import rvs_to_value_vars
def factorized_joint_logprob(
    rv_values: Dict[TensorVariable, TensorVariable],
    warn_missing_rvs: bool = True,
    extra_rewrites: Optional[Union[GlobalOptimizer, LocalOptimizer]] = None,
    **kwargs,
) -> Dict[TensorVariable, TensorVariable]:
    r"""Create a map between variables and their log-probabilities such that the
    sum is their joint log-probability.

    The `rv_values` dictionary specifies a joint probability graph defined by
    pairs of random variables and respective measure-space input parameters

    For example, consider the following

    .. code-block:: python

        import aesara.tensor as at

        sigma2_rv = at.random.invgamma(0.5, 0.5)
        Y_rv = at.random.normal(0, at.sqrt(sigma2_rv))

    This graph for ``Y_rv`` is equivalent to the following hierarchical model:

    .. math::

        \sigma^2 \sim& \operatorname{InvGamma}(0.5, 0.5) \\
        Y \sim& \operatorname{N}(0, \sigma^2)

    If we create a value variable for ``Y_rv``, i.e. ``y_vv = at.scalar("y")``,
    the graph of ``factorized_joint_logprob({Y_rv: y_vv})`` is equivalent to the
    conditional probability :math:`\log p(Y = y \mid \sigma^2)`, with a stochastic
    ``sigma2_rv``. If we specify a value variable for ``sigma2_rv``, i.e.
    ``s_vv = at.scalar("s2")``, then ``factorized_joint_logprob({Y_rv: y_vv, sigma2_rv: s_vv})``
    yields the joint log-probability of the two variables.

    .. math::

        \log p(Y = y, \sigma^2 = s) =
            \log p(Y = y \mid \sigma^2 = s) + \log p(\sigma^2 = s)

    Parameters
    ==========
    rv_values
        A ``dict`` of variables that maps stochastic elements
        (e.g. `RandomVariable`\s) to symbolic `Variable`\s representing their
        values in a log-probability.
    warn_missing_rvs
        When ``True``, issue a warning when a `RandomVariable` is found in
        the graph and doesn't have a corresponding value variable specified in
        `rv_values`.
    extra_rewrites
        Extra rewrites to be applied (e.g. reparameterizations, transforms,
        etc.)

    Returns
    =======
    A ``dict`` that maps each value variable to the log-probability factor derived
    from the respective `RandomVariable`.

    """
    # Since we're going to clone the entire graph, we need to keep a map from
    # the old nodes to the new ones; otherwise, we won't be able to use
    # `rv_values`.
    # We start the `dict` with mappings from the value variables to themselves,
    # to prevent them from being cloned.
    memo = {v: v for v in rv_values.values()}

    # We add `ShapeFeature` because it will get rid of references to the old
    # `RandomVariable`s that have been lifted; otherwise, it will be difficult
    # to give good warnings when an unaccounted for `RandomVariable` is
    # encountered
    fgraph = FunctionGraph(
        outputs=list(rv_values.keys()),
        clone=True,
        memo=memo,
        copy_orphans=False,
        copy_inputs=False,
        features=[ShapeFeature()],
    )

    # Update `rv_values` so that it uses the new cloned variables
    rv_values = {memo[k]: v for k, v in rv_values.items()}

    # This `Feature` preserves the relationships between the original
    # random variables (i.e. keys in `rv_values`) and the new ones
    # produced when `Op`s are lifted through them.
    rv_remapper = PreserveRVMappings(rv_values)
    fgraph.attach_feature(rv_remapper)

    logprob_rewrites_db.query(OptimizationQuery(include=["basic"])).optimize(fgraph)

    if extra_rewrites is not None:
        extra_rewrites.optimize(fgraph)

    # This is the updated random-to-value-vars map with the lifted/rewritten
    # variables. The rewrites are supposed to produce new
    # `MeasurableVariable`s that are amenable to `_logprob`.
    updated_rv_values = rv_remapper.rv_values

    # When a `_logprob` has been produced for a `MeasurableVariable` node, all
    # other references to it need to be replaced with its value-variable all
    # throughout the `_logprob`-produced graphs. The following `dict`
    # cumulatively maintains remappings for all the variables/nodes that needed
    # to be recreated after replacing `MeasurableVariable`s with their
    # value-variables. Since these replacements work in topological order, all
    # the necessary value-variable replacements should be present for each
    # node.
    replacements = updated_rv_values.copy()

    # To avoid cloning the value variables, we map them to themselves in the
    # `replacements` `dict` (i.e. entries already existing in `replacements`
    # aren't cloned)
    replacements.update({v: v for v in rv_values.values()})

    # Walk the graph from its inputs to its outputs and construct the
    # log-probability
    q = deque(fgraph.toposort())
    logprob_vars = {}

    while q:
        node = q.popleft()

        outputs = get_measurable_outputs(node.op, node)
        if not outputs:
            continue

        # Bug fix: outputs that have no value variable *and* are not flagged
        # with ``ignore_logprob`` cannot produce a factor; the node must be
        # skipped (optionally with a warning), otherwise the
        # ``replacements[...]`` lookup below raises a `KeyError`.  The old
        # condition filtered on the *flagged* outputs, which both silenced
        # legitimate warnings and crashed on unvalued, unflagged RVs (e.g. the
        # stochastic ``sigma2_rv`` case in the docstring above).
        missing_value_vars = [
            o
            for o in outputs
            if o not in updated_rv_values
            and not getattr(o.tag, "ignore_logprob", False)
        ]
        if missing_value_vars:
            if warn_missing_rvs:
                warnings.warn(
                    "Found a random variable that was neither among the observations "
                    f"nor the conditioned variables: {node}"
                )
            continue

        q_rv_value_vars = [
            replacements[q_rv_var]
            for q_rv_var in outputs
            if not getattr(q_rv_var.tag, "ignore_logprob", False)
        ]

        if not q_rv_value_vars:
            continue

        # Replace `RandomVariable`s in the inputs with value variables.
        # Also, store the results in the `replacements` map for the nodes
        # that follow.
        remapped_vars, _ = rvs_to_value_vars(
            q_rv_value_vars + list(node.inputs),
            initial_replacements=replacements,
        )
        q_rv_value_vars = remapped_vars[: len(q_rv_value_vars)]
        value_var_inputs = remapped_vars[len(q_rv_value_vars) :]

        q_logprob_vars = _logprob(
            node.op,
            q_rv_value_vars,
            *value_var_inputs,
            **kwargs,
        )

        if not isinstance(q_logprob_vars, (list, tuple)):
            q_logprob_vars = [q_logprob_vars]

        for q_rv_var, q_logprob_var in zip(q_rv_value_vars, q_logprob_vars):
            if q_rv_var.name:
                q_logprob_var.name = f"{q_rv_var.name}_logprob"
            if q_rv_var in logprob_vars:
                raise ValueError(
                    f"More than one logprob factor was assigned to the value var {q_rv_var}"
                )
            logprob_vars[q_rv_var] = q_logprob_var

        # Recompute test values for the changes introduced by the
        # replacements above.
        if config.compute_test_value != "off":
            for node in io_toposort(graph_inputs(q_logprob_vars), q_logprob_vars):
                compute_test_value(node)

    return logprob_vars
def joint_logprob(*args, sum: bool = True, **kwargs) -> Optional[TensorVariable]:
    """Create a graph representing the joint log-probability/measure of a graph.

    This is a thin wrapper around `factorized_joint_logprob` that combines the
    per-variable factors into a single graph.

    Parameters
    ----------
    sum: bool
        If ``True`` each factor is collapsed to a scalar via ``sum`` before
        being joined with the remaining factors. This may be necessary to
        avoid incorrect broadcasting among independent factors.
    """
    factors = factorized_joint_logprob(*args, **kwargs)
    if not factors:
        return None

    terms = list(factors.values())
    if len(terms) == 1:
        only_term = terms[0]
        return at.sum(only_term) if sum else only_term

    if sum:
        return at.sum([at.sum(term) for term in terms])
    return at.add(*terms)
|
kc611/aeppl
|
aeppl/abstract.py
|
import abc
from functools import singledispatch
from typing import List
from aesara.graph.basic import Apply, Variable
from aesara.graph.op import Op
from aesara.tensor.random.op import RandomVariable
class MeasurableVariable(abc.ABC):
    """A variable that can be assigned a measure/log-probability"""


# `RandomVariable`s are measurable by definition.
MeasurableVariable.register(RandomVariable)
def get_measurable_outputs(op: Op, node: Apply) -> List[Variable]:
    """Return only the outputs that are measurable."""
    if not isinstance(op, MeasurableVariable):
        return []
    return _get_measurable_outputs(op, node)
@singledispatch
def _get_measurable_outputs(op, node):
    # By default, every output of a measurable `Op` is considered measurable.
    return node.outputs
@_get_measurable_outputs.register(RandomVariable)
def _get_measurable_outputs_RandomVariable(op, node):
    # A `RandomVariable` node's first output is the updated RNG state; only
    # the remaining output(s) carry the sampled values.
    return node.outputs[1:]
|
kc611/aeppl
|
aeppl/printing.py
|
import string
import textwrap
from collections import OrderedDict
from collections.abc import Mapping, MutableMapping
from copy import copy
from typing import Optional, Union
import aesara
import aesara.tensor as at
from aesara.assert_op import Assert
from aesara.compile.function.types import Function
from aesara.graph.basic import Constant, Variable
from aesara.graph.fg import FunctionGraph
from aesara.printing import (
IgnorePrinter,
OperatorPrinter,
PatternPrinter,
PPrinter,
PrinterState,
)
from aesara.printing import pprint as at_pprint
from aesara.scalar.basic import Add, Mul
from aesara.tensor.basic_opt import ShapeFeature
from aesara.tensor.elemwise import Elemwise
from aesara.tensor.exceptions import NotScalarConstantError
from aesara.tensor.math import _dot
from aesara.tensor.random.basic import NormalRV
from aesara.tensor.random.op import RandomVariable
from aesara.tensor.random.var import RandomStateSharedVariable
from aesara.tensor.subtensor import AdvancedSubtensor, AdvancedSubtensor1, Subtensor
from aesara.tensor.type import float_dtypes, int_dtypes, uint_dtypes
from aesara.tensor.var import TensorConstant, TensorVariable
# SymPy is an optional dependency used only to LaTeX-format constant arrays;
# fall back to returning the data unchanged when it isn't installed.
try:
    from sympy import Array as SympyArray
    from sympy.printing import latex as sympy_latex

    def latex_print_array(data):  # pragma: no cover
        return sympy_latex(SympyArray(data))

except ImportError:  # pragma: no cover

    def latex_print_array(data):
        return data
# Printer state may be supplied either as a plain mutable mapping or as an
# Aesara `PrinterState` instance.
PrinterStateType = Union[MutableMapping, PrinterState]
class RandomVariablePrinter:
    r"""Pretty print random variables.

    `Op`\s are able to specify their ascii and LaTeX formats via a "print_name"
    property. `Op.print_name` should be a tuple or list that specifies the
    plain text/ascii and LaTeX name, respectively.

    Also, distribution parameters can be formatted distinctly by overriding
    the `RandomVariablePrinter.process_param` method.
    """

    def __init__(self, name: Optional[str] = None):
        """Create a `RandomVariablePrinter`.

        Parameters
        ----------
        name: str (optional)
            A fixed name to use for the random variables printed by this
            printer. If not specified, use `RandomVariable.name`.

        """
        self.name = name

    def process_param(self, idx: int, sform: str, pstate: Optional[PrinterStateType]):
        """Perform special per-parameter post-formatting.

        This can be used, for instance, to change a std. dev. into a variance.

        Parameters
        ----------
        idx: int
            The index value of the parameter.
        sform: str
            The pre-formatted string form of the parameter.
        pstate: object
            The printer state.

        """
        return sform  # pragma: no cover

    def process(self, output, pstate: Optional[PrinterStateType]):
        """Format ``output`` as a ``<name> ~ <dist>(<params>)`` declaration.

        Returns the variable's name; the full distribution definition is
        either returned directly or stashed in the preamble (when one exists).
        """
        if hasattr(pstate, "memo") and output in pstate.memo:
            return pstate.memo[output]
        pprinter = pstate.pprinter
        node = getattr(output, "owner", None)
        if node is None or not isinstance(node.op, RandomVariable):  # pragma: no cover
            raise TypeError(
                "Function %s cannot represent a variable that is "
                "not the result of a RandomVariable operation" % self.name
            )
        op_name = self.name or getattr(node.op, "print_name", None)
        op_name = op_name or getattr(node.op, "name", None)
        if op_name is None:  # pragma: no cover
            raise ValueError(f"Could not find a name for {node.op}")
        # Allow `Op`s to specify their ascii and LaTeX formats (in a tuple/list
        # with that order).
        output_latex = getattr(pstate, "latex", False)
        if isinstance(op_name, (tuple, list)):
            op_name = op_name[int(output_latex)]
        elif output_latex:
            op_name = "\\operatorname{%s}" % op_name
        preamble_dict = getattr(pstate, "preamble_dict", {})
        new_precedence = -1000
        try:
            old_precedence = getattr(pstate, "precedence", None)
            pstate.precedence = new_precedence
            # Get the symbol name string from another pprinter.
            # We create a dummy variable with no `owner`, so that
            # the pprinter will format it like a standard variable.
            dummy_out = output.clone()
            dummy_out.owner = None
            # Use this to get shape information down the line.
            dummy_out.orig_var = output
            var_name = pprinter.process(dummy_out, pstate)
            if output_latex:
                dist_format = "%s \\sim %s\\left(%s\\right)"
            else:
                dist_format = "%s ~ %s(%s)"
            # Get the shape info for our dummy symbol, if available,
            # and append it to the distribution definition.
            if "shape_strings" in preamble_dict:
                shape_info_str = preamble_dict["shape_strings"].pop(dummy_out)
                # NOTE(review): `str.lstrip` removes a leading *character set*,
                # not a prefix; this presumably intends to drop the leading
                # variable name from the shape string — verify for names whose
                # characters overlap the shape text.
                shape_info_str = shape_info_str.lstrip(var_name)
                if output_latex:
                    dist_format += "\\, {}".format(shape_info_str)
                else:
                    dist_format += shape_info_str
            # The first three node inputs are not distribution parameters
            # (they are skipped here, as elsewhere in this code base).
            dist_params = node.inputs[3:]
            formatted_params = [
                self.process_param(i, pprinter.process(p, pstate), pstate)
                for i, p in enumerate(dist_params)
            ]
            dist_def_str = dist_format % (
                var_name,
                op_name,
                ", ".join(formatted_params),
            )
        finally:
            pstate.precedence = old_precedence
        # All subsequent calls will use the variable name and
        # not the distribution definition.
        pstate.memo[output] = var_name
        if preamble_dict:
            rv_strings = preamble_dict.setdefault("rv_strings", [])
            rv_strings.append(dist_def_str)
            return var_name
        else:
            return dist_def_str
class GenericSubtensorPrinter:
    """Pretty print ``*Subtensor*`` (indexing/slicing) operations."""

    def process(self, r: Variable, pstate: Optional[PrinterStateType]):
        """Format ``r`` as ``obj[i, j:k, ...]`` (or the LaTeX equivalent)."""
        if getattr(r, "owner", None) is None:  # pragma: no cover
            raise TypeError("Can only print `*Subtensor*`s.")
        output_latex = getattr(pstate, "latex", False)
        inputs = list(r.owner.inputs)
        # The first input is the indexed object; the rest are index components.
        obj = inputs.pop(0)
        idxs = getattr(r.owner.op, "idx_list", inputs)
        sidxs = []
        old_precedence = getattr(pstate, "precedence", None)
        try:
            pstate.precedence = -1000
            for entry in idxs:
                if isinstance(entry, slice):
                    s_parts = [""] * 2
                    # NOTE(review): slice components are taken with
                    # ``inputs.pop()`` (from the *end* of the remaining
                    # inputs) and ``process`` is called without ``pstate`` —
                    # verify component ordering against graphs with several
                    # symbolic slice parts.
                    if entry.start is not None:
                        s_parts[0] = pstate.pprinter.process(inputs.pop())
                    if entry.stop is not None:
                        s_parts[1] = pstate.pprinter.process(inputs.pop())
                    if entry.step is not None:
                        s_parts.append(pstate.pprinter.process(inputs.pop()))
                    sidxs.append(":".join(s_parts))
                else:
                    sidxs.append(pstate.pprinter.process(inputs.pop()))
            if output_latex:
                idx_str = ", \\,".join(sidxs)
            else:
                idx_str = ", ".join(sidxs)
        finally:
            pstate.precedence = old_precedence
        try:
            pstate.precedence = 1000
            sub = pstate.pprinter.process(obj, pstate)
        finally:
            pstate.precedence = old_precedence
        if output_latex:
            return "%s\\left[%s\\right]" % (sub, idx_str)
        else:
            return "%s[%s]" % (sub, idx_str)
class VariableWithShapePrinter:
    """Print variable shape info in the preamble.

    Also uses readable character names for un-named variables.

    Constant arrays are only printed when their size is below a threshold
    set by ``max_line_width * max_line_height``
    """

    # Pool of single-character names handed out to un-named variables.
    available_names = OrderedDict.fromkeys(string.ascii_letters)
    default_printer = aesara.printing.default_printer
    max_line_width = 40
    max_line_height = 20

    @classmethod
    def process(cls, output: Variable, pstate: Optional[PrinterStateType]):
        """Return a name for ``output``, recording its shape info in the preamble."""
        if output in pstate.memo:
            return pstate.memo[output]
        using_latex = getattr(pstate, "latex", False)
        # Crude--but effective--means of stopping print-outs for large
        # arrays.
        constant = isinstance(
            output, (TensorConstant, aesara.scalar.basic.ScalarConstant)
        )
        too_large = constant and (
            output.data.size > cls.max_line_width * cls.max_line_height
        )
        if constant and not too_large:
            # Print constants that aren't too large
            if using_latex and output.ndim > 0:
                out_name = latex_print_array(output.data)
            else:
                out_name = str(output.data)
        elif (
            isinstance(
                output,
                (
                    TensorVariable,
                    aesara.scalar.basic.Scalar,
                    aesara.scalar.basic.ScalarVariable,
                ),
            )
            or constant
        ):
            # Process name and shape
            # Attempt to get the original variable, in case this is a cloned
            # `RandomVariable` output; otherwise, we won't get any shape
            # information from the `FunctionGraph`.
            var = getattr(output, "orig_var", output)
            out_name = cls.process_variable_name(var, pstate)
            shape_info = cls.process_shape_info(var, pstate)
            shape_strings = pstate.preamble_dict.setdefault(
                "shape_strings", OrderedDict()
            )
            shape_strings[output] = shape_info
        else:  # pragma: no cover
            raise TypeError(f"Type {type(output)} not handled by variable printer")
        pstate.memo[output] = out_name
        return out_name

    @classmethod
    def process_variable_name(
        cls, output: Variable, pstate: Optional[PrinterStateType]
    ):
        """Take a variable name from the available ones.

        This function also initializes the available names by removing
        all the manually specified names within the `FunctionGraph`
        being printed (if available). Doing so removes the potential for
        name collisions.

        """
        if output in pstate.memo:
            return pstate.memo[output]
        available_names = getattr(pstate, "available_names", None)
        if available_names is None:
            # Initialize this state's available names
            available_names = copy(cls.available_names)
            # Remove known names in the graph.
            _ = [available_names.pop(v.name, None) for v in pstate.fgraph.variables]
            setattr(pstate, "available_names", available_names)
        if getattr(output, "name", None):
            # Observed an existing name; remove it.
            out_name = output.name
            available_names.pop(out_name, None)
        else:
            # Take an unused name.
            out_name, _ = available_names.popitem(last=False)
        pstate.memo[output] = out_name
        return out_name

    @classmethod
    def process_shape_info(cls, output: Variable, pstate: Optional[PrinterStateType]):
        """Build a string like ``x in R**(N^x_0 x N^x_1)`` (or its LaTeX form)."""
        using_latex = getattr(pstate, "latex", False)
        # Pick the symbol for the variable's scalar domain from its dtype.
        if output.dtype in int_dtypes:
            sspace_char = "Z"
        elif output.dtype in uint_dtypes:
            sspace_char = "N"
        elif output.dtype in float_dtypes:
            sspace_char = "R"
        else:
            sspace_char = "?"
        shape_feature = None
        if not hasattr(pstate.fgraph, "shape_feature"):
            pstate.fgraph.attach_feature(ShapeFeature())
        shape_feature = pstate.fgraph.shape_feature
        shape_dims = []
        for i in range(output.ndim):
            s_i_out = None
            if using_latex:
                s_i_pat = "N^{%s}" + ("_{%s}" % i)
            else:
                s_i_pat = "N^%s" + ("_%s" % i)
            if shape_feature:
                new_precedence = -1000
                try:
                    old_precedence = getattr(pstate, "precedence", None)
                    pstate.precedence = new_precedence
                    _s_i_out = shape_feature.get_shape(output, i)
                    if not isinstance(_s_i_out, (Constant, TensorVariable)):
                        s_i_out = pstate.pprinter.process(_s_i_out, pstate)
                    else:
                        s_i_out = str(at.get_scalar_constant_value(_s_i_out))
                except (KeyError, IndexError, ValueError, NotScalarConstantError):
                    # Ugh, most of these exception types are just for
                    # `get_scalar_constant_value`!
                    # TODO: The design of that function contract could use some
                    # serious reconsideration.
                    pass
                finally:
                    pstate.precedence = old_precedence
            if not s_i_out:
                # Fall back to the variable's own name for unknown dimensions.
                s_i_out = cls.process_variable_name(output, pstate)
                s_i_out = s_i_pat % s_i_out
            shape_dims += [s_i_out]
        shape_info = cls.process_variable_name(output, pstate)
        if using_latex:
            shape_info += " \\in \\mathbb{%s}" % sspace_char
            shape_dims_str = " \\times ".join(shape_dims)
            if shape_dims_str:
                shape_info += "^{%s}" % shape_dims_str
        else:
            shape_info += " in %s" % sspace_char
            shape_dims_str = " x ".join(shape_dims)
            if shape_dims:
                shape_info += "**(%s)" % shape_dims_str
        return shape_info
class PreamblePPrinter(PPrinter):
    r"""Pretty printer that displays a preamble.

    Preambles are put into an `OrderedDict` of categories (determined by
    printers that use the preamble). The order can be set by preempting the
    category names within an `OrderedDict` passed to the constructor via
    the `preamble_dict` keyword.

    The lines accumulated in each category are comma-separated up to a fixed
    length given by `PreamblePPrinter.max_preamble_width`, after which a
    newline is appended and process repeats.

    Example
    -------
    >>> import aesara.tensor as at
    >>> from aeppl.printing import pprint
    >>> X_rv = at.random.normal(at.scalar('\\mu'), at.scalar('\\sigma'), name='X')
    >>> print(pprint(X_rv))
    \\mu in R
    \\sigma in R
    X ~ N(\\mu, \\sigma**2), X in R
    X

    XXX: Not thread-safe!
    """

    # Character budget per preamble line before the category wraps.
    max_preamble_width = 40

    def __init__(
        self,
        *args,
        pstate_defaults: Optional[PrinterStateType] = None,
        preamble_dict: Optional[Mapping] = None,
        **kwargs,
    ):
        """Create a `PreamblePPrinter`.

        Parameters
        ----------
        pstate_defaults: dict (optional)
            Default printer state parameters.
        preamble_dict: OrderedDict (optional)
            Default preamble dictionary. Use this to pre-set the print-out
            ordering of preamble categories/keys.
        """
        super().__init__(*args, **kwargs)
        self.pstate_defaults: PrinterStateType = pstate_defaults or {}
        self.pstate_defaults.setdefault(
            "preamble_dict", OrderedDict() if preamble_dict is None else preamble_dict
        )
        # Seed with the printers registered on Aesara's stock pretty-printer.
        self.printers_dict = dict(at_pprint.printers_dict)
        self.printers = copy(at_pprint.printers)
        self._pstate = None

    def create_state(self, pstate: Optional[PrinterStateType]):
        # Build a fresh `PrinterState` (or adapt a mapping) seeded with this
        # printer's defaults.
        if pstate is None:
            pstate = PrinterState(
                pprinter=self, **{k: copy(v) for k, v in self.pstate_defaults.items()}
            )
        elif isinstance(pstate, Mapping):
            pstate.update({k: copy(v) for k, v in self.pstate_defaults.items()})
            pstate = PrinterState(pprinter=self, **pstate)
        # FIXME: Good old fashioned circular references...
        # We're doing this so that `self.process` will be called correctly
        # accross all code. (I'm lookin' about you, `DimShufflePrinter`; get
        # your act together.)
        pstate.pprinter._pstate = pstate
        return pstate

    def process(self, r: Variable, pstate: Optional[PrinterStateType] = None):
        # NOTE: the incoming `pstate` argument is deliberately ignored in
        # favor of the state stashed by `create_state` (see FIXME above).
        pstate = self._pstate
        assert pstate
        return super().process(r, pstate)

    def process_graph(self, inputs, outputs, updates=None, display_inputs=False):
        # Inherited API that this printer does not support.
        raise NotImplementedError()  # pragma: no cover

    def __call__(self, *args, latex_env="equation", latex_label: str = None):
        in_vars = args[0]

        pstate = next(iter(args[1:]), None)
        if isinstance(pstate, (MutableMapping, PrinterState)):
            pstate = self.create_state(args[1])
        elif pstate is None:
            pstate = self.create_state(None)

        if isinstance(in_vars, Function):
            in_vars = in_vars.maker.fgraph

        # This pretty printer needs more information about shapes and inputs,
        # which it gets from a `FunctionGraph`.
        fgraph = None
        out_vars = None
        if isinstance(in_vars, FunctionGraph):
            # We were given a `FunctionGraph` to start with; let's make sure
            # it has the shape information we need.
            fgraph = in_vars
            if not hasattr(fgraph, "shape_feature"):
                shape_feature = ShapeFeature()
                fgraph.attach_feature(shape_feature)
            in_vars = fgraph.inputs
            out_vars = fgraph.outputs
        elif not isinstance(in_vars, (tuple, list)):
            in_vars = [in_vars]

        if fgraph is None:
            # Wrap the given variables in a cloned graph so we can attach a
            # `ShapeFeature` without mutating the caller's graph.
            memo = {}
            fgraph = FunctionGraph(
                outputs=in_vars,
                features=[ShapeFeature()],
                clone=True,
                memo=memo,
            )
            in_vars = [memo[i] for i in in_vars]
            out_vars = fgraph.outputs

        pstate.fgraph = fgraph

        # TODO: How should this be formatted to better designate
        # the output numbers (in LaTeX, as well)?
        body_strs = []
        for v in out_vars:
            body_strs += [super().__call__(v, pstate)]

        latex_out = getattr(pstate, "latex", False)
        comma_str = ", \\," if latex_out else ", "
        newline_str = "\n\\\\\n" if latex_out else "\n"
        indent_str = " "

        # Let's join all the preamble categories, but split within
        # categories when the joined line is too long.
        preamble_lines = []
        for v in pstate.preamble_dict.values():
            if isinstance(v, Mapping):
                v = list(v.values())
            assert isinstance(v, list)
            if not v:
                continue
            # Greedily pack category entries into sub-lines of at most
            # `max_preamble_width` characters each.
            v_new = []
            c_len = l_idx = 0
            for l in v:
                if len(v_new) <= l_idx:
                    c_len = self.max_preamble_width * l_idx
                    v_new.append([l])
                else:
                    v_new[l_idx].append(l)
                c_len += len(l)
                l_idx += int(c_len // self.max_preamble_width > l_idx)
            preamble_lines.append(newline_str.join(comma_str.join(z) for z in v_new))

        if preamble_lines and latex_out:
            preamble_body = newline_str.join(preamble_lines)
            preamble_str = f"\\begin{{gathered}}\n{textwrap.indent(preamble_body, indent_str)}\n\\end{{gathered}}"
            res = newline_str.join([preamble_str] + body_strs)
        else:
            res = newline_str.join(preamble_lines + body_strs)

        if latex_out and latex_env:
            # Wrap the whole print-out in the requested LaTeX environment.
            label_out = f"\\label{{{latex_label}}}\n" if latex_label else ""
            res = textwrap.indent(res, indent_str)
            res = (
                f"\\begin{{{latex_env}}}\n"
                f"{res}\n"
                f"{label_out}"
                f"\\end{{{latex_env}}}"
            )
        return res
# Module-level default pretty-printer for text output.
pprint = PreamblePPrinter()

# The order here is important!
# Custom printers are *prepended* so they take precedence over the stock
# Aesara printers copied in `PreamblePPrinter.__init__`.
pprint.printers.insert(
    0,
    (
        lambda pstate, r: isinstance(r, (aesara.scalar.basic.Scalar, Variable)),
        VariableWithShapePrinter,
    ),
)
pprint.printers.insert(
    0,
    (
        lambda pstate, r: getattr(r, "owner", None)
        and isinstance(r.owner.op, RandomVariable),
        RandomVariablePrinter(),
    ),
)

# This handles the in-place versions of `Add` and `Mul` produced by
# optimizations
pprint.assign(
    lambda pstate, r: getattr(r, "owner", None)
    and isinstance(r.owner.op, Elemwise)
    and isinstance(r.owner.op.scalar_op, Add),
    OperatorPrinter("+", -1, "left"),
)
pprint.assign(
    lambda pstate, r: getattr(r, "owner", None)
    and isinstance(r.owner.op, Elemwise)
    and isinstance(r.owner.op.scalar_op, Mul),
    OperatorPrinter("*", -1, "left"),
)
class NormalRVPrinter(RandomVariablePrinter):
    """Printer for normal random variables, rendered with the symbol ``N``."""

    def __init__(self):
        super().__init__("N")

    def process_param(self, idx, sform, pstate):
        """Square the scale parameter (index 1) so the print-out shows the variance."""
        if idx != 1:
            return sform
        latex_mode = getattr(pstate, "latex", False)
        return f"{{{sform}}}^{{2}}" if latex_mode else f"{sform}**2"
pprint.assign(NormalRV, NormalRVPrinter())
pprint.assign(_dot, OperatorPrinter("@", -1, "left"))
pprint.assign(at.and_, OperatorPrinter("and", -1, "left"))
pprint.assign(at.or_, OperatorPrinter("or", -1, "left"))
# `Assert` nodes and RNG state are noise in a print-out; suppress them.
pprint.assign(Assert, IgnorePrinter())
pprint.assign(RandomStateSharedVariable, IgnorePrinter())
# pprint.assign(random_state_type, IgnorePrinter())
# One shared printer instance handles all subtensor (indexing/slicing) ops.
subtensor_printer = GenericSubtensorPrinter()
pprint.assign(Subtensor, subtensor_printer)
pprint.assign(AdvancedSubtensor, subtensor_printer)
pprint.assign(AdvancedSubtensor1, subtensor_printer)
pprint.assign(at.ge, PatternPrinter(("%(0)s >= %(1)s", -1000)))
pprint.assign(at.gt, PatternPrinter(("%(0)s > %(1)s", -1000)))
pprint.assign(at.le, PatternPrinter(("%(0)s <= %(1)s", -1000)))
pprint.assign(at.lt, PatternPrinter(("%(0)s < %(1)s", -1000)))
pprint.assign(at.eq, PatternPrinter(("%(0)s == %(1)s", -1000)))
# LaTeX variant: same printer stack, but with LaTeX-specific operators.
latex_pprint = PreamblePPrinter(pstate_defaults={"latex": True})
# NOTE(review): these two `assign` calls appear redundant — `printers` and
# `printers_dict` are overwritten just below, and `pprint` already maps
# these keys to `IgnorePrinter`.  Confirm before removing.
latex_pprint.assign(Assert, IgnorePrinter())
latex_pprint.assign(RandomStateSharedVariable, IgnorePrinter())
latex_pprint.printers = copy(pprint.printers)
latex_pprint.printers_dict = dict(pprint.printers_dict)
latex_pprint.assign(at.ge, PatternPrinter(("%(0)s \\ge %(1)s", -1000)))
latex_pprint.assign(at.gt, PatternPrinter(("%(0)s \\gt %(1)s", -1000)))
latex_pprint.assign(at.le, PatternPrinter(("%(0)s \\le %(1)s", -1000)))
latex_pprint.assign(at.lt, PatternPrinter(("%(0)s \\lt %(1)s", -1000)))
latex_pprint.assign(at.eq, PatternPrinter(("%(0)s = %(1)s", -1000)))
latex_pprint.assign(at.and_, OperatorPrinter("\\land", -1, "left"))
latex_pprint.assign(at.or_, OperatorPrinter("\\lor", -1, "left"))
latex_pprint.assign(at.invert, PatternPrinter(("\\lnot %(0)s", -1000)))
latex_pprint.assign(_dot, OperatorPrinter("\\;", -1, "left"))
latex_pprint.assign(at.mul, OperatorPrinter("\\odot", -1, "either"))
latex_pprint.assign(at.true_div, PatternPrinter(("\\frac{%(0)s}{%(1)s}", -1000)))
latex_pprint.assign(at.sqrt, PatternPrinter(("\\sqrt{%(0)s}", -1000)))
latex_pprint.assign(at.pow, PatternPrinter(("{%(0)s}^{%(1)s}", -1000)))
|
kc611/aeppl
|
tests/test_mixture.py
|
import aesara
import aesara.tensor as at
import numpy as np
import pytest
import scipy.stats.distributions as sp
from aesara.graph.basic import Variable
from aeppl.joint_logprob import joint_logprob
from tests.utils import assert_no_rvs
def test_mixture_basics():
    """Check that unsupported mixture constructions raise the expected errors."""
    srng = at.random.RandomStream(29833)

    def create_mix_model(size, axis):
        # Build a two-component (normal/gamma) mixture indexed by a Bernoulli
        # variable; returns `locals()` so callers can pick out any binding.
        X_rv = srng.normal(0, 1, size=size, name="X")
        Y_rv = srng.gamma(0.5, 0.5, size=size, name="Y")
        p_at = at.scalar("p")
        p_at.tag.test_value = 0.5
        I_rv = srng.bernoulli(p_at, size=size, name="I")
        i_vv = I_rv.clone()
        i_vv.name = "i"
        if isinstance(axis, Variable):
            M_rv = at.join(axis, X_rv, Y_rv)[I_rv]
        else:
            M_rv = at.stack([X_rv, Y_rv], axis=axis)[I_rv]
        M_rv.name = "M"
        m_vv = M_rv.clone()
        m_vv.name = "m"
        return locals()

    # Specifying a value variable for a mixture *component* is rejected.
    with pytest.raises(ValueError, match=".*value variable was specified.*"):
        env = create_mix_model(None, 0)
        X_rv = env["X_rv"]
        I_rv = env["I_rv"]
        i_vv = env["i_vv"]
        M_rv = env["M_rv"]
        m_vv = env["m_vv"]
        x_vv = X_rv.clone()
        x_vv.name = "x"
        joint_logprob({M_rv: m_vv, I_rv: i_vv, X_rv: x_vv})

    # Mixing over a non-leading axis is not implemented.
    with pytest.raises(NotImplementedError):
        env = create_mix_model((2,), 1)
        I_rv = env["I_rv"]
        i_vv = env["i_vv"]
        M_rv = env["M_rv"]
        m_vv = env["m_vv"]
        joint_logprob({M_rv: m_vv, I_rv: i_vv})

    # A *symbolic* join axis is not implemented either.
    with pytest.raises(NotImplementedError):
        axis_at = at.lscalar("axis")
        axis_at.tag.test_value = 0
        env = create_mix_model((2,), axis_at)
        I_rv = env["I_rv"]
        i_vv = env["i_vv"]
        M_rv = env["M_rv"]
        m_vv = env["m_vv"]
        joint_logprob({M_rv: m_vv, I_rv: i_vv})
@pytest.mark.parametrize(
    "p_val, size",
    [
        (np.array(0.0, dtype=aesara.config.floatX), ()),
        (np.array(1.0, dtype=aesara.config.floatX), ()),
        (np.array(0.0, dtype=aesara.config.floatX), (2,)),
        (np.array(1.0, dtype=aesara.config.floatX), (2, 1)),
    ],
)
@aesara.config.change_flags(compute_test_value="raise")
def test_hetero_mixture_scalar(p_val, size):
    """Compare the mixture log-probability against a SciPy reference.

    The mixture has a normal and a gamma component selected by a Bernoulli
    index with scalar probability `p_val`.
    """
    srng = at.random.RandomStream(29833)

    X_rv = srng.normal(0, 1, size=size, name="X")
    Y_rv = srng.gamma(0.5, 0.5, size=size, name="Y")

    p_at = at.scalar("p")
    p_at.tag.test_value = p_val

    I_rv = srng.bernoulli(p_at, size=size, name="I")
    i_vv = I_rv.clone()
    i_vv.name = "i"

    M_rv = at.stack([X_rv, Y_rv])[I_rv]
    M_rv.name = "M"
    m_vv = M_rv.clone()
    m_vv.name = "m"

    M_logp = joint_logprob({M_rv: m_vv, I_rv: i_vv}, sum=False)
    M_logp_fn = aesara.function([p_at, m_vv, i_vv], M_logp)

    # The compiled graph should not contain any `RandomVariables`
    assert_no_rvs(M_logp_fn.maker.fgraph.outputs[0])

    # Looser tolerance in single precision.
    decimals = 6 if aesara.config.floatX == "float64" else 4

    test_val_rng = np.random.RandomState(3238)

    # SciPy reference distributions matching the Aesara graph above.
    bern_sp = sp.bernoulli(p_val)
    norm_sp = sp.norm(loc=0, scale=1)
    gamma_sp = sp.gamma(0.5, scale=1.0 / 0.5)

    for i in range(10):
        i_val = bern_sp.rvs(size=size, random_state=test_val_rng)
        x_val = norm_sp.rvs(size=size, random_state=test_val_rng)
        y_val = gamma_sp.rvs(size=size, random_state=test_val_rng)

        m_val = np.stack([x_val, y_val])[i_val]
        # Expected log-prob: chosen component's density plus the index's mass.
        exp_obs_logps = np.stack([norm_sp.logpdf(x_val), gamma_sp.logpdf(y_val)])[i_val]
        exp_obs_logps += bern_sp.logpmf(i_val)

        logp_vals = M_logp_fn(p_val, m_val, i_val)

        np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals)
@pytest.mark.parametrize(
    "p_val, size",
    [
        # (np.array(0.0, dtype=aesara.config.floatX), ()),
        # (np.array(1.0, dtype=aesara.config.floatX), ()),
        # (np.array(0.0, dtype=aesara.config.floatX), (2,)),
        # (np.array(1.0, dtype=aesara.config.floatX), (2, 1)),
        # (np.array(1.0, dtype=aesara.config.floatX), (2, 3)),
        (np.array([0.1, 0.9], dtype=aesara.config.floatX), (2, 3)),
    ],
)
def test_hetero_mixture_nonscalar(p_val, size):
    """Mixture log-probability with non-scalar index probabilities.

    Compares the Aesara-computed log-probability of a normal/gamma mixture
    (indexed by a Bernoulli or two-category categorical variable) against a
    SciPy reference.
    """
    srng = at.random.RandomStream(29833)

    X_rv = srng.normal(0, 1, size=size, name="X")
    Y_rv = srng.gamma(0.5, 0.5, size=size, name="Y")

    if np.ndim(p_val) == 0:
        p_at = at.scalar("p")
        p_at.tag.test_value = p_val
        I_rv = srng.bernoulli(p_at, size=size, name="I")
        # BUG FIX: `p_val_1` was previously assigned only in the `else`
        # branch, so re-enabling any of the commented-out scalar cases above
        # raised a `NameError`.  For a scalar `p`, the Bernoulli success
        # probability is `p_val` itself.
        p_val_1 = p_val
    else:
        p_at = at.vector("p")
        p_at.tag.test_value = np.array(p_val, dtype=aesara.config.floatX)
        I_rv = srng.categorical(p_at, size=size, name="I")
        # With two categories, category 1's probability acts as the Bernoulli
        # parameter of the SciPy reference below.
        p_val_1 = p_val[1]

    i_vv = I_rv.clone()
    i_vv.name = "i"

    M_rv = at.stack([X_rv, Y_rv])[I_rv]
    M_rv.name = "M"
    m_vv = M_rv.clone()
    m_vv.name = "m"

    M_logp = joint_logprob({M_rv: m_vv, I_rv: i_vv}, sum=False)
    M_logp_fn = aesara.function([p_at, m_vv, i_vv], M_logp)

    # The compiled graph should not contain any `RandomVariable`s.
    assert_no_rvs(M_logp_fn.maker.fgraph.outputs[0])

    decimals = 6 if aesara.config.floatX == "float64" else 4

    test_val_rng = np.random.RandomState(3238)

    bern_sp = sp.bernoulli(p_val_1)
    norm_sp = sp.norm(loc=0, scale=1)
    gamma_sp = sp.gamma(0.5, scale=1.0 / 0.5)

    for i in range(10):
        i_val = bern_sp.rvs(size=size, random_state=test_val_rng)
        x_val = norm_sp.rvs(size=size, random_state=test_val_rng)
        y_val = gamma_sp.rvs(size=size, random_state=test_val_rng)

        # Expected log-prob: chosen component's density plus the index's mass.
        exp_obs_logps = np.stack([norm_sp.logpdf(x_val), gamma_sp.logpdf(y_val)])[i_val]
        exp_obs_logps += bern_sp.logpmf(i_val)

        m_val = np.stack([x_val, y_val])[i_val]
        logp_vals = M_logp_fn(p_val, m_val, i_val)

        np.testing.assert_almost_equal(logp_vals, exp_obs_logps, decimal=decimals)
|
Sayan97/BIET---Official
|
app1.py
|
<gh_stars>0
from flask import Flask

# NOTE(review): `static_folder` is a machine-specific absolute path — this
# only works on the original author's machine; a relative path would be
# portable.  Confirm before changing.
app = Flask(__name__,static_url_path="/static",static_folder="/Users/Sayan/Documents/GitHub/BIET/static")

# Each route simply serves a static HTML page from the static folder.
@app.route('/')
def homepage():
    return app.send_static_file('index.html')

@app.route('/index.html')
def homepage1():
    return app.send_static_file('index.html')

@app.route('/onecolumn.html')
def about():
    return app.send_static_file('onecolumn.html')

@app.route('/twocolumn1.html')
def contact():
    return app.send_static_file('twocolumn1.html')

@app.route('/twocolumn2.html')
def ideation():
    return app.send_static_file('twocolumn2.html')

if __name__=='__main__':
    # Development server only; debug mode must not be used in production.
    app.run(debug=True)
|
danielpops/slack-starterbot
|
starterbot.py
|
import os
import time
import re
from slackclient import SlackClient
# instantiate Slack client (token comes from the environment)
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
# starterbot's user ID in Slack: value is assigned after the bot starts up
starterbot_id = None

# constants
RTM_READ_DELAY = 1 # 1 second delay between reading from RTM
EXAMPLE_COMMAND = "do"  # NOTE(review): not referenced in the visible code
# Captures (user_id, rest-of-message) from a leading <@...> mention.
MENTION_REGEX = "^<@(|[WU].+?)>(.*)"
def parse_event(slack_event):
    """
    Normalize a single event coming from the Slack RTM API.

    Message events (excluding subtypes and bot messages) are annotated with
    `is_direct` and `mentioned_user`; for direct mentions of this bot,
    `text` is replaced by the message with the mention stripped.  Returns
    the annotated event, or None for event types we ignore.
    """
    # BUG FIX: the body previously referenced the global `event` (which only
    # worked because the `for event in ...` loop in `__main__` leaked the
    # name) instead of the `slack_event` parameter.
    event = slack_event
    if event["type"] == "message" and not "subtype" in event and not "bot_id" in event:
        user_id, message = parse_direct_mention(event["text"])
        event["is_direct"] = False
        event["mentioned_user"] = None
        if user_id == starterbot_id:
            event["is_direct"] = True
            event["mentioned_user"] = user_id
            event["text"] = message
        # Debug trace; parenthesized so it works on both Python 2 and 3.
        print(event)
        return event
    elif event["type"] == "reaction_added":
        pass
    elif event["type"] == "reaction_removed":
        pass
    return None
def parse_direct_mention(message_text):
    """
    Finds a direct mention (a mention that is at the beginning) in message text
    and returns the user ID which was mentioned. If there is no direct mention, returns None
    """
    match = re.search(MENTION_REGEX, message_text)
    if not match:
        return (None, None)
    # Group 1 is the mentioned user ID; group 2 is the rest of the message.
    mentioned_id = match.group(1)
    remainder = match.group(2).strip()
    return (mentioned_id, remainder)
def handle_event(event):
    """
    React to a parsed event: reply/emoji-react to direct mentions, and add a
    heart reaction to any non-mention message containing "love".
    """
    # Default to no response
    response = None
    if event["is_direct"]:
        if event["text"].startswith("hi"):
            response = "hi :wave:"
            slack_client.api_call(
                "reactions.add",
                channel=event["channel"],
                name="wave",
                timestamp=event["event_ts"],
            )
    else:
        # Do something else
        if "love" in event["text"]:
            slack_client.api_call(
                "reactions.add",
                channel=event["channel"],
                name="heart",
                timestamp=event["event_ts"],
            )
    if response:
        # Sends the response back to the channel
        slack_client.api_call(
            "chat.postMessage",
            as_user=True,
            channel=event["channel"],
            text=response
        )
if __name__ == "__main__":
    # Connect to Slack's RTM API and poll for events forever.
    if slack_client.rtm_connect(with_team_state=False):
        print("Starter Bot connected and running!")
        # Read bot's user ID by calling Web API method `auth.test`
        starterbot_id = slack_client.api_call("auth.test")["user_id"]
        while True:
            parsed_event = None
            for event in slack_client.rtm_read():
                parsed_event = parse_event(event)
                if parsed_event:
                    handle_event(parsed_event)
            time.sleep(RTM_READ_DELAY)
    else:
        print("Connection failed. Exception traceback printed above.")
|
fluffynuts/scripts
|
audtool-play-pause.py
|
import subprocess
import os
def play_pause():
    """Toggle Audacious playback via the audtool.exe next to this script.

    Prints the "before -> after" status transition.
    """
    file_path = os.path.realpath(__file__)
    containing_dir = os.path.dirname(file_path)
    aud_tool = os.path.join(containing_dir, "audtool.exe")
    if not os.path.isfile(aud_tool):
        print("please copy this script into the folder containing audtool.exe")
        exit(1)
    status = fetch_status(aud_tool)
    # stopped/paused -> start playing; playing -> pause.  Any other status
    # is left untouched.
    if status == "stopped" or status == "paused":
        subprocess.check_output([ aud_tool, "--playback-play" ])
    elif status == "playing":
        subprocess.check_output([ aud_tool, "--playback-pause" ])
    next_status = fetch_status(aud_tool)
    print("%s -> %s" % (status, next_status))
def fetch_status(aud_tool):
    """Ask audtool for the current playback status string (e.g. "playing")."""
    output = subprocess.check_output([ aud_tool, "--playback-status"])
    return output.decode().strip()
# Script entry point: toggle play/pause once and report the transition.
if __name__ == "__main__":
    play_pause()
|
fluffynuts/scripts
|
colors.py
|
<gh_stars>1-10
#!/usr/bin/python
# Print a lookup chart of ANSI terminal color escape codes: every
# modifier/foreground/background combination, rendered in its own colors.
#print table header
import sys
colw = 10
bg_start = 40
bg_end = 48
fg_start = 30
fg_end = 38
print("Terminal color code lookup chart")
print(" \033[2;37;40m* add leading '\\033' and trailing 'm' to text in desired block\033[0m")
print(" \033[2;37;40m* remember to terminate with '\\033[0m'\033[0m")
for fg in range(fg_start, fg_end, 1):
    # Modifiers: 2=dim, 3=italic, 1=bold, 4=underline.
    for mod in [2, 3, 1, 4]:
        #line = str(fg)
        #while ((len(line) % colw) != 0):
        # line += " "
        line = ""
        for bg in range(bg_start, bg_end, 1):
            line += "\033[" + str(mod) + ";" + str(fg) + ";" + str(bg) + "m"
            tmp = " " + str(mod) + ";" + str(fg) + ";" + str(bg)
            while(len(tmp) < colw):
                tmp += " "
            line += tmp
        # Reset attributes at the end of each row.
        line += "\033[0m"
        print(line)
|
fluffynuts/scripts
|
DropboxPushbulletPusher.py
|
#!/usr/bin/python
import sys
import os
from pushbullet import Pushbullet
import ConfigParser
import dropbox
class Pusher:
    """Upload a file to Dropbox and push its share link via Pushbullet.

    Credentials are read from ``~/secrets.ini`` (sections ``dropbox`` and
    ``pushbullet``).  NOTE: written for Python 2 (print statements,
    raw_input, ConfigParser) and the legacy Dropbox v1 client API.
    """
    def __init__(self):
        self._loadConfig()
    def _loadConfig(self):
        home = os.path.expanduser('~')
        self._secretsFile = os.path.join(home, 'secrets.ini')
        self._loadConfigFile()
    def _loadConfigFile(self):
        cfg = ConfigParser.ConfigParser()
        cfg.read(self._secretsFile)
        self._initDropbox(cfg)
        self._initPushbullet(cfg)
    def _initPushbullet(self, cfg):
        section = 'pushbullet'
        self._pbToken = cfg.get(section, 'token')
        self._pbDevice = int(cfg.get(section, 'device'))
    def _initDropbox(self, cfg):
        section = 'dropbox'
        self._key = cfg.get(section, 'key')
        self._secret = cfg.get(section, 'secret')
        # Reuse a cached OAuth token when present; otherwise run the
        # interactive authorization flow and persist the result.
        if cfg.has_option(section, 'token'):
            self._token = cfg.get(section, 'token')
        else:
            self._token = self._authorize()
            self._setPersistentToken(cfg, section, self._token)
    def _setPersistentToken(self, cfg, section, token):
        # Rewrite secrets.ini with the freshly obtained token.
        cfg.set(section, 'token', token)
        fp = open(self._secretsFile, 'w')
        fp.truncate()
        cfg.write(fp)
    def _authorize(self):
        # Interactive OAuth2 "no redirect" flow: user pastes the code back.
        flow = dropbox.client.DropboxOAuth2FlowNoRedirect(self._key, self._secret)
        authorize_url = flow.start()
        print '1. Go to: ' + authorize_url
        print '2. Click "Allow" (you might have to log in first)'
        print '3. Copy the authorization code.'
        auth_token = raw_input("Enter the authorization code here: ").strip()
        access_token, user_id = flow.finish(auth_token)
        print(access_token)
        print(user_id)
        return access_token
    def push(self, path):
        # Upload `path` and broadcast its share URL.
        url = self._pushToDropbox(path)
        self._pushToPushbullet(url)
    def _pushToDropbox(self, path):
        client = dropbox.client.DropboxClient(self._token)
        print('uploading %s...' % (path))
        uploadName = os.path.basename(path)
        with open(path, 'r') as fp:
            response = client.put_file(uploadName, fp, overwrite=True)
        print('upload complete!')
        m = client.metadata(uploadName)
        return client.share(m['path'])['url']
    def _pushToPushbullet(self, url):
        pb = Pushbullet(self._pbToken)
        device = pb.devices[self._pbDevice]
        device.push_note('New ROM!', url)
if __name__ == '__main__':
    # Push every file named on the command line.
    # NOTE(review): `push()` returns None, so this always prints "<arg>:None".
    pusher = Pusher()
    for arg in sys.argv[1:]:
        print('%s:%s' % (arg, pusher.push(arg)))
|
fluffynuts/scripts
|
cputemp-xml.py
|
<filename>cputemp-xml.py
#!/usr/bin/python
import os
import sys
import xml.dom.minidom
def all_numeric(s):
    """Return True when every character of `s` is an ASCII digit (True for "")."""
    return all(ch in "0123456789" for ch in s)
if __name__ == "__main__":
    # Dump per-core CPU temperatures (scraped from `sensors`) as XML, either
    # to stdout or to the file named by the first CLI argument.
    out = ""
    if len(sys.argv[1:]):
        out = sys.argv[1]
    doc = xml.dom.minidom.Document()
    docEl = doc.createElement("cputemp")
    doc.appendChild(docEl);
    for line in os.popen("sensors"):
        # BUG FIX: the whitespace-collapsing loop replaced a single space
        # with a single space, which never terminates once a space is
        # present.  Collapse runs of *double* spaces into single ones.
        while line.count("  ") > 0:
            line = line.replace("  ", " ")
        parts = line.split(" ")
        # Only the "Core N:" lines carry per-core temperatures.
        if parts[0].strip("0123456789") != "Core":
            continue
        el = doc.createElement(parts[0])
        docEl.appendChild(el)
        # Keep only digits and the decimal point (drops the +/°C adornments).
        tmp = parts[2]
        temp = ""
        for c in tmp:
            if ".1234567890".count(c) > 0:
                temp += c
        t = doc.createTextNode(temp)
        el.appendChild(t)
    x = doc.toprettyxml()
    if len(out) == 0:
        print(x)
    else:
        open(out, "w").write(x)
|
fluffynuts/scripts
|
flatten_fs.py
|
<reponame>fluffynuts/scripts
#!/usr/bin/python
import sys
import os
import re
class Flattener:
    """Flatten nested music directories into one level per top directory.

    Files below the first directory level are renamed into the top-level
    directory with path components joined by " # ", redundant artist/album
    prefixes stripped, Ogg files converted to MP3, and emptied
    sub-directories removed.
    """
    def __init__(self, basedir = None):
        self.basedir = basedir
        self.blankstr = 75 * " "
    def status(self, s):
        # Rewrite the current console line with a (truncated) progress message.
        if (len(s) > 72):
            s = s[:72] + "..."
        sys.stdout.write("\r%s\r%s" % (self.blankstr, s))
        sys.stdout.flush()
    def ireplace(self,s,old,new,count=0):
        ''' Behaves like string.replace(), but does so in a case-insensitive
        fashion. (scraped from
        http://www.noogz.net/website/blog/programming/20080327-StringIRep.html)'''
        pattern = re.compile(re.escape(old),re.I)
        return re.sub(pattern,new,s,count)
    def flatten(self, dir = None):
        """Flatten everything under `dir` (defaults to the configured basedir)."""
        if dir == None:
            dir = self.basedir
        if dir == None:
            return False
        contents = self.ls_R(dir)
        # Length of the `dir` prefix (including the trailing separator) to
        # strip when computing relative paths.
        dirlen = len(dir)
        if dirlen > 0 and dir[-1] != os.sep:
            dirlen += 1
        dirs = []
        i = 0
        for f in contents:
            if os.path.isdir(f):
                dirs.append(f)
                continue
            rel = f[dirlen:]
            #parts = os.path.split(rel)
            parts = rel.split(os.sep)
            basedir = parts[0]
            # try to remove the artist name from the file -- it's
            # in the dirname
            parts[-1] = self.ireplace(parts[-1], basedir + " - ", "")
            if len(parts) > 1:
                # remove the album name from the file name if it's there
                parts[-1] = self.ireplace(parts[-1], parts[-2] + " - ", "")
            # Collapse the remaining path components into a single filename.
            fname = " # ".join(parts[1:])
            newname = os.path.join(dir, basedir, fname)
            if newname != f:
                print("%s\n -> %s" % (f, newname))
                os.rename(f, newname)
            if os.path.splitext(newname)[-1].lower() == ".ogg":
                self.convert_ogg_to_mp3(newname)
        # Remove emptied sub-directories (deepest first), keeping the
        # original top-level directories.
        basedirs = os.listdir(dir)
        for i in range(len(basedirs)):
            basedirs[i] = os.path.join(dir, basedirs[i])
        for d in sorted(dirs, reverse=True):
            if not os.path.isdir(d):
                continue
            if basedirs.count(d) > 0:
                continue
            if len(os.listdir(d)) == 0:
                print("del: %s" % (d))
                os.rmdir(d)
            else:
                print("%s not empty!" % (d))
        return True
    def convert_ogg_to_mp3(self, file):
        # Decode with oggdec into /tmp, re-encode with lame, then remove both
        # the temp file and the original .ogg.
        tmpfile = os.path.join("/tmp", os.path.basename(file) + ".raw")
        if os.system("oggdec \"%s\" -o \"%s\"" % (file, tmpfile)):
            print(" -- Can't convert %s to mp3: oggdec fails" % (file))
            return
        if os.system("lame --preset hifi \"%s\" \"%s\"" % (tmpfile, os.path.splitext(file)[0] + ".mp3")):
            print(" -- Can't convert %s to mp3: lame fails" % (file))
            return
        os.remove(tmpfile)
        os.remove(file)
    def ls_R(self, dir, include_dirs=False):
        """Breadth-first listing of all paths (dirs and files) under `dir`, sorted."""
        stack = [dir]
        ret = []
        items = 0
        while stack:
            thisdir = stack.pop(0)
            for f in sorted(os.listdir(thisdir)):
                items += 1
                if items % 100 == 0:
                    self.status("Listing directory contents... %i" % (items))
                path = os.path.join(thisdir, f)
                if os.path.isdir(path):
                    ret.append(path)
                    stack.append(path)
                    continue
                ret.append(path)
        self.status("")
        return sorted(ret)
if __name__ == "__main__":
    # CLI: flatten each directory named on the command line.
    f = Flattener()
    for arg in sys.argv[1:]:
        if os.path.isdir(arg):
            f.flatten(arg)
        else:
            print("%s: dir not found" % (arg))
|
fluffynuts/scripts
|
uptime.py
|
import sys
import os
from datetime import datetime
class Uptime:
    """Report Windows system uptime parsed from `net statistics workstation`.

    NOTE: written for Python 2 — `/` below is integer division and
    `filter(...)` is subscripted as a list.
    """
    def printUptime(self):
        lastbootstr = self._getLastBootStr()
        uptimeDelta = self._getUptimeDelta(lastbootstr)
        self._printUptime(uptimeDelta)
    def _printUptime(self, tdelta):
        printstring = self._getPrintString(tdelta)
        print(printstring)
    def _getPrintString(self, tdelta):
        # Break timedelta.seconds into h/m/s components for display.
        seconds = self._zeroPad(tdelta.seconds % 60)
        minutes = self._zeroPad((tdelta.seconds % 3600) / 60)
        hours = tdelta.seconds / 3600
        return "Uptime: %i days, %i hours, %s minutes, %s seconds" % (tdelta.days, hours, minutes, seconds)
    def _zeroPad(self, i):
        # Left-pad the value with zeros to at least two characters.
        s = str(i)
        prepend = []  # NOTE(review): unused leftover
        while len(s) < 2:
            s = "0" + s
        return s
    def _getUptimeDelta(self, timeString):
        # Time since the parsed last-boot timestamp.
        lastboot = datetime.strptime(timeString, "%Y-%m-%d %H:%M:%S %p")
        return datetime.now() - lastboot
    def _getLastBootStr(self):
        # Scrape the "Statistics since <date> <time> <AM/PM>" line from
        # `net statistics workstation`.
        for line in os.popen("net statistics workstation"):
            parts = filter(None, line.split(" "))
            if parts[0] == "Statistics":
                return (" ".join([parts[2], parts[3], parts[4]])).strip()
        return None
# Script entry point: print the uptime once.
if __name__ == "__main__":
    Uptime().printUptime()
|
fluffynuts/scripts
|
PushbulletMessage.py
|
#!/usr/bin/python
import sys
import os
from pushbullet import Pushbullet
import ConfigParser
import dropbox
class MessagePusher:
    """Send a Pushbullet note using credentials from ``~/secrets.ini``.

    The ini file must contain a ``pushbullet`` section with ``token`` and a
    numeric ``device`` index.
    """
    def __init__(self):
        self._loadConfig()
    def _loadConfig(self):
        home = os.path.expanduser('~')
        self._secretsFile = os.path.join(home, 'secrets.ini')
        self._loadConfigFile()
    def _loadConfigFile(self):
        cfg = ConfigParser.ConfigParser()
        cfg.read(self._secretsFile)
        self._initPushbullet(cfg)
    def _initPushbullet(self, cfg):
        section = 'pushbullet'
        self._pbToken = cfg.get(section, 'token')
        self._pbDevice = int(cfg.get(section, 'device'))
    def push(self, subject, message):
        # Public entry point: send `message` under `subject` to the device.
        self._pushToPushbullet(subject, message)
    def _pushToPushbullet(self, subject, message):
        pb = Pushbullet(self._pbToken)
        device = pb.devices[self._pbDevice]
        device.push_note(subject, message)
if __name__ == '__main__':
    # CLI usage: <script> <subject> <message>
    if len(sys.argv[1:]) < 2:
        print("Usage: %s <subject> <message>" % (sys.argv[0]))
        sys.exit(1)
    pusher = MessagePusher()
    pusher.push(sys.argv[1], sys.argv[2])
|
fluffynuts/scripts
|
update-cm.py
|
#!/usr/bin/python
import os
import shutil
from PushbulletMessage import MessagePusher
# Download new CM12 ROM builds, copy them into the local web root, and push
# a Pushbullet notification with the local download URL for each new file.
store_path = '/mnt/dump/install-src/cm12-i9300/rom'
serve_path = '/var/www/html/cm12'
urlBase = 'http://192.168.1.100/cm12/'
exe = 'code/xda-rom-downloader/source/xda-rom-downloader/bin/Debug/xda-rom-downloader.exe'
home = os.path.expanduser('~')
exe = os.path.join(home, exe)
# Run the downloader; it drops new ROM files into `store_path`.
status = os.system('%s -o %s' % (exe, store_path))
if status == 0:
    # Anything stored but not yet served is new.
    stored = os.listdir(store_path)
    served = os.listdir(serve_path)
    to_copy = list(set(stored) - set(served))
    for newFile in to_copy:
        print('pushing message about: ' + newFile)
        shutil.copyfile(os.path.join(store_path, newFile), os.path.join(serve_path, newFile))
        localUrl = urlBase + newFile
        pusher = MessagePusher()
        pusher.push("New ROM available", localUrl)
|
fluffynuts/scripts
|
vacuum_ff.py
|
#!/usr/bin/python
import os
import sys
import sqlite3
import ConfigParser
def pad(s, w = 60):
    """Right-pad `s` with spaces to a minimum width of `w` characters."""
    return s.ljust(w)
def vacuum(db):
    # Print a fixed-width progress label, then VACUUM the SQLite database
    # in place, reporting (ok)/(fail).  NOTE: Python 2 except syntax.
    sys.stdout.write(pad(" Vacuuming %s" % (os.path.basename(db))))
    sys.stdout.flush()
    try:
        c = sqlite3.connect(db)
        c.executescript("vacuum;")
        sys.stdout.write("(ok)\n")
    except Exception, e:
        sys.stdout.write("(fail)\n")
        print(str(e))
def vacuum_profile(ppath):
    """Vacuum every ``*.sqlite`` database directly inside profile dir `ppath`."""
    for entry in os.listdir(ppath):
        _, ext = os.path.splitext(entry)
        if ext == ".sqlite":
            vacuum(os.path.join(ppath, entry))
if __name__ == "__main__":
    # Locate the Firefox profile base directory for this platform.
    if (sys.platform == "win32" or sys.platform == "win64"):
        pbase = os.path.join(os.path.expanduser("~"), "Application Data", "Mozilla", "Firefox")
    else:
        pbase = os.path.join(os.path.expanduser("~"), ".mozilla", "firefox")
    pini = os.path.join(pbase, "profiles.ini")
    pcfg = ConfigParser.ConfigParser()
    pcfg.readfp(open(pini, "r"))
    pidx = 0
    # Walk Profile0, Profile1, ... sections until one is missing.
    while True:
        psection = ("Profile%i" % pidx)
        if not pcfg.has_section(psection):
            break
        pname = pcfg.get(psection, "name")
        ppath = pcfg.get(psection, "path").replace("/", os.sep)
        ppath = os.path.join(pbase, ppath)
        print("Profile: %s" % pname)
        vacuum_profile(ppath)
        pidx += 1
|
fluffynuts/scripts
|
shuf.py
|
<gh_stars>1-10
#!/usr/bin/python
import sys
import random
import os
import re
def main(args):
    """Shuffle lines from stdin (if piped) or from the files named in `args`.

    ``-n <count>`` limits how many shuffled lines are printed.
    NOTE(review): the value following ``-n`` is also scanned as a potential
    filename in the second loop — harmless unless a file by that name exists.
    """
    last_arg = ""
    limit = None
    for arg in args:
        if last_arg == "-n":
            limit = int(arg)
            last_arg = ""
        if arg == "-n":
            last_arg = arg
    # Piped input takes precedence over file arguments.
    if not sys.stdin.isatty():
        all_lines = list(map(strip_newlines, sys.stdin.readlines()))
    else:
        all_lines = []
        for arg in args:
            if os.path.isfile(arg):
                fp = open(arg, "r")
                for line in fp.readlines():
                    all_lines.append(strip_newlines(line))
                fp.close()
    random.shuffle(all_lines)
    if limit is None:
        limit = len(all_lines)
    for i in range(0, limit):
        print(all_lines[i])
# Compiled once: matches a single trailing LF or CRLF.
_TRAILING_NEWLINE = re.compile(r"(\n|\r\n)$")

def strip_newlines(s):
    """Remove a trailing line terminator (LF or CRLF) from `s`, if present."""
    return _TRAILING_NEWLINE.sub("", s)
# Script entry point: shuffle immediately using the CLI arguments.
main(sys.argv[1:])
|
fluffynuts/scripts
|
dircmp.py
|
<filename>dircmp.py
#!/usr/bin/python
# vim: expandtab shiftwidth=2 tabstop=2
import os
import sys
try:
import psyco
except:
pass
import time
# Shared scratch globals: `blankstr` is filled with spaces in main() and used
# to blank a status line; `ls_label` labels the directory currently scanned.
blankstr = ""
ls_label = ""
def ls_R(dir):
    """Recursively list files under `dir`, returning sorted paths relative to it.

    NOTE: uses `os.path.walk`, which exists only on Python 2.
    """
    ls = [dir]
    os.path.walk(dir, walk_cb, ls)
    ls = ls[1:]
    sys.stdout.write("\r" + blankstr + "\r")
    sys.stdout.flush()
    ls.sort()
    return ls
def walk_cb(ls, dirname, fnames):
    """`os.path.walk` callback: append files relative to the root at `ls[0]`.

    Emits a progress line every 100 files.
    """
    global ls_label
    d = dirname[len(ls[0]):]  # NOTE(review): computed but never used
    master_len = len(ls[0]) + 1
    for f in fnames:
        fpath = os.path.join(dirname, f)
        if os.path.isfile(fpath):
            ls.append(fpath[master_len:])
        items = len(ls) - 1
        if items % 100 == 0:
            sys.stdout.write("\r" + blankstr + "\r%s : %s files found..." %(ls_label, str(items)))
            sys.stdout.flush()
def usage():
    """Print a one-line usage message and terminate with exit status 0."""
    script_name = os.path.basename(sys.argv[0])
    print("Usage: " + script_name + " {dir1} {dir2}")
    sys.exit(0)
def get_hr_time(t):
    """Format a duration of `t` seconds as a zero-padded "MM:SS" string.

    Fixes:
    - floor division (`//`) so the result stays an int under Python 3,
      where `/` would yield a float and corrupt the output;
    - no longer shadows the built-in `min`;
    - `str.zfill` replaces the manual padding loops.
    """
    t = int(t)
    minutes = str(t // 60).zfill(2)
    seconds = str(t % 60).zfill(2)
    return minutes + ":" + seconds
def get_new(label, reference, compare):
    """Return the sorted items of `compare` that are absent from `reference`.

    Emits a progress line (via `status`) every 100 items.
    PERF FIX: membership used `reference.count(f)` — an O(n) scan per item,
    O(n*m) overall.  A set is built once for O(1) lookups; the progress
    display and sorted return value are unchanged.
    """
    global blankstr
    ref_set = set(reference)
    ret = []
    idx = 0
    count = len(compare)
    for f in compare:
        idx += 1
        if f not in ref_set:
            ret.append(f)
        if idx % 100 == 0:
            perc = (idx * 100.0) / count
            status("%s: %i%%" % (label, int(perc)))
    ret.sort()
    return ret
def get_removed(label, reference, compare):
    """Return the items of `reference` that are absent from `compare`.

    Order follows `reference`; unlike `get_new` the result is NOT sorted —
    preserved for backward compatibility.  Emits a progress line every 100
    items.
    PERF FIX: membership used `compare.count(f)` — O(n) per item, O(n*m)
    overall.  A set is built once for O(1) lookups.
    """
    global blankstr
    cmp_set = set(compare)
    ret = []
    idx = 0
    count = len(reference)
    for f in reference:
        idx += 1
        if f not in cmp_set:
            ret.append(f)
        if idx % 100 == 0:
            perc = (idx * 100.0) / count
            status("%s: %i%%" % (label, int(perc)))
    return ret
def clear_line():
    """Blank the current terminal line and return the cursor to its start."""
    global blankstr
    sys.stdout.write("\r" + blankstr + "\r")
def print_list(label, ref_list, dlist, out_file):
    """Print `dlist` under a heading, to stdout or appended to `out_file`.

    NOTE(review): `ref_list` is accepted but never used.  Python 2 except
    syntax below.
    """
    if len(dlist) == 0:
        return
    if len(out_file):
        try:
            fp = open(out_file, "ab")
            fp.write("========\n" + label + "========\n")
            for f in dlist:
                fp.write(" %s\n" % (f))
            fp.close()
        except Exception, e:
            print(str(e))
    else:
        print(label)
        for f in dlist:
            print(" %s" % (f))
def status(s):
    """Rewrite the current terminal line with the progress message `s`."""
    line = "\r" + blankstr + "\r" + s
    sys.stdout.write(line)
    sys.stdout.flush()
def main():
    """Compare the file sets of two directory trees and report differences.

    CLI: two positional dirs, or multiple via ``-l``/``-r``; ``-o <file>``
    writes the report to a file.  NOTE: Python 2 except syntax below.
    """
    global blankstr
    blankstr = ""
    if len(sys.argv) == 0:
        usage()
    # Build a 76-space blanking string for status-line erasure.
    for i in range(76):
        blankstr += " "
    left_dirs = []
    right_dirs = []
    last_arg = ""
    out_file = ""
    for arg in sys.argv[1:]:
        if ["-l", "-r", "-o"].count(arg) > 0:
            last_arg = arg
            continue
        if last_arg == "-o":
            # Truncate/create the output file up front.
            out_file = arg
            try:
                fp = open(out_file, "wb")
                fp.close()
            except Exception, e:
                print(str(e))
        if os.path.isdir(arg):
            if last_arg == "":
                # Bare dirs fill left then right.
                if len(left_dirs) == 0:
                    left_dirs.append(arg)
                    continue
                if len(right_dirs) == 0:
                    right_dirs.append(arg)
                    continue
                print("Left and right dirs already defined; for multiple dirs use")
                print(" -l <dir> <dir> ... -r <dir> <dir>")
                sys.exit(1)
            if last_arg == "-r":
                right_dirs.append(arg)
                continue
            if last_arg == "-l":
                left_dirs.append(arg)
    if len(left_dirs) == 0:
        print("No left dir(s) defined")
        sys.exit(1)
    if len(right_dirs) == 0:
        print("No right dir(s) defined")
        sys.exit(1)
    start = time.time()
    left_list = []
    right_list = []
    # NOTE(review): `> 0` is always true here (checked non-empty above);
    # probably meant `> 1` for pluralization.
    if len(left_dirs) > 0:
        s = "s"
    else:
        s = ""
    print("\r%s\rListing contents under left dir%s" % (blankstr, s))
    global ls_label
    for d in left_dirs:
        ls_label = "Scanning %s" % d
        tmp = ls_R(d)
        if len(tmp) > 0:
            left_list.extend(tmp)
    if len(right_dirs) > 0:
        s = "s"
    else:
        s = ""
    print("Left dirs scanned: %i files found" % len(left_list))
    status("Listing contents under right dir%s" % (s))
    for d in right_dirs:
        ls_label = "Scanning %s" % d
        tmp = ls_R(d)
        if len(tmp) > 0:
            right_list.extend(tmp)
    clear_line()
    print("Right dirs scanned: %i files found" % len(right_list))
    # Symmetric difference, reported one side at a time.
    right_new = get_new("-> Calculating missing from left", left_list, right_list)
    clear_line()
    print_list("Missing from left:", right_dirs, right_new, out_file)
    left_new = get_new("-> Calculating missing from right", right_list, left_list)
    clear_line()
    print_list("Missing from right:", left_dirs, left_new, out_file)
    if len(left_new) + len(right_new) == 0:
        print("left and right match up!")
    else:
        print("%i files missing from right" % len(left_new))
        print("%i files missing from left" % len(right_new))
    runtime = int(time.time() - start);
    print("Run time: " + get_hr_time(runtime))
if __name__ == "__main__":
    # Top-level guard: report Ctrl-C and unexpected failures gracefully.
    # NOTE: Python 2 except syntax.
    try:
        main()
    except KeyboardInterrupt:
        print("\n (Aborted)")
    except Exception, e:
        print("\n FAIL: %s" % str(e))
|
fluffynuts/scripts
|
umount_v30.py
|
<filename>umount_v30.py
#!/usr/bin/python
import os
import sys
import pygtk
pygtk.require("2.0")
import pynotify
import time
try:
import psyco
psyco.full()
except:
pass
def fix_series_similar(dir):
    """Strip a common leading prefix from every regular file in *dir*.

    Intended for shortening series episode names for 8.3-limited readers.
    Returns True on success (or when there is nothing to do), False when
    *dir* is not a directory or a rename fails.

    NOTE(review): the nesting of the prefix-search loop below was
    reconstructed from whitespace-mangled source; as laid out here the
    search settles after a single pass, so only a very short prefix is
    ever detected -- verify against the original layout.
    """
    if not os.path.isdir(dir):
        return False
    files = os.listdir(dir)
    pre = ""
    idx = 1
    while True:
        # Seed the candidate prefix from the first regular file's first
        # idx characters.
        for f in files:
            path = os.path.join(dir, f)
            if os.path.isfile(path):
                pre = f[:idx]
                break
        if len(pre) == 0:
            # No regular files, or the candidate has shrunk to nothing.
            break
        break_out = False
        for f in files:
            try:
                if f.index(pre) != 0:
                    # Prefix occurs, but not at the start: shorten it.
                    pre = pre[:len(pre)-1]
                    break
                break_out = True
            except Exception, e:
                # index() raised: prefix absent entirely; shorten it.
                pre = pre[:len(pre)-1]
                break_out = True
        if break_out:
            break
        idx += 1
    if (len(pre) == 0):
        # nothing to do and no error
        return True
    # Rename every regular file with the detected prefix removed.
    for f in files:
        path = os.path.join(dir, f)
        if not os.path.isfile(path):
            continue
        newpath = os.path.join(dir, f[len(pre):])
        try:
            print("renaming '" + path + "' to '" + newpath + "'")
            os.rename(path, newpath)
        except Exception, e:
            print("Can't rename '" + path + "' to '" + newpath + "':" + str(e))
            return False
    return True
def fix_series_dash(dir):
    """Strip a shared "<prefix> - " lead-in from every regular file in *dir*.

    Every regular file must split as "<prefix> - <rest>" with an identical
    <prefix>; otherwise nothing is renamed and False is returned.  Files
    whose shortened name already exists are left untouched.  Returns True
    when the directory was processed (even if no rename was needed).
    """
    # Fix: modernized 'print dir' and 'except Exception, e' to forms valid
    # on Python 2.6+ and Python 3; behaviour is unchanged.
    print(dir)
    if not os.path.isdir(dir):
        return False
    pre = ""
    for f in os.listdir(dir):
        if not os.path.isfile(os.path.join(dir, f)):
            continue
        parts = f.split(" - ")
        if len(parts) < 2:
            # A file without the "<prefix> - " pattern disqualifies the dir.
            return False
        if pre == "":
            pre = parts[0]
            print("Looking for pre '" + pre + "'")
            continue
        if pre != parts[0]:
            print("'" + f + "' doesn't have pre '" + pre + "'")
            return False
    pre += " - "
    l = len(pre)
    for f in os.listdir(dir):
        src = os.path.join(dir, f)
        if os.path.isdir(src):
            continue
        dst = os.path.join(dir, f[l:])
        if os.path.isfile(dst):
            # Never clobber an existing file.
            continue
        try:
            os.rename(src, dst)
        except Exception as e:
            print("Can't rename '" + src + "' to '" + dst + "':" + str(e))
            return False
    return True
def fix_series_space(dir):
    """Strip a shared space-separated lead-in from every regular file in *dir*.

    The lead-in is everything up to (excluding) the last space-separated
    token, and it must be identical for every regular file; otherwise
    nothing is renamed and False is returned.  Returns True when the
    directory was processed.
    """
    if not os.path.isdir(dir):
        return False
    pre = ""
    for f in os.listdir(dir):
        if not os.path.isfile(os.path.join(dir, f)):
            continue
        parts = f.split(" ")
        if len(parts) < 2:
            # A single-token name disqualifies the whole directory.
            return False
        head = " ".join(parts[:-1])
        if pre == "":
            pre = head
            print("Looking for pre '" + pre + "'")
            continue
        if pre != head:
            print("'" + f + "' doesn't have pre '" + pre + "'")
            return False
    l = len(pre)
    for f in os.listdir(dir):
        src = os.path.join(dir, f)
        if os.path.isdir(src):
            continue
        dst = os.path.join(dir, f[l:].strip())
        if os.path.isfile(dst):
            # Bug fix: previously the rename went ahead anyway, silently
            # clobbering dst on POSIX.  Skip instead, matching the
            # behaviour of fix_series_dash.
            print(dst + " already exists!")
            continue
        try:
            print("renaming '" + src + "' to '" + dst + "'")
            os.rename(src, dst)
        except Exception as e:
            # Fix: 'except Exception, e' is Python-2-only syntax.
            print("Can't rename '" + src + "' to '" + dst + "':" + str(e))
            return False
    return True
if __name__ == "__main__":
    # Scan the current mounts for a V30 media-player stick, fix up its file
    # names for 8.3-style readers, then unmount and FAT-sort it, narrating
    # progress via desktop notifications when pynotify is available.
    did_something = False
    notify_init = pynotify.init("V30 unmounter")
    for line in os.popen("mount"):
        # mount(8) output: "<device> on <mountpoint> type ..." ->
        # parts[0] is the device, parts[2] the mountpoint.
        parts = line.split(" ")
        dev = parts[0]
        mount = parts[2].lower()
        if mount.find('/media/v30') > -1:
            did_something = True
            if notify_init:
                n = pynotify.Notification("Series fix...", "Scanning for series to shorten names for 8.3 pathing")
                n.set_timeout(30000)
                n.show()
            fix_series_similar(parts[2])
            #if not fix_series_dash(parts[2]):
            #    fix_series_space(parts[2])
            if notify_init:
                n.close()
            print("Unmounting " + dev)
            if notify_init:
                # NOTE(review): this set_timeout() still targets the previous
                # notification -- the new one is only created on the next
                # line; these two statements look swapped.  Verify intent.
                n.set_timeout(100000)
                n = pynotify.Notification("Unmounting", "Unmounting V30 device; please be patient as data is written to disk")
                n.show()
            ret = os.system("umount " + dev)
            if notify_init:
                n.close()
            if ret == 0:
                if notify_init:
                    n = pynotify.Notification("Sorting FAT", "Sorting FAT filesystem for stupid readers...")
                    n.show()
                # Reorder the FAT so dumb players list files in name order.
                os.system("fatsort -c " + dev)
                if notify_init:
                    n.close()
                    n = pynotify.Notification("V30 unmounted", "It is safe to remove your V30 stokkie now!")
                    n.show()
    if not did_something:
        if notify_init:
            n = pynotify.Notification("Nothing to do", "No V30 mounts found; nothing to do! Remember, this is a silly script specifically designed for the V30 stokkies that we use for the divx-capable media player...")
            n.show()
|
fluffynuts/scripts
|
smartsaver.py
|
<reponame>fluffynuts/scripts
#!/usr/bin/python
#vim: expandtab
import sys
import os
import glob
import time
class SmartSaver:
    """Polls the process table and pokes the screensaver/DPMS while a
    configured media player (mplayer/vlc) is running, so the display is
    not blanked during playback.

    Written for Python 2 (print statements, dict.has_key); relies on a
    Linux-style /proc filesystem and the xset/xscreensaver/gnome
    command-line tools.
    """
    def __init__(self):
        # Full paths of player binaries whose presence inhibits the saver.
        self.watch_cmds = ["/usr/bin/mplayer", "/usr/bin/vlc"]
        #self.watch_cmds = ["gmplayer"]
        # Seconds between process-table scans.
        self.interval = 15
        # pid -> command-name cache so /proc/<pid>/cmdline is read at most
        # once per process.
        self.proc_cache = dict()
        # Scratch listing of /proc entries for the current scan.
        self.tmpprocs = []
        # Lower our own priority; this is a background helper.
        os.system("renice 5 " + str(os.getpid()))
    def run_cmd(self, cmd):
        # Run a shell command, complaining (but not raising) on failure.
        if os.system(cmd):
            print "cmd fails: '" + cmd + "'"
    def poke_screensaver(self):
        # Deactivate whichever screensaver is present and disable DPMS.
        print "Poking screensaver"
        if self.is_running("gnome-screensaver"):
            self.run_cmd("gnome-screensaver-command -p")
        if self.is_running("xscreensaver"):
            self.run_cmd("xscreensaver-command -deactivate")
        self.run_cmd("xset -dpms")
    def is_running(self, cmd):
        # Return True when *cmd* (a path or bare name) has a live process,
        # consulting and refreshing self.proc_cache as /proc is scanned.
        pids = []
        self.tmpprocs = glob.glob("/proc/*")
        self.tmpprocs.reverse()
        for f in self.tmpprocs:
            d = os.path.split(f)[1]
            try:
                pid = int(d)
                #print "Examining pid:", pid
                if self.proc_cache.has_key(pid):
                    if self.proc_cache[pid] == cmd:
                        #print "Using cached value for pid:", pid, "(" + cmd + ")"
                        print cmd, "is running (cached)"
                        return True
                    else:
                        continue
                # NOTE(review): leftover debug probe; d is a string, so the
                # comparison against the int 10723 can never be true.
                if d == 10723 and cmd == 'vlc':
                    print('foo')
                for line in open(os.path.join(f, "cmdline"), "r"):
                    # /proc/<pid>/cmdline is NUL-separated argv.
                    parts = line.split("\x00")
                    self.proc_cache[pid] = parts[0]
                    # look for the command in question (direct bin run)
                    if parts[0] == cmd or os.path.split(parts[0])[1] == cmd:
                        print cmd, "is running (1)"
                        self.proc_cache[pid] = cmd
                        return True
                    # look for the command in question as a script
                    # (not required for my purposes, but may be useful
                    # at a later stage)
                    if len(parts) > 1 and\
                        (parts[1] == cmd or\
                        os.path.split(parts[1])[1] == cmd):
                        for idx in range(1, len(parts) - 1):
                            if os.path.isfile(parts[idx]):
                                # Peek at the shebang to confirm it really
                                # is an interpreted script.
                                for line in open(parts[idx], "r"):
                                    if line.find("#!") == 0:
                                        iparts = line.split(" ")
                                        interp = iparts[0][2:]
                                        if os.path.isfile(interp):
                                            print(cmd, "is running (2)")
                                            self.proc_cache[pid] = cmd
                                            return True
                                    else:
                                        self.proc_cache[pid] = parts[idx]
                                    break
                pids.append(pid)
            except Exception as ex:
                # Non-numeric /proc entries and vanished processes land here.
                pass
        print(cmd + " is not running")
        return False
    def trim_cache(self, pids = []):
        # Drop cache entries for pids that no longer exist.
        # NOTE(review): the [] default is mutable AND mutated below -- the
        # classic shared-default pitfall; confirm this accumulation across
        # calls is not actually relied upon.
        if len(pids) == 0:
            for f in self.tmpprocs:
                d = os.path.split(f)[1]
                try:
                    pid = int(d)
                    pids.append(pid)
                except:
                    pass
        for pid in self.proc_cache.keys():
            if pids.count(pid) == 0:
                del self.proc_cache[pid]
    def watch(self):
        # Main loop: poke the saver while any watched player runs; restore
        # DPMS timeouts once playback stops.  Never returns.
        last_poked = False
        while True:
            print("")
            poked = False
            self.tmpprocs = []
            for cmd in self.watch_cmds:
                print('checking on: ' + cmd)
                if self.is_running(cmd):
                    self.poke_screensaver()
                    poked = True
                    break
            if last_poked and not poked:
                # re-enable the dpms features
                os.system("xset +dpms")
                os.system("xset dpms 1200 1500 1800")
            last_poked = poked
            time.sleep(self.interval)
            self.trim_cache()
if __name__ == "__main__":
    # Instantiate the watcher and hand control to its polling loop
    # (blocks until interrupted).
    saver = SmartSaver()
    saver.watch()
|
fluffynuts/scripts
|
gen_toxvid_makefile.py
|
<filename>gen_toxvid_makefile.py
#!/usr/bin/python
# purpose: to generate a makefile wrapper around the toxvid shell script that I wrote
# which converts one or more video files into a single output video file with
# the xvid video codec. The problem with this script is that it uses mencoder
# which doesn't take advantage of multiple cores -- and I have 4 now, so I want
# to encode at least 3 times faster (perhaps leave one core open for the machine)
# -- this would be trivial if we had a Makefile for each toxvid job -- hence this
# script
import os
import sys
def usage():
    """Print command-line help for the Makefile generator and exit with 0."""
    # Bug fix: the first line passed the program name as a *second* print()
    # argument (print("...%s...", x)), which printed the literal "%s" and
    # the name side by side; use real %-interpolation instead.
    print("Usage: %s [source folder] [destination folder]" % os.path.basename(sys.argv[0]))
    print(" generates a Makefile in [source folder] with toxvid jobs")
    print(" for each video in that folder, outputting to [destination folder]")
    print(" NOTE 1: [source folder] and [desintation folder] cannot be the same")
    print(" NOTE 2: this script will attempt to just copy files which are already")
    print(" in xvid/divx format")
    sys.exit(0)
if __name__ == "__main__":
    # Parse "<src> <dst>" from the command line; anything else shows help.
    src = None
    dst = None
    for arg in sys.argv[1:]:
        if src == None:
            src = arg
            continue
        if dst == None:
            dst = arg
            continue
        usage()
    if src == None or dst == None:
        usage()
    makefile = os.path.join(src, "Makefile")
    fp = None
    try:
        fp = open(makefile, "w")
    except Exception as e:
        # Bug fix: the message was passed as two print() arguments instead
        # of being %-interpolated.
        print("Unable to open %s: %s" % (makefile, str(e)))
        sys.exit(1)
    # targets will just have short names with an index
    idx = 0
    # maintain dictionary of targets
    targets = dict()
    conversions = 0
    copies = 0
    for f in os.listdir(src):
        srcfile = os.path.join(src, f)
        if not os.path.isfile(srcfile):
            continue
        # only process video files (by extension)
        ext = os.path.splitext(f)[-1][1:].lower()
        if ext not in ["avi", "mpg", "mpeg", "ts", "mkv", "mov"]:
            print("(ignoring file: %s)" % (srcfile))
            continue
        sys.stdout.write("%s: " % (f))
        sys.stdout.flush()
        dstfile = os.path.join(dst, os.path.splitext(f)[0] + ".avi")
        convert = True
        try:
            # Ask file(1) for the codecs; skip re-encoding when the video
            # stream is already DivX/XviD.
            pfp = os.popen("file \"%s\"" % srcfile)
            fdata = pfp.read()
            pfp.close()
            pos = fdata.index("video:")
            fdata = fdata[pos + len("video:"):]
            pos = fdata.index("audio:")
            vcodec = fdata[:pos].strip(" ,").lower()
            # Bug fix: the missing ':' here made this take a single
            # character instead of the tail of the string, so acodec was
            # garbage (it is unused, but fix it anyway).
            fdata = fdata[pos + len("audio:"):]
            acodec = fdata.strip(" \n,").lower()
            if vcodec in ["divx", "xvid"]:
                convert = False
        except Exception:
            # if there's a problem reading the data, fall back on re-encode
            pass
        if convert:
            print("convert")
            cmd = "toxvid -o \"%s\" \"%s\"" % (dstfile, srcfile)
            conversions += 1
        else:
            print("copy")
            cmd = "cp \"%s\" \"%s\"" % (srcfile, dstfile)
            copies += 1
        # Bug fix: the existence test was emitted with a literal,
        # never-interpolated "%s", so make could never find the file and
        # always rebuilt; bind it to the real destination file.
        targets["target%i" % (idx)] = ["@if test ! -e \"%s\"" % (dstfile), "then mkdir -p %s" % (dst), "%s" % (cmd), "fi"]
        idx += 1
    # generate output makefile
    t = list(targets.keys())
    t.sort()
    fp.write("# generated by %s\n\nall: %s cleanup\n\n" % (os.path.basename(sys.argv[0]), " ".join(t)))
    fp.write("\n\ncleanup:\n\t@rm \"%s\"\n" % (makefile))
    for k in t:
        fp.write("\n%s:\n\t%s\n" % (k, ";\\\n\t".join(targets[k])))
    fp.close()
    s1 = "s"
    if conversions == 1:
        s1 = ""
    # Bug fix: the singular message used to read "1 copie".
    s2 = "ies"
    if copies == 1:
        s2 = "y"
    print("Makefile generated at %s" % (makefile))
    print(" %i conversion%s %i cop%s" % (conversions, s1, copies, s2))
|
fluffynuts/scripts
|
hddtemp-xml.py
|
#!/usr/bin/python
import os
import sys
import xml.dom.minidom
def all_numeric(s):
    """Return True when every character of *s* is an ASCII digit.

    The empty string vacuously counts as numeric (matching the original
    loop's behaviour).  Replaces a manual count() loop over a constant
    that accidentally listed '0' twice.
    """
    return all(c in "0123456789" for c in s)
def get_temp(drive):
    """Return the temperature hddtemp(8) reports for *drive*, as a string.

    Parses the last number on the first line of `hddtemp <drive>` output
    by scanning backwards from the end of the line.  Returns "" when no
    numeric character is found.
    """
    cmd = "/usr/sbin/hddtemp " + drive
    fp = os.popen(cmd, "r")
    ret = ""
    line = fp.readline()
    in_temp = False  # NOTE(review): unused
    idx = -1
    nums = ".0123456789"
    # Walk backwards past any trailing unit text until the first numeric
    # character is found.
    while (len(line) + idx) > 0:
        if nums.find(line[idx]) > -1:
            break
        idx -= 1
    # Collect the contiguous run of numeric characters, building the
    # result front-to-back.
    while (len(line) + idx) > 0:
        if nums.find(line[idx]) > -1:
            ret = line[idx] + ret
        else:
            break
        idx -= 1
    return ret
if __name__ == "__main__":
    # Build an XML document of drive temperatures, e.g.
    #   <hddtemp><sda>41</sda></hddtemp>
    # Usage: hddtemp-xml.py [-o outfile] drive [drive ...]
    out = ""
    drives = []
    lastarg = ""
    for arg in sys.argv[1:]:
        if ["-o"].count(arg) > 0:
            lastarg = arg
        elif lastarg == "-o":
            out = arg
            lastarg = ""
        elif lastarg == "":
            # Accept bare device names ("sda") as well as full paths.
            if arg[:5] != "/dev/" and not os.path.exists(arg):
                arg = "/dev/" + arg
            drives.append(arg)
        else:
            print("Unrecognised argument '" + arg + "'")
            sys.exit(1)
    if len(drives) == 0:
        print("You must specify at least one drive on the commandline")
        sys.exit(1)
    doc = xml.dom.minidom.Document()
    docEl = doc.createElement("hddtemp")
    doc.appendChild(docEl);
    for d in drives:
        temp = get_temp(d)
        if len(temp) == 0:
            print("Can't get temp for '" + d + "'; skipping");
            continue
        # One element per drive, named after the basename (e.g. "sda").
        el = doc.createElement(os.path.basename(d))
        docEl.appendChild(el)
        t = doc.createTextNode(temp)
        el.appendChild(t)
    x = doc.toprettyxml()
    # Print to stdout unless -o gave an output file.
    if len(out) == 0:
        print(x)
    else:
        open(out, "w").write(x)
|
fluffynuts/scripts
|
get-latest-chromium.py
|
#!/usr/bin/python
# determine major version of the python engine:
# this script was originall written for py 2.6
# but adapted for 3.1; I'd like both to work
import sys
pyver = int(sys.version.split(" ")[0].split(".")[0])
import time
import urllib
if pyver == 3:
import urllib.request
else:
import urllib2
import os
import datetime
import zipfile
import shutil
import traceback
import platform
# Download-progress bookkeeping shared by the status helpers below.
start = datetime.datetime.now()
last_transferredK = 0
# Adaptive read size, bounded between 1 KiB and 64 KiB.
min_chunk = 1024
max_chunk = 65536
chunk = min_chunk
# Seconds of consecutive empty reads before the download is declared dead.
timeout = 180
blank = ""
BASE_OUT = os.path.expanduser("~") # default "temp" dir is user's home
# Pick the snapshot URL and archive name for the current platform.
if os.name == "posix":
    OUT = "chrome-linux.zip"
    if platform.machine().find("64") > -1:
        BASE = "http://build.chromium.org/buildbot/snapshots/chromium-rel-linux-64/"
    else:
        BASE = "http://build.chromium.org/buildbot/snapshots/chromium-rel-linux/"
elif os.name == "mac":
    BASE = "http://build.chromium.org/buildbot/snapshots/chromium-rel-mac/"
    OUT = "chrome-mac.zip"
else:
    BASE = "http://build.chromium.org/buildbot/snapshots/chromium-rel-xp/"
    OUT = "chrome-win32.zip"
# URL opener that treats HTTP 206 Partial Content as success so byte-range
# resumes don't raise; defined against the py2 or py3 urllib flavour.
if pyver == 3:
    class ResumableDownloader(urllib.request.FancyURLopener):
        def http_error_206(self, url, fp, errcode, errmsg, headers, data=None):
            pass
else:
    class ResumableDownloader(urllib.FancyURLopener):
        def http_error_206(self, url, fp, errcode, errmsg, headers, data=None):
            pass
# 75-space string used to blank out the status line.
for i in range(75):
    blank += " "
def msg(s):
    """Write *s* to stdout with no trailing newline, flushing immediately."""
    stream = sys.stdout
    stream.write(s)
    stream.flush()
def get_dl_secs():
    """Seconds (float) elapsed since the module-level 'start' timestamp."""
    global start
    elapsed = datetime.datetime.now() - start
    return elapsed.seconds + (elapsed.microseconds / 1000000.0)
def get_Kps(transferred, size):
    """Return a human-readable transfer-rate string for the status line.

    transferred -- total bytes received so far
    size        -- expected total bytes, or 0 when unknown (an "ETA MM:SS"
                   field is appended only when known)
    Side effects: updates the module globals last_transferredK (running
    total in KiB) and start (resets the rate-measurement window).
    Returns "" when no measurable time has elapsed yet.
    """
    global last_transferredK, start
    secs = get_dl_secs()
    # KiB received since the previous call.
    transferredK = (float(transferred) / 1024.0) - last_transferredK
    if last_transferredK != 0:
        # Restart the timing window for the next rate sample.
        start = datetime.datetime.now()
    last_transferredK += transferredK
    if (secs > 0):
        transferredKS = (float(transferredK) / float(secs))
        if (size > 0):
            eta_secs = (float(size - transferred) / 1024.0) / transferredKS
            eta_min = eta_secs / 60
            eta_secs = eta_secs % 60
            ETA = "%02i:%02i" % (eta_min, eta_secs)
            return "%.2f KB/s ETA: %s" % (transferredKS, ETA)
        else:
            return "%.2f KB/s" % transferredKS
    else:
        return ""
def clearline():
    """Erase the current console status line by overwriting it with blanks."""
    global blank
    wiper = "\r" + blank + "\r"
    sys.stdout.write(wiper)
def status1(val):
    """Redraw the status line for a download of unknown total size."""
    secs = get_dl_secs()  # NOTE(review): unused here
    clearline()
    msg("Downloaded: %i K (%s)" % ((val / 1024), get_Kps(val, 0)))
def status2(read, size):
    """Redraw the status line for a download of known size: progress,
    percentage and transfer rate."""
    global start
    clearline()
    perc = "%.2f" % ((float(read) * 100.0) / float(size))
    msg("Downloaded: %i / %i K [%s %%] (%s)" % (read/1024, size/1024, perc, get_Kps(read, size)))
def unpack(src, dst, theme):
    """Extract the downloaded chromium archive *src* into directory *dst*,
    then optionally activate *theme* (the name of a theme dll already
    present in the extracted themes folder).

    An empty *dst* means "don't extract".  Errors are reported on stdout;
    nothing is raised.
    """
    if (dst == ""):
        return
    if not os.path.isdir(dst):
        try:
            os.mkdir(dst)
        except Exception as e:
            print("Can't extract to '" + dst + "': " + str(e))
            return
    msg("Extracting to " + dst)
    try:
        z = zipfile.ZipFile(src)
        for f in z.namelist():
            # Bug fix: this used to extract to sys.argv[1] (whatever the
            # first CLI argument happened to be) instead of the requested
            # destination directory.
            z.extract(f, dst)
        z.close()
        print(" (ok)")
    except Exception as e:
        sys.stdout.write(" (fail)\n")
        print("(" + str(e) + ")")
    if (len(theme) > 0):
        msg("Setting theme to '" + theme + "'")
        theme_dll = os.path.join(dst, "chrome-win32", "themes", theme)
        theme_dll += ".dll"
        if not os.path.isfile(theme_dll):
            print("Can't find '" + theme_dll + "'; install your theme here first and re-run")
            return
        old_dll = os.path.join(dst, "chrome-win32", "themes", "default.dll")
        try:
            # Back up the stock theme, then overwrite it with the chosen
            # one.  (Fix: the backup path no longer shadows the dst param.)
            backup = old_dll + ".bak"
            if os.path.isfile(backup):
                os.remove(backup)
            shutil.copyfile(old_dll, backup)
            shutil.copyfile(theme_dll, old_dll)
            print(" (ok)")
        except Exception as e:
            print("Unable to set theme: " + str(e))
def usage():
    """Print command-line help text for the chromium fetcher."""
    help_lines = [
        "Usage: " + os.path.basename(sys.argv[0]) + " {<extract dir>} {-t <theme name>} {-i <intermediatary dir}",
        " <extract dir> is a dir to optionally extract downloaded archive to",
        " <theme name> is the name of a theme dll already in the themes dir of",
        " your extracted chromium (so, first-time around, this will",
        " b0rk and you will be told where to put your theme dll.",
        " Just re-run with the same arguments to apply the theme.",
        " <intermediatary dir> is an optional override for the intermediatary",
        " dir for operations (default is your home dir:",
        " " + os.path.expanduser("~"),
        "WARNING: themes seem to quickly become incompatible with the latest chromium",
        " builds. If you see a lot of red, just re-run without your -t argument to",
        " revert to the default chromium theme",
    ]
    for help_line in help_lines:
        print(help_line)
def get_ver():
    """Fetch the LATEST snapshot version string, retrying up to 5 times.

    Returns the stripped version on success, or "" (after printing the
    last error) when every attempt fails.
    """
    LATEST = BASE + "LATEST"
    last_error = None
    for i in range(5):
        try:
            if pyver == 2:
                ver = urllib2.urlopen(LATEST, "rb").read().strip()
            else:
                ver = urllib.request.urlopen(LATEST).read().decode().strip()
            return ver
        except Exception as e:
            # Bug fix: 'e' was referenced after the loop, where Python 3
            # has already unbound the except target (NameError on the
            # failure path); remember the last error explicitly instead.
            last_error = e
            time.sleep(1)
    print("\nUnable to get LATEST version: " + str(last_error))
    return ""
def update_chrome(known_version = ""):
global chunk,min_chunk,max_chunk,pyver,BASE_OUT,OUT
if ((sys.argv[1:].count("-h") > 0) or (sys.argv[1:].count("-h") > 0)):
usage()
sys.exit(0)
extract_out = ""
theme = ""
lastarg = ""
for arg in sys.argv[1:]:
if arg == "-t" or arg == "-i":
lastarg = arg
else:
if (lastarg == "-i"):
if os.path.isdir(arg):
lastarg = ""
BASE_OUT = arg
else:
print("-i requires a valid intermediatary directory to work")
elif (lastarg == "-t"):
theme = arg
lastarg = ""
elif os.path.isdir(arg):
extract_out = arg
elif os.path.isdir(os.path.dirname(arg)):
try:
os.mkdir(arg)
extract_out = arg
except:
print("Can't find or create dir %s; aborting" % arg)
sys.exit(1)
ver = known_version
if (ver == ""):
msg("Determining latest version...")
ver = get_ver()
if (len(ver) == 0):
sys.exit(1)
print(" (ok)")
LAST = os.path.join(BASE_OUT, ".CHROME-LATEST-VERSION")
if (os.path.isfile(LAST)):
last_dl = open(LAST, "r").read().strip()
if (last_dl == ver):
print(" -> Already have latest version (" + ver + ")")
tmp = os.path.join(BASE_OUT, OUT)
if os.path.isfile(tmp):
unpack(os.path.join(BASE_OUT, OUT), extract_out, theme)
time.sleep(2)
sys.exit(0)
INSTALLER = BASE + ver + "/" + OUT
print("Download starts... (version: " + ver + ")")
print("(url: " + INSTALLER + ")")
offset = 0
attempts = 0
while True:
if attempts > 0:
print("Resuming from %i bytes..." %(offset))
attempts += 1
try:
start = datetime.datetime.now()
if pyver == 2:
fp = urllib2.urlopen(INSTALLER)
headers = fp.headers.headers;
else:
fp = urllib.request.urlopen(INSTALLER)
headers = fp.getheaders()
stLen = 0
resuming = False
for h in headers:
if pyver == 2:
parts = h.strip().split(":")
else:
parts = h
if (parts[0].lower() == "content-length"):
resuming = True
stLen = int(parts[1].strip())
if offset > 0:
fp.close()
r = ResumableDownloader()
r.addheader("Range", "bytes=" + str(offset) + "-")
fp = r.open(INSTALLER)
for h in fp.headers.headers:
parts = h.strip().split(":")
if (parts[0].lower() == "content-length"):
stLen = int(parts[1].strip())
if offset > 0 and not resuming:
offset = 0
stRead = 0
new = ""
oldlen = -1
if offset == 0:
print("Getting %s" % (os.path.join(BASE_OUT, OUT)))
fpout = open(os.path.join(BASE_OUT, OUT) + ".part", "wb")
else:
fpout = open(os.path.join(BASE_OUT, OUT) + ".part", "ab")
if (stLen == 0):
read_bytes = 0;
fail = 0
# can't determine length; just carry on
while (oldlen != read_bytes) and (fail < 5):
oldlen = read_bytes
part = fp.read(chunk)
if len(part) == 0:
fail == 1
time.sleep(50)
continue
read_bytes += len(part)
fpout.write(part)
status1(len(new))
break
else:
fail = 0
while (stRead < stLen):
part = fp.read(chunk)
stRead += len(part)
if len(part) < chunk:
chunk -= min_chunk
if chunk <= 0:
chunk = min_chunk
else:
if chunk < max_chunk:
chunk += min_chunk
fpout.write(part)
if (len(part) == 0):
fail += 1
else:
fail = 0
if (fail > timeout):
newver = get_ver()
if (newver != ver):
print("\nVersion changed upstreeam! Trying again...")
update_chrome()
else:
print("timed out ):")
fpout.close()
fp.close()
offset = os.stat((os.path.join(BASE_OUT, OUT)) + ".part").st_size
break
elif (fail > 0):
time.sleep(1)
status2(stRead, stLen)
if (stRead >= stLen):
break
except Exception as e:
print("Unable to download latest zip (" + INSTALLER + "): " + str(e))
traceback.print_exc()
sys.exit(1)
#print("\nWriting to file...")
#try:
# fp = open(OUT, "wb")
# fp.write(new)
# fp.close()
#except Exception as e:
# print("Unable to save new archive: " + str(e))
# sys.exit(1)
fpout.close()
if os.path.isfile(os.path.join(BASE_OUT, OUT)):
os.remove(os.path.join(BASE_OUT, OUT))
os.rename(os.path.join(BASE_OUT, OUT) + ".part", os.path.join(BASE_OUT, OUT))
print("Chrome archive updated successfully!")
try:
fp = open(LAST, "w")
fp.write(ver)
fp.close()
except Exception as e:
print("WARNING: Unable to save download version to '" + LAST + "': " + str(e))
unpack(OUT, extract_out, theme)
if __name__ == "__main__":
    # Run the updater; a Ctrl-C is reported briefly instead of as a
    # traceback.
    try:
        update_chrome()
    except KeyboardInterrupt:
        print("aborted.")
|
fluffynuts/scripts
|
opts.py
|
#!/bin/false
# CLI options helper
import os
import sys
class Options:
def __init__(self, UsageHeader = "", UnconsumedHelp = ""):
self.options = dict()
# put something like "MyApp V1.2.3.4" here
self.UsageHeader = UsageHeader
# help for trailing arguments which do not have an argument spec, such as an expected URI or filename
self.UnconsumedHelp = UnconsumedHelp
self.cols = self.GetConsoleCols()
self.unconsumed = []
self.AddOpt("-h", "Help for this application", aliases=["-?", "/?"], consumes=0)
self.AddOpt("--help", "More verbose help (including option aliases)", consumes=0)
self.UsageOnBadArg = False
self.UsageOnNoArgs = False
self.ShortUsageHidesAliases = True
def GetConsoleCols(self):
try:
if sys.platform == "win32" or sys.platform == "win64":
# taken from http://code.activestate.com/recipes/440694/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(bufx, bufy, curx, cury, wattr, left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
return right-left -1
else:
return 78
elif sys.platform == "posix":
import struct, fcntl, termios
s = struct.pack("HHHH", 0, 0, 0, 0)
x = fcntl.ioctl(1, termios.TIOCGWINSZ, s)
width = struct.unpack("HHHH", x)[1]
if width < 80:
width = 78
return width -2
else:
return 80
except Exception as e:
print("Can't determine console columns (" + str(e) + "); Defaulting to 80")
return 78
def AddOpt(self, opt, help = "(no help)", aliases = [], consumes = 0,
ConsumesHelp = "", Default="", ShortHelp="", required=False, DataType="string",
PrependShortHelp=True, ValidValues=[], LowerValues=False):
if consumes == 0 and len(ConsumesHelp) > 0:
# Default consumes value to number of items in the help listing
consumes = len(ConsumesHelp.split(" " ))
if consumes != 1 and Default == "":
Default = []
o = self.Option(opt, help, aliases, consumes, ConsumesHelp, Default, ShortHelp, DataType)
o.required = required
o.ValidValues = ValidValues
o.LowerValues = LowerValues
self.options[opt] = o
def _print(self, s, indent = 0):
words = s.split(" ")
line = ""
while (len(words)):
if len(words[0]) + len(line) > self.cols:
print((" " * indent) + line)
line = ""
if len(line) > 0:
line += " "
line += words[0]
words = words[1:]
if len(line):
print((" " * indent) + line)
def GetMaxLHSW(self, SkipAliases=False):
lhsw = 0
opts = self.options.keys()
for o in opts:
self.options[o].cols = self.cols
self.options[o].prepare(SkipAliases)
if self.options[o].lhsw > lhsw:
lhsw = self.options[o].lhsw
if lhsw > (self.cols / 2):
lhsw = self.cols / 2
return lhsw
def Usage(self, Long=False):
# make sure cols are up to date
self.cols = self.GetConsoleCols()
if (len(self.UsageHeader)):
self._print(self.UsageHeader)
tmp = ""
opts = sorted(self.options.keys())
if len(opts):
tmp = " {options}"
self._print("Usage: " + os.path.basename(sys.argv[0]) + tmp + " " + self.UnconsumedHelp)
if len(opts):
self._print("where options are of:", 1)
if Long:
SkipAliases = False
else:
SkipAliases = self.ShortUsageHidesAliases
lhsw = self.GetMaxLHSW(SkipAliases)
for o in opts:
self.options[o].lhsw = lhsw
self.options[o].Usage(Long, SkipAliases=SkipAliases)
if Long:
self._print("data types:", 1)
self._print("bool: yes/no/true/false/1/0", 2)
self._print("string: any characters", 2)
self._print("int: any whole numeric value", 2)
self._print("float: any numeric value", 2)
def BoolToStr(self, b):
if b:
return "True"
else:
return "False"
def Dump(self):
opts = sorted(self.options.keys())
for o in opts:
print(o)
opt = self.options[o]
print(" required: " + self.BoolToStr(opt.required))
print(" selected: " + self.BoolToStr(opt.selected))
print(" value: " + str(opt.value))
print(" Default: " + str(opt.Default))
def CheckType(self, val, opt):
try:
if opt.DataType == "string" or opt.DataType == "str":
return str(val)
if opt.DataType == "int":
return int(val)
if opt.DataType == "float":
return float(val)
if opt.DataType == "file":
if not os.path.isfile(val):
print("Option '%s' requires (an) existing file(s) for data ('%s' is invalid)" % (opt.opt, val))
sys.exit(1)
else:
return val
if opt.DataType == "dir":
if not os.path.isdir(val):
print("Option '%s' requires (an) existing folder(s) for data ('%s' is invalid)" % (opt.opt, val))
sys.exit(1)
else:
return val
if opt.DataType == "bool" or opt.DataType == "boolean":
arg = arg.lower()
if ["yes", "true", "1"].count(arg) > 0:
return True
else:
return False
except Exception as e:
print("Option '" + opt.opt + "' requires an option of type '" + opt.DataType + "'")
sys.exit(1)
raise Exception("Unhandled DataType '" + opt.DataType + "' for option '" + opt.opt + "'")
def ParseArgs(self):
if self.UsageOnNoArgs and len(sys.argv[1:]) == 0:
self.Usage()
return False
# take advantage of python's objects-by-reference schema
last_consumer = None
opts = list(self.options.keys())
for o in self.options.keys():
for a in self.options[o].aliases:
opts.append(a)
for arg in sys.argv[1:]:
if arg == "-h":
self.Usage(False)
sys.exit(0)
if arg == "--help":
self.Usage(True)
sys.exit(0)
if arg == "--":
last_consumer = None
continue
if opts.count(arg) == 0:
if last_consumer == None:
if len(arg) and (arg[0] == "-") and self.UsageOnBadArg:
self._print("Bad option / argument: \"%s\"\n" % arg)
self.Usage()
return False
self.unconsumed.append(arg)
elif (sys.argv[1:].count("--") == 0):
# accept as the start of a new switch if the user has NOT employed
# explicit option argument ending (--)
last_consumer = None
if last_consumer == None:
for opt_key in self.options.keys():
o = self.options[opt_key]
if o.Match(arg):
last_consumer = o
last_consumer.selected = True;
if last_consumer.consumes == 0:
last_consumer = None
found = True
break
else:
if not last_consumer.Valid(arg):
self._print("Bad value for option '%s': %s" % (last_consumer.opt, arg))
self._print("Acceptable values are:")
for v in last_consumer.ValidValues:
self._print(v, 2)
sys.exit(1)
if last_consumer.consumes < 2 and last_consumer.consumes > 0:
last_consumer.SetValue(self.CheckType(arg, last_consumer))
last_consumer = None
else:
if (last_consumer.consumes == -1) or (len(last_consumer.value) < last_consumer.consumes):
last_consumer.AppendValue((self.CheckType(arg, last_consumer)))
else:
self._print("Option '%s' has too many arguments specified" % last_consumer.opt)
self.Usage()
return True
def RequiredMissing(self):
missing = dict()
for o in self.options.keys():
opt = self.options[o]
if opt.required:
if not opt.selected:
missing[o] = opt
elif opt.consumes == 1 and opt.value == "":
missing[o] = opt
elif (opt.consumes > 1) and (opt.consumes != len(opt.value)):
missing[o] = opt
elif (self.options[o].consumes == -1) and (len(opt.value) == 0):
missing[o] = opt
missing_opts = list(missing.keys())
if len(missing_opts):
missing_opts.sort()
if len(missing_opts) == 1:
s = " was"
else:
s = "s were"
errmsg = "\nThe following required option" + s + " not supplied or fully qualified:\n\n"
self.cols = self.GetConsoleCols()
lhsw = self.GetMaxLHSW()
for o in missing_opts:
missing[o].lhsw = lhsw
errmsg += missing[o].Usage(SkipAliases=True, ReturnAsString=True)
print(errmsg)
return True
return False
def validate(self, opt):
opts = list(self.options.keys())
if opts.count(opt) == 0:
raise Exception("Option '" + opt + "' not handled by Options class")
def value(self, opt):
self.validate(opt)
return self.options[opt].value
def selected(self, opt):
self.validate(opt)
return self.options[opt].selected
def selectedOptions(self):
ret = []
for opt in list(self.options.keys()):
if self.selected(opt):
ret.append(opt)
return ret
class Option:
def __init__(self, opt, help, aliases, consumes, ConsumesHelp, Default, ShortHelp,
DataType="string", PrependShortHelp=True):
self.opt = opt
self.DataType = DataType.lower()
self.consumes = consumes
self.aliases = aliases
self.help = help
self.ShortHelp = ShortHelp
self.ConsumesHelp = ConsumesHelp
self.lhsw = 0
self.rhsw = 0
self.cols = 0
self.selected = False
self.required = False
self.newline_before_help = False
self.Default = Default
self.PrependShortHelp = PrependShortHelp
self.ValidValues = []
self.LowerValues = False
self.value = Default
self.indent = 2
def Valid(self, val):
if len(self.ValidValues) == 0:
return True
elif self.LowerValues:
if self.ValidValues.count(val) > 0:
return True
else:
return False
def Match(self, arg):
if self.opt == arg:
return True
for a in self.aliases:
if a == arg:
return True
return False
def AppendValue(self, val):
if self.LowerValues:
self.value.append(val.lower())
else:
self.value.append(val)
def SetValue(self, val):
if self.LowerValues:
self.value = val.lower()
else:
self.value = val
def prepare(self, SkipAliases=False):
self.lhsw = self.leftw(SkipAliases)
self.sanitise_leftw()
def sanitise_leftw(self):
if self.lhsw > (self.cols / 2):
self.lhsw = 0
self.newline_before_help = True
def leftw(self, SkipAliases=False):
"""Returns the minimum colwidth required to display the
LHS of the help for this option"""
self.aliases.sort()
w = len(self.opt) + len(self.ConsumesHelp) + 1
if not SkipAliases:
for a in self.aliases:
ll = len(a) + len(self.ConsumesHelp) + 1
if ll > w:
w = ll
return w + self.indent + 2
def pad(self, s, w, padwith = " "):
while len(s) < w:
s += padwith
return s
def format_rhs(self, s):
lines = []
words = s.split(" ")
current_line = (self.indent * " ") + (" " * self.lhsw)
force_newline = 0
while len(words):
word = words[0].strip("\r")
if word.count("\n") > 0:
parts = word.split("\n")
words = words[1:]
words.insert(0, word[len(parts[0])+1:])
word = parts[0]
words.insert(0, word)
force_newline = 1
elif force_newline == 1:
force_newline = 2
if len(word) and word[0] == "\"" and word.count("\"") == 1:
# try to keep a quoted string together
tmp = [word]
for w in words[1:]:
tmp.append(w)
if w.count("\"") == 1:
break
word = " ".join(tmp)
if len(tmp) == len(words):
words = []
else:
words = words[:len(tmp)]
words = words[:len(tmp)]
if len(word) + len(current_line) >= self.cols or force_newline == 2:
if len(lines) == 0:
current_line = current_line[self.lhsw + self.indent:]
lines.append(current_line)
current_line = (self.indent * " ") + (" " * self.lhsw)
if len(current_line):
current_line += " "
current_line += word
words = words[1:]
if force_newline == 2:
force_newline = 0
if len(lines) == 0:
current_line = current_line[self.lhsw + self.indent:]
if len(current_line.strip()) > 0:
lines.append(current_line)
return "\n".join(lines)
def Usage(self, Long=False, SkipAliases=False, ReturnAsString = False):
"""Prints out usage for this option"""
if self.lhsw == 0:
self.lhsw = self.leftw()
self.sanitise_leftw()
if self.cols == 0:
self.cols = 80
if self.rhsw == 0:
self.rhsw = self.cols - self.lhsw
self.aliases.sort()
s = self.opt
if len(self.ConsumesHelp) > 0:
s += " " + self.ConsumesHelp
s = self.pad(s, self.lhsw - self.indent)
lhspad = "\n" + (" " * (self.lhsw + self.indent))
ret = (" " * self.indent) + s
if not SkipAliases:
sorted_aliases = []
matched = True
dashcount = 1
while (matched):
matched = False
head = dashcount * "-"
sublist = []
for alias in self.aliases:
if alias[:dashcount] == head and alias[dashcount] != "-":
matched = True
sublist.append(alias)
sublist.sort()
sorted_aliases += sublist
dashcount += 1
prepend = []
for a in self.aliases:
if sorted_aliases.count(a) == 0:
prepend.append(a)
prepend.sort()
sorted_aliases = prepend + sorted_aliases
for a in sorted_aliases:
a = (" " * (self.indent + 1)) + a
if len(self.ConsumesHelp):
a += " " + self.ConsumesHelp
ret += "\n" + self.pad(a, self.lhsw)
if (self.newline_before_help):
ret += lhspad
if Long or len(self.ShortHelp) == 0:
if self.PrependShortHelp and len(self.ShortHelp) > 0:
ret += self.format_rhs(self.ShortHelp) + lhspad
formatted_help = self.format_rhs(self.help)
ret += self.format_rhs(self.help)
if Long:
if self.Default == None or self.Default == [] or len(str(self.Default)) == 0:
d = "(None)"
else:
d = str(self.Default)
if self.consumes != 0:
ret += lhspad + self.format_rhs(self.pad(" [ default:", 12) + d + " ]")
ret += lhspad + self.format_rhs(self.pad(" [ data type:", 12) + self.DataType + " ]")
else:
ret += self.format_rhs(self.ShortHelp)
if ReturnAsString:
return ret
sys.stdout.write(ret)
sys.stdout.write("\n\n")
sys.stdout.flush()
|
fluffynuts/scripts
|
flatten_once.py
|
<reponame>fluffynuts/scripts
#!/usr/bin/python
import os
import sys
class Flattener:
    """Flattens a directory tree by one level.

    Every file found below baseFolder is moved into a folder named
    "<parent> - <subfolder>" alongside its grandparent, after which any
    directories left empty are removed.
    """

    def __init__(self, options=None):
        # Blank string used to wipe the status line on the terminal.
        self.blankstr = 75 * " "
        # Bug fix: the original used a mutable default argument ({}), which
        # is shared between all calls; copy into a fresh dict instead.
        self.options = {}
        if options:
            for opt in options:
                self.options[opt] = options[opt]

    def process(self, baseFolder):
        """Flatten baseFolder one level, then prune empty directories."""
        allFiles = self.ls_R(baseFolder)
        for path in allFiles:
            if os.path.isdir(path):
                continue
            fileName = os.path.basename(path)
            folder = os.path.dirname(path)
            parent = os.path.dirname(folder)
            if not parent.startswith(baseFolder):
                # File is too shallow to flatten (its parent is baseFolder
                # itself, or lies outside the tree entirely).
                print('Can\'t flatten "' + folder + '"')
                print('fileName: ' + fileName)
                print('folder: ' + folder)
                print('parent: ' + parent)
                continue
            folderName = os.path.basename(folder)
            newFolder = parent + ' - ' + folderName
            if not os.path.isdir(newFolder):
                os.mkdir(newFolder)
            newFile = os.path.join(newFolder, fileName)
            print(path + ' -> ' + newFile)
            os.rename(path, newFile)
        self.removeAllEmptyFoldersUnder(baseFolder)

    def removeAllEmptyFoldersUnder(self, baseFolder):
        """Repeatedly delete empty directories until a full pass removes none."""
        deletedSomething = True
        while deletedSomething:
            deletedSomething = False
            for path in self.ls_R(baseFolder):
                if not os.path.isdir(path):
                    continue
                if len(os.listdir(path)) == 0:
                    try:
                        os.rmdir(path)
                        deletedSomething = True
                    except Exception as e:
                        print('Can\'t remove "' + path + '": ' + e.strerror)

    def ls_R(self, dir, include_dirs=True):
        """Breadth-first recursive listing of dir; returns a sorted list.

        Bug fix: include_dirs was accepted but ignored (directories were
        always returned).  It is honoured now; the default changed from
        False to True so callers using the default keep the old behaviour.
        """
        stack = [dir]
        ret = []
        items = 0
        while stack:
            thisdir = stack.pop(0)
            for f in sorted(os.listdir(thisdir)):
                items += 1
                if items % 100 == 0:
                    self.status('Listing directory contents... %i' % (items))
                path = os.path.join(thisdir, f)
                if os.path.isdir(path):
                    if include_dirs:
                        ret.append(path)
                    stack.append(path)
                    continue
                ret.append(path)
        self.status('')
        return sorted(ret)

    def status(self, s):
        """Overwrite the current terminal line with s (truncated to 72 chars)."""
        if (len(s) > 72):
            s = s[:72] + "..."
        sys.stdout.write("\r%s\r%s" % (self.blankstr, s))
        sys.stdout.flush()
if __name__ == '__main__':
    # Flatten each base folder given on the command line.
    worker = Flattener()
    targets = sys.argv[1:]
    if not targets:
        print('Please specify one or more base folders to work on')
    for target in targets:
        worker.process(target)
|
fluffynuts/scripts
|
medesync.py
|
<filename>medesync.py<gh_stars>1-10
#!/usr/bin/python
# vim: ft=python columns=123 foldmethod=marker foldmarker=<<<,>>> expandtab shiftwidth=2 tabstop=2 softtabstop=2
# Released under the terms of the BSD license, outlined below:
# Copyright (c) 2010, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Davyd McColl nor the names of any other contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import ftplib
import time
import re
import datetime
from opts import Options
import shutil
try:
import fcntl, termios, struct, os
COLS = None
except:
COLS = 78 # fallback
class SmartSync:
def __init__(self): #<<<
self.connected = False
self.ftp = None
self.__err__ = None
self.show_status = True
self.current_transfer = None
self.host = None
self.user = "anonymous"
self.port = 21
self.passwd = "<EMAIL>"
self.ftp_conns = []
self.io_chunk = 8192
self.current_transfer = dict()
self.current_transfer["total"] = 0
self.current_transfer["bytes"] = 0
self.current_transfer["name"] = "foobar"
self.overall_transfers = dict()
self.overall_transfers["total"] = 0
self.overall_transfers["bytes"] = 0
self.overall_transfers["start"] = None
self.copy_errors = 0
self.spinner = "|"
self.last_progress = time.time()
self.last_listing = []
self.status_ticks = 0
self.attempts = 120
self.ftp_size_cache = dict()
self.logfp = sys.stdout
self.no_remove = False
#>>>
def __deinit__(self):  #<<<*/
    """Close any pooled FTP connections, swallowing all errors.

    NOTE(review): __deinit__ is not a Python special method — nothing calls
    it automatically; __del__ (or an explicit close()) was probably intended.
    NOTE(review): entries in ftp_conns are dicts (see mkftp), so conn.close()
    always raises AttributeError and is swallowed; conn["client"].close()
    was likely intended — confirm.
    """
    for conn in self.ftp_conns:
        try:
            conn.close()
        except Exception as e:
            pass
    #>>>*/
def spin(self):#<<<*/
spinchars = ["|", "/", "-", "\\"]
idx = spinchars.index(self.spinner)
idx += 1
if idx >= len(spinchars):
idx = 0
self.spinner = spinchars[idx]
return self.spinner
#>>>*/
def _print(self, s):#<<<*/
if self.logfp != sys.stdout:
self.logfp.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:S "))
self.logfp.write("%s\n" % s)
#>>>*/
def get_terminal_size(self):#<<<*/
if self.logfp != sys.stdout:
return 0, 0
global COLS
if COLS != None:
return COLS, 25
def ioctl_GWINSZ(fd):
try:
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (env['LINES'], env['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
#>>>*/
def set_last_error(self, descr, err): #<<<
self.__err__ = dict()
self.__err__["description"] = descr
self.__err__["exception"] = err
if self.show_status:
self._print("%s:\n %s" % (descr, str(err)))
#>>>
def feedback(self, fstr):#<<<
self.cols, tmp = self.get_terminal_size()
if self.show_status:
maxlen = self.cols-6
fstr = self.shorten(fstr, maxlen)
if fstr.count("\n") == 0:
while (len(fstr) < maxlen):
fstr += " "
if self.logfp != sys.stdout:
self.logfp.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S "))
self.logfp.write(fstr)
self.logfp.flush()
#>>>
def show_ok(self):#<<<
if self.show_status:
self.logfp.write("[ OK ]\n")
#>>>
def show_fail(self):#<<<
if self.show_status:
self.logfp.write("[FAIL]\n")
#>>>
def get_last_error(self):#<<<
if self.__err__ != None:
return self.err["description"], self.err["exception"]
else:
return "", None
#>>>
def clear_last_error(self):#<<<
self.__error__ = None
#>>>
def connect(self, host, user, passwd):#<<<
if self.ftp != None:
self.ftp.quit()
else:
self.ftp = ftplib.FTP()
try:
self.feedback("Connecting to %s" % (host))
self.ftp.connect(host, 21, 60)
self.show_ok()
self.feedback("Logging in...")
self.ftp.login(user, passwd)
self.show_ok()
self.feedback("Setting PASV")
self.ftp.set_pasv(True)
self.show_ok()
self.connected = True
except Exception as e:
self.show_fail()
self.set_last_error("Unable to connect to FTP host", e)
return self.connected
#>>>
def split_uri(self, uri):#<<<
parts = uri.split("://")
ret = dict()
if len(parts) > 1:
ret["protocol"] = parts[0].lower()
uri = "://".join(parts[1:])
else:
ret["protocol"] = "file"
uri = parts[0]
if ret["protocol"] == "file":
ret["host"] = None
ret["path"] = uri
else:
parts = uri.split("/")
ret["host"] = parts[0]
if len(parts) == 0:
ret["path"] = ""
else:
ret["path"] = "/".join(parts[1:])
parts = ret["host"].split("@")
if len(parts) == 0:
ret["user"] = None
ret["password"] = None
else:
ret["host"] = parts[-1]
tmp = "@".join(parts[0:-1])
parts = tmp.split(":")
ret["user"] = parts[0]
ret["password"] = ":".join(parts[1:])
parts = ret["host"].split(":")
if len(parts) == 1:
ret["port"] = None
else:
ret["port"] = int(parts[-1])
ret["host"] = parts[0:-1]
if ret["protocol"] == "ftp":
ret["timeout"] = 30
ret["passive"] = True
ret["path"] = ret["path"].replace(os.sep, "/")
return ret
#>>>
def ls_R(self, path, include_dirs = False, prepend_dirname = True):#<<<
uri_parts = self.split_uri(path)
ret = None
if uri_parts["protocol"] == "file":
for i in range(10):
ret = self.ls_R_local(uri_parts["path"], include_dirs, prepend_dirname)
if ret != None:
return ret
time.sleep(1)
elif uri_parts["protocol"] == "ftp":
for i in range(10):
ret = self.ls_R_ftp(uri_parts, include_dirs, prepend_dirname)
if ret != None:
return ret
time.sleep(1)
else:
self._print("Can't ls-r on %s" % (path))
return None
return ret
#>>>
def resolvebool(self, opts, key, default):#<<<
if list(opts.keys()).count(key) > 0:
return opts[key]
else:
return default
#>>>
def remove_ignored(self, l, regex):#<<<*/
if l == None:
return None
if len(regex) == 0:
return l
out = []
for f in l:
m = re.match(regex, f)
if m == None:
out.append(f)
return out
#>>>*/
def remove_hidden(self, ls, base):#<<<*/
if ls == None:
status("Unable to remove hidden files from %s: listing failed" % (base))
uri_parts = self.split_uri(base)
sep = os.sep
if uri_parts["protocol"] != "file":
sep = "/"
out = []
for f in ls:
parts = f.split(sep)
if len(parts[-1]) and parts[-1][0] == ".":
continue
out.append(f)
return out
#>>>*/
def is_videofile(self, path):#<<<
ext = os.path.splitext(path)[-1].lower()
video_extensions = [".mp4", ".mkv", ".m4v", ".avi", ".mpg", ".mpeg", ".wmv"]
return not video_extensions.count(ext) == 0
#>>>
def schedule_remove_item(self, to_remove, item):
if self.no_remove:
return
to_remove.append(item)
def sync(self, options):#<<<
self.dummy = self.resolvebool(options, "dummy", False)
self.overwrite = self.resolvebool(options, "overwrite", True)
self.no_remove = self.resolvebool(options, "no-remove", False)
to_copy = []
to_remove = []
to_archive = []
if self.dummy:
self.feedback("! Dummy operation !\n")
# get a listing of all files and dirs under local_src
local_files = self.remove_ignored(self.ls_R(options["src"], True, False), options["ignore"])
if local_files == None or len(local_files) == 0:
print("can't list local files...")
return False
# get a listing of all files and dirs under remote_dst
remote_files = None
remote_files = self.ls_R(options["dst"], True, False)
if not options["includehidden"]:
local_files = self.remove_hidden(local_files, options["src"])
remote_files = self.remove_hidden(remote_files, options["dst"])
if remote_files == None:
return False
if options["archive"] != None:
watched_files = []
for f in remote_files:
if remote_files.count(f + ".t") > 0:
# add this file to the archive list
to_archive.append(f)
# add the watched indicator for this file to the remove list
self.schedule_remove_item(to_remove, f + '.t')
# remove orphaned watched file indicators
dotparts = f.split(".")
if dotparts[-1] == "t" and remote_files.count(".".join(dotparts[:-1])) == 0:
self.schedule_remove_item(to_remove, f)
tmp = []
# remove watched markers from remote listing
for f in remote_files:
if to_remove.count(f) == 0:
tmp.append(f)
remote_files = tmp
remote_files = self.remove_ignored(remote_files, options["ignore"])
# create a list of all files missing from remote_dst
# also create a list of all files missing from src (to del)
idx = 0
total_files = len(local_files)
mentioned_calc = False
for f in local_files:
spinner = self.spin()
perc = (idx * 100.0) / total_files
idx += 1
if self.logfp == sys.stdout:
self.status("Calculating workload [ %2i %%] %s" % (perc, spinner))
elif not mentioned_calc:
self.feedback("Calculating workload...")
mentioned_calc = True
if self.isdir(options["src"], f):
continue
src_size = self.filesize(options["src"], f)
if remote_files.count(f) == 0:
self.overall_transfers["total"] += src_size
if self.dummy:
self.status("")
self._print("Missing: %s" % f)
to_copy.append(f)
continue
dst_size = self.filesize(options["dst"], f)
if src_size == -1 or dst_size == -1:
return False
if src_size != dst_size:
if self.dummy:
self.status("")
self._print("%s: %i vs %i" % (f, src_size, dst_size))
self.overall_transfers["total"] += src_size
to_copy.append(f)
for f in remote_files:
# TODO: remove non-video files if they are alone perhaps? Older than some age and alone?
# MEGA-TODO: refactor!
if self.is_videofile(f):
if local_files.count(f) == 0:
self.schedule_remove_item(to_remove, f)
self.status("")
if self.logfp == sys.stdout:
# leave the line clear of a percentage
self.feedback("Calculating workload")
self.show_ok()
if len(to_remove) == 0 and len(to_copy) == 0 and len(to_archive) == 0:
self.status("%s up to date\n" % (options["dst"]))
# check on empty dirs at dest
return True
# archive watched files; push these files onto the to_remove stack
#print(to_archive)
for f in to_archive:
# perform local move (may be a rename, may be a copy-and-delete)
if self.move_file(options["src"], f, options["archive"], f):
# add the remote file to the to_remove list
if to_remove.count(f) == 0:
self.schedule_remove_item(to_remove, f)
# add watched indicator file to to_remove list
w = f + ".t"
if to_remove.count(f) == 0:
self.schedule_remove_item(to_remove, f)
# remove extra remote files first (perhaps need the space)
uri_parts = self.split_uri(options["dst"])
for f in sorted(to_remove, reverse=True):
if self.remove(options["dst"], f):
self.status("")
self.feedback("Remove remote: %s" % (f))
self.show_ok()
else:
self.status("")
self.feedback("Remove remote: %s" % (f))
self.show_fail()
if uri_parts["protocol"] == "file":
fdir = os.path.join(os.path.dirname(f))
else:
fdir = "/".join(f.split("/")[:-1])
# copy missing remote files
self.overall_transfers["start"] = time.time()
totalBytes = self.overall_transfers["total"]
totalHuman = self.humanreadable_size(totalBytes)
self._print("Overall transfer size: %i (%s)" % (totalBytes, totalHuman))
for f in to_copy:
if self.isdir(options["src"], f):
continue
self.feedback("Copy file: %s" % (f))
if self.copy_file(options["src"], options["dst"], f):
#self.status("")
self.show_ok()
else:
self.status("")
self.show_fail()
#self._print("")
#>>>*/
def move_file(self, uri_from, relative_from, uri_to, relative_to):#<<<*/
uri_parts_from = self.split_uri(uri_from)
uri_parts_to = self.split_uri(uri_to)
self.feedback("Archiving %s" % (relative_from))
if uri_parts_from["protocol"] != uri_parts_to["protocol"]:
self.show_fail()
self._print("(Can't move files across protocols)")
return False
if uri_parts_to["protocol"] == "ftp":
dirname = "/".join(relative_to.split("/")[:-1])
if not self.ensure_dir_exists(uri_to, dirname):
self.show_fail()
self._print("(Can't make dest dir at %s)" % (dirname))
return False
try:
ftp = self.mkftp2(uri_parts_to)
if ftp == None:
self.show_fail()
self._print("(Can't get FTP connection)")
return False
if not self.dummy:
ftp.rename(relative_from, relative_to)
self.show_ok()
return True
except Exception as e:
self.show_fail()
self._print("Can't move file %s at ftp://%s to %s: %s" % (relative_from, uri_parts_to["host"], relative_to, str(e)))
return False
elif uri_parts_to["protocol"] == "file":
dirname = os.sep.join(relative_to.split(os.sep)[:-1])
_from = os.path.join(uri_from, relative_from)
_to = os.path.join(uri_to, relative_to)
if not os.path.isfile(_from):
if not os.path.isfile(_to):
self._print("Can't find %s; not copying what isn't there" % (_from))
return True
dirname = os.path.dirname(_to)
if not self.ensure_dir_exists_local(dirname):
self.show_fail()
self._print("Can't make dest dir at %s" % (dirname))
return False
try:
if not self.dummy:
try:
os.rename(_from, _to)
except Exception as e:
shutil.copyfile(_from, _to)
os.remove(_from)
else:
self._print("\nrename: %s => %s" % (_from, _to))
self.show_ok()
return True
except Exception as e:
try:
if not self.dummy:
open(_to, "wb").write(open(_from, "rb").read())
os.remove(_from)
self.show_ok()
return True
except Exception as e:
self._print("Can't copy-and-del %s to %s: %s" % (_from, _to, str(e)))
self.show_fail()
return False
else:
self._print("%s: unsupported protocol for file_move" % (uri_parts_to["protocol"]))
# unsupported protocol
return False
#>>>*/
def isdir(self, uri_base, relative_path):#<<<*/
uri_parts = self.split_uri(uri_base)
if uri_parts["protocol"] == "file":
sep = os.sep
else:
sep = "/"
fullpath = uri_parts["path"] + sep + relative_path
if uri_parts["protocol"] == "file":
return os.path.isdir(fullpath)
else:
ftp = self.mkftp2(uri_parts)
if ftp == None:
return False
return self.is_ftp_dir(ftp, fullpath)
#>>>*/
def remove(self, uri_base, relative_path = ""):#<<<
if self.dummy:
return True
uri_parts = self.split_uri(uri_base)
pathtype = "path"
if uri_parts["protocol"] == "file":
try:
if len(relative_path) > 0:
f = os.path.sep.join([uri_parts["path"], relative_path])
else:
f = uri_parts["path"]
if self.dummy:
self.feedback("local remove: %s" % (f))
else:
if os.path.isdir(f):
pathtype = "dir"
os.rmdir(f)
else:
pathtype = "file"
os.remove(f)
return True
except Exception as e:
self.set_last_error("Unable to remove %s %s" % (pathtype, uri_parts["path"]), e)
return False
elif uri_parts["protocol"] == "ftp":
ftp = self.mkftp2(uri_parts)
if ftp == None:
return False
try:
f = "/".join([uri_parts["path"], relative_path])
if self.dummy:
self.feedback("remote remove: %s" % (f))
else:
f_sane = f #self.sanitise_ftp_path(f)
if self.is_ftp_dir(ftp, f_sane):
pathtype = "dir"
ftp.cwd("/")
ftp.rmd(f)
else:
pathtype = "file"
if self.filesize(uri_base, relative_path) > -1:
ftp.delete(f_sane)
return True
except Exception as e:
self.set_last_error("Unable to remove %s %s" % (pathtype, f), e)
return False
else:
return False
#>>>
def ensure_dir_exists(self, uri_base, relative_path):#<<<
up = self.split_uri(uri_base)
if up["protocol"] == "file":
#print(up)
return self.ensure_dir_exists_local(os.path.dirname(os.sep.join([up["path"], relative_path])))
elif up["protocol"] == "ftp":
return self.ensure_dir_exists_ftp(up, relative_path)
else:
return False
#>>>
def ensure_dir_exists_local(self, path):#<<<
parts = os.path.split(path)
test = ""
if self.dummy:
self.feedback("! Check local dir: %s\n" % (path))
for part in parts:
if len(test) > 0: test += os.sep
test += part
if self.dummy:
print("testing %s" % test)
if not os.path.isdir(test):
if self.dummy:
print("%s: dir doesn't exist" % test)
try:
os.mkdir(test)
if self.dummy:
print("Ensured local dir '%s' exists!" % test)
except Exception as e:
self.set_last_error("Unable to make dir %s" % test, e)
return False
return True
#>>>
def ensure_dir_exists_ftp(self, uri_parts, relative_path):#<<<
ftp = self.mkftp2(uri_parts)
if ftp == None:
return False
relative_path = relative_path.replace(os.sep, "/")
parentdir = "/".join(relative_path.split("/")[:-1])
path = uri_parts["path"]
if len(parentdir) > 0:
path += "/" + parentdir
parts = path.split("/")
test = ""
if self.dummy:
self.feedback("! Check remote dir: %s\n" % (path))
return True
for part in parts:
if len(test) > 0: test += "/"
test += part
if not self.is_ftp_dir(ftp,test):
try:
ftp.mkd(test)
except Exception as e:
self.set_last_error("Unable to make dir %s on %s" % (test, uri_parts["host"]), e)
return False
return True
#>>>
def show_progress(self, label, fraction):#<<<*/
if self.logfp != sys.stdout:
# log file doesn't get progress bar
return
cols, rows = self.get_terminal_size()
label = self.shorten(label, int(float(cols) * 0.90))
rem = cols - len(label) - 4
bars = int(fraction * rem)
draw_arrow = False
if (bars > 0):
bars -= 1
draw_arrow = True
label += " [" + (bars * "=")
if draw_arrow:
label += ">"
rem -= 1
label += ((rem - bars) * " ") + "]"
self.status(label, False)
#>>>*/
def copy_file(self, src_base, dst_base, relative_path):#<<<
up_src = self.split_uri(src_base)
up_dst = self.split_uri(dst_base)
# ensure that the remote dir exist
if not(self.ensure_dir_exists(dst_base, relative_path)):
return False
# copy the file
if up_src["protocol"] == "file":
if up_dst["protocol"] == "file":
return self.copy_file_local_to_local(src_base, dst_base, relative_path)
elif up_dst["protocol"] == "ftp":
return self.copy_file_local_to_ftp(src_base, dst_base, relative_path)
elif up_src["protocol"] == "ftp":
self._print("FTP source not supported (yet)")
return False
else:
self._print("Protocol %s not supported (yet)" % up_src["protocol"])
return False
#>>>
def copy_file_local_to_ftp(self, src, dst, rel):#<<<
for i in range (self.attempts):
srcpath = os.path.join(src, rel)
dstpath = "/".join([dst, rel])
uri_parts = self.split_uri(dstpath)
if self.dummy:
self.feedback("! copy: %s\n" % (rel))
self._print("! dst: %s\n" % uri_parts["path"])
return True
ftp = self.mkftp2(uri_parts)
if ftp == None:
return False
if ftp == None:
self._print("Can't copy %s: Can't get ftp object")
return False
ftp.cwd("/")
parentdir = "/".join(rel.split("/")[:-1])
if not self.ensure_dir_exists(dst, parentdir):
self._print("Can't make dir %s" % (parentdir))
try:
self.current_transfer["total"] = os.stat(srcpath).st_size
self.current_transfer["start"] = time.time()
if self.is_ftp_dir(ftp, uri_parts["path"]):
ftp.rmd(uri_parts["path"])
else:
rsize = None
try:
rsize = ftp.size(uri_parts["path"])
except:
pass
if rsize != None and rsize != self.current_transfer["total"]:
ftp.delete(uri_parts["path"])
self.current_transfer["bytes"] = 0
self.current_transfer["name"] = uri_parts["path"].split("/")[-1]
if self.current_transfer["total"] > 0:
fpsrc = open(srcpath, "rb")
#self._print("STOR %s" % (uri_parts["path"].replace(" ", "%20")))
ftp.cwd("/")
ftp.storbinary("STOR %s" % (uri_parts["path"]), fpsrc, self.io_chunk, self.ftp_status)
fpsrc.close()
return True
except Exception as e:
if Exception == KeyboardInterrupt:
self.user_abort();
self._print("Can't copy %s: %s" % (rel, str(e)))
time.sleep(1)
self._print("Giving up on %s" % (rel))
return False
def humanreadable_size(self, byteCount):
suf = ["b", "Kb", "Mb", "Gb", "Tb", "Pb"]
for i in range(len(suf)):
if byteCount < 1024:
return "%.2f%s" % (byteCount, suf[i])
byteCount /= 1024.0
return "%.2f%s" % (byteCount, suf[-1])
def humanreadable_rate(self, b, s):
suf = ["b", "Kb", "Mb", "Gb", "Tb", "Pb"]
rate = float(b) / float(s)
return "%s/s" % (self.humanreadable_size(rate))
def human_readable_time(self, secs):
s = secs % 60
m = int(secs / 60) % 60
h = int(secs / 3600)
if h > 0:
return "%02i:%02i:%02i" % (h,m,s)
elif m > 0:
return "%02i:%02i" % (m,s)
else:
return "%2is" % (s)
def ftp_status(self, b):
chunk = len(b)
now = time.time()
self.overall_transfers["bytes"] += chunk
self.current_transfer["bytes"] += chunk
spinner = self.spin()
if now - self.last_progress < 0.3:
return
self.last_progress = now
perc = float(self.current_transfer["bytes"]) / float(self.current_transfer["total"])
r = self.humanreadable_rate(self.current_transfer["bytes"], (now - self.current_transfer["start"]))
rbytes = float(self.current_transfer["bytes"]) / (float(now - self.current_transfer["start"]))
#overall_r = float(self.overall_transfers["bytes"]) / (now - self.overall_transfers["start"])
etr = int((float(self.overall_transfers["total"] - self.overall_transfers["bytes"]) / rbytes))
hetr = self.human_readable_time(etr)
label = "%s [%s %s] %s" % (self.current_transfer["name"], r, hetr, spinner)
self.show_progress(label, perc)
#>>>
def copy_file_local_to_local(self, src, dst, rel):#<<<
if self.dummy:
self.feedback("! copy: %s\n" % (rel))
return True
srcpath = os.path.join(src, rel)
dstpath = os.path.join(dst, rel)
if not self.ensure_dir_exists_local(os.path.dirname(dstpath)):
self._print("Can't make dir: %s" % (os.path.dirname(dstpath)))
return False
try:
shutil.copyfile(srcpath, dstpath)
return True
except KeyboardInterrupt as e:
self.user_abort()
except Exception as e:
self._print("Copy %s fails: %s" % (rel, str(e)))
self._print(srcpath)
self._print(dstpath)
return False
#>>>
def user_abort(self):  #<<<
    """Log the user-abort marker and terminate the process with exit code 1."""
    self._print("\n >> user abort <<")
    sys.exit(1)
    #>>>
def shorten(self, checkstr, maxlen = None):#<<<
if self.logfp != sys.stdout:
if len(checkstr) and checkstr[-1] != "\n":
return checkstr + " "
else:
return checkstr
if maxlen == None:
maxlen, rows = self.get_terminal_size()
if len(checkstr) >= maxlen:
newstr = checkstr[:(maxlen/2)-3] + "..."
newstr += checkstr[(len(checkstr) - (maxlen/2))+3:]
checkstr = newstr
return checkstr
#>>>
def status_callback(self, blocks):  #<<<
    """Placeholder transfer callback; currently does nothing beyond the
    show_status guard (the body below the early-return is empty)."""
    if not self.show_status:
        return
    #>>>
def status(self, statusstr, autofit = True):#<<<
if not self.show_status or self.logfp != sys.stdout:
return
cols, rows = self.get_terminal_size()
if autofit:
statusstr = self.shorten(statusstr)
if self.logfp == sys.stdout:
self.logfp.write("\r%s\r%s" % ((cols * " "), statusstr))
else:
self.logfp.write(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S "))
self.logfp.write("%s" % (statusstr))
self.logfp.flush()
#>>>
def clear_status(self): #<<<
if self.logfp == sys.stdout:
self.logfp.write("\r%s\r" % (78 * " "))
else:
self.logfp.write("\n")
self.logfp.flush()
#>>>
def ls_R_local(self, dirname, include_dirs=False, prepend_dirname = True): #<<<
if not os.path.isdir(dirname):
self._print("%s: dir not found" % (dirname))
sys.exit(1)
self.feedback("Listing local dir: %s" % (dirname))
stack = [dirname]
ret = []
items = 0
try:
while stack:
thisdir = stack.pop(0)
try:
for f in sorted(os.listdir(thisdir)):
items += 1
path = os.path.join(thisdir, f)
if os.path.isdir(path):
if include_dirs:
if prepend_dirname:
ret.append(path)
else:
ret.append(path[len(dirname)+1:])
stack.append(path)
continue
if prepend_dirname:
ret.append(path)
else:
ret.append(path[len(dirname)+1:])
except:
pass
self.show_ok()
return ret
except Exception as e:
self._print("local ls error: %s" % str(e))
return None
#>>>
def mkftp2(self, uri_parts):  #<<<
    """Convenience wrapper: build (or reuse a pooled) FTP connection from a
    dict produced by split_uri()."""
    return self.mkftp(uri_parts["host"], uri_parts["user"], uri_parts["password"],\
        uri_parts["port"], uri_parts["timeout"], uri_parts["passive"])
    #>>>
def mkftp(self, host, user, password, port = None, timeout=None, passive=None):#<<<
if host == None: host = "localhost"
if user == None: user = "anonymous"
if port == None: port = 21
if password == None: password = "<PASSWORD>.<EMAIL>"
if timeout == None: timeout = 30
if passive == None: passive = True
# look for a matching conn in the pool
ftp = None
for f in self.ftp_conns:
if f["host"] == host and \
f["user"] == user and \
f["password"] == password and \
f["port"] == port and \
f["timeout"] == timeout and \
f["passive"] == passive:
#test if ftp object is still alive
try:
ftp = f["client"]
ftp.cwd(".")
return ftp
except:
ftp.close()
self.ftp_conns.remove(f)
ftp = None
break
try:
if ftp == None:
ftp = ftplib.FTP()
ftp.connect(host, port, timeout)
ftp.login(user, password)
ftp.set_pasv(passive)
# cache connection
f = {"host":host, "user":user, "password":password, "port": port, "timeout":timeout, "passive":passive, "client":ftp}
self.ftp_conns.append(f)
return ftp
except Exception as e:
self.set_last_error("Unable to connect to ftp://%s:%s@%s:%i" % (user, password, host, port), e)
return None
#>>>
def filesize(self, base_uri, relative_path):#<<<*/
uri_parts = self.split_uri(base_uri)
if uri_parts["protocol"] == "file":
try:
return os.stat(os.sep.join([uri_parts["path"], relative_path])).st_size
except Exception as e:
self.set_last_error("filesize: can't stat %s%s%s" % (base_uri, os.sep, relative_path), e)
return 0
elif uri_parts["protocol"] == "ftp":
# look for cached size
size_key = "%s|%s|%s|%s/%s" % (uri_parts["user"], uri_parts["password"], uri_parts["host"], uri_parts["path"], relative_path)
if list(self.ftp_size_cache.keys()).count(size_key) > 0:
if self.dummy:
print("filesize: returning cached value")
return self.ftp_size_cache[size_key]
ftp = self.mkftp2(uri_parts)
if ftp == None:
self.set_last_error("filesize: can't get ftp object for %s/%s" % (base_uri, relative_path), None)
return -1
if True:
#try:
self.last_listing = []
path = self.sanitise_ftp_path("/".join([uri_parts["path"], relative_path]))
ftp.dir(path, self.catch_dir)
if len(self.last_listing) == 0:
if self.dummy:
print("Unable to list on:\n%s" % (path))
return 0
parts = self.get_non_empty(self.last_listing[0].split(" "))
self.last_listing = []
if self.dummy and int(parts[4]) == 0:
print("zero ret, parts:")
print(parts)
print("full ret:")
print(self.last_listing)
return int(parts[4])
#return ftp.size("/".join([uri_parts["path"], relative_path]))
#except Exception as e:
else:
return 0
else:
return -1
#>>>*/
def sanitise_ftp_path(self, p):#<<<*/
return p.replace("[", "\\[").replace("]", "\\]")
#>>>*/
def catch_dir(self, line):  #<<<*/
    """Callback for ftplib's dir(): accumulate raw listing lines into
    self.last_listing (reset by the caller before each dir())."""
    self.last_listing.append(line)
    #>>>*/
def get_non_empty(self, l):#<<<*/
out = []
for item in l:
if len(item) > 0:
out.append(item)
return out
#>>>*/
def ls_R_ftp(self, uri_parts, include_dirs = False, prepend_dirname = True):#<<<
ftp = self.mkftp(uri_parts["host"], uri_parts["user"], \
uri_parts["password"], uri_parts["port"], uri_parts["timeout"], \
uri_parts["passive"])
if ftp == None:
return None
stack = [uri_parts["path"]]
ret = []
items = 0
while stack:
thisdir = stack.pop(0)
self.feedback("Listing remote dir: %s" % (thisdir))
ftp.cwd("/")
# nlst method
contents = sorted(ftp.nlst(thisdir))
# dir method; try to cache file sizes
#self.last_listing = []
#ftp.dir(thisdir, self.catch_dir)
#contents = []
for l in self.last_listing:
parts = self.get_non_empty(l.split(" "))
fsize = int(parts[4])
year_or_time = parts[7]
pos = l.find(year_or_time) + len(year_or_time)
fpath = l[pos:]
fpath = fpath.lstrip()
contents.append(fpath)
size_key = "%s|%s|%s|%s/%s" % (uri_parts["user"], uri_parts["password"], uri_parts["host"], thisdir, fpath)
self.ftp_size_cache[size_key] = fsize
for f in contents:
items += 1
#self._print("%i: %s" % (items, f))
path = "/".join([thisdir, f])
if self.is_ftp_dir(ftp, path):
#self._print("\nrdir: %s" % path)
if include_dirs:
if prepend_dirname:
ret.append(path)
else:
ret.append(path[len(uri_parts["path"])+1:])
stack.append(path)
stack = sorted(stack)
#self._print("\nstack: " + str(stack))
else:
#self._print("\nrfile: %s" % f)
if prepend_dirname:
ret.append(path)
else:
ret.append(path[len(uri_parts["path"])+1:])
self.show_ok()
ftp.close()
del ftp
return ret
#>>>
def is_ftp_dir(self, ftp, dirname):  #<<<
    # TODO: find a faster way to do this
    """Heuristic directory test: a path is treated as a directory iff we can
    cwd into it; the previous working directory is restored on both paths.
    An empty dirname is never a directory."""
    current = ftp.pwd()
    try:
        if len(dirname) == 0:
            return False
        ftp.cwd(dirname)
        ftp.cwd(current)
        return True
    except:
        # cwd failed (not a dir, no permission, or connection hiccup).
        ftp.cwd(current)
        return False
    #>>>
if __name__ == "__main__":
opts = Options()
opts.AddOpt("-s", help = "Source directory", aliases=["-src", "--src"], consumes = 1, \
ConsumesHelp = "<source url/path>", required = True)
opts.AddOpt("-d", help="Destination directory", aliases=["-dst", "--dst"], consumes=1, \
ConsumesHelp="<destination url/path>", required=True)
opts.AddOpt("-a", help="Files which have been copied in a previous run and are now "\
+ "found to be missing are archived to this location", \
ShortHelp="Archive location", aliases=["-archive", "--archive"], consumes=1, \
required=False)
opts.AddOpt("-dummy", help="Prints out information about what would be done, but doesn't actually do it", \
aliases = ["--dummy"], consumes=0,ShortHelp="Dummy run")
opts.AddOpt("-i", "Regular expression to match source and destination paths not to bother synchronising" \
+ " (default matches watched indicator files on mede8er players)", \
aliases = ["-ignore", "--ignore"], consumes=1, ShortHelp="Ignore paths matching regex",\
Default=".*\\.t$", ConsumesHelp = "<regular expression>")
opts.AddOpt("-l", help="Redirect logging to this file instead of stdout", \
aliases = ["--logfile"], consumes=1, ShortHelp = "Log file (instead of stdout)")
opts.AddOpt("-h", help="Include hidden (dot-) files/dirs", aliases=["-include-hidden", "--include-hidden"], \
consumes = 0)
opts.AddOpt("-n", help="No target file removal", required=False, consumes=0, aliases = ['--no-remove'])
opts.ParseArgs()
if opts.RequiredMissing():
sys.exit(1)
f = SmartSync()
cmdopts = dict()
cmdopts["src"] = opts.value("-s")
cmdopts["dst"] = opts.value("-d")
cmdopts["includehidden"] = opts.selected("-h")
if opts.selected("-a"):
cmdopts["archive"] = opts.value("-a")
else:
cmdopts["archive"] = None
cmdopts["dummy"] = opts.selected("-dummy")
cmdopts["ignore"] = opts.value("-i")
cmdopts["no-remove"] = opts.selected('-n')
logfp = sys.stdout
if opts.selected("-l"):
try:
if opts.value("-l") != "stdout":
logfp = open(opts.value("-l"), "a")
except Exception as e:
print("Unable to open %s for appending; outputting to stdout instead" % opts.value("-l"))
print(str(e))
logfp = sys.stdout
try:
f.logfp = logfp
f.sync(cmdopts)
except KeyboardInterrupt:
logfp.write("\n>> user abort <<\n")
if logfp != sys.stdout:
logfp.close()
|
fluffynuts/scripts
|
inhibit-screensaver.py
|
<gh_stars>1-10
#!/usr/bin/python
# Briefly inhibit, then immediately release, the GNOME screensaver via D-Bus.
import dbus
import time

# Connect to the user's session bus and look up the ScreenSaver service.
bus = dbus.Bus(dbus.Bus.TYPE_SESSION)
devobj = bus.get_object('org.gnome.ScreenSaver', '/org/gnome/ScreenSaver')
dev = dbus.Interface(devobj, "org.gnome.ScreenSaver")
# Inhibit returns a cookie that must be handed back to UnInhibit.
cookie = dev.Inhibit('inhibit-screensaver.py', 'client request')
# NOTE(review): sleep(0) releases the inhibition immediately; presumably the
# duration is meant to be tuned by the caller — confirm intended behaviour.
time.sleep(0)
dev.UnInhibit(cookie)
|
artful-addict/kh-squared-coming-soon
|
usr/libexec/fax/coverpage.py
|
#!/usr/bin/python
# cover.py -- add an autogenerated cover page to a PDF document
import sys, os, getopt, string, pwd, cgi
from Quartz.CoreGraphics import *
from Quartz.ImageIO import *
from AppKit import *
from WebKit import *
from Foundation import *
## definitions
subject_title = 'Subject:'
from_title = 'From:'
to_title = 'To:'
date_title = 'Date:'
pages_title = 'Sheets to follow:'
subject_text = None
from_name = 'auto'
to_name = None
date_string = None
pages_string = None
body_file = None
body_string = None
body_type = None
output_file = 'cover.pdf'
page_rect = CGRectMake (0, 0, 612, 792)
page_inset = (72, 72)
default_logo_size = (120, 120)
web_prefs = None
header_font_size = 14
logos = []
## functions
def createPDFDocumentWithPath(path):
    """Open the PDF file at *path* and return a CGPDFDocument (None on failure)."""
    url = CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, path, len(path), False)
    return CGPDFDocumentCreateWithURL(url)
def createImage(imagePath):
    # Load the image file at imagePath and return it as a CGImage.
    # Returns None (after printing a diagnostic) if the file cannot be decoded.
    image = None
    provider = CGDataProviderCreateWithFilename(imagePath)
    if provider:
        imageSrc = CGImageSourceCreateWithDataProvider(provider, None)
        if imageSrc:
            # Take the first image in the source (handles multi-image formats).
            image = CGImageSourceCreateImageAtIndex(imageSrc, 0, None)
    if not image:
        print "Cannot import the image from file %s" % imagePath
    return image
def setGraphicsContext(context):
    # Install a flipped NSGraphicsContext wrapping the given CGContext as the
    # current AppKit drawing context; pass None to clear the current context.
    graphicsContext = None
    if context:
        graphicsContext = NSGraphicsContext.graphicsContextWithGraphicsPort_flipped_(context, True)
    NSGraphicsContext.setCurrentContext_(graphicsContext)
def webPrefsWithFontSize(fontSize):
    # Return a WebPreferences object configured for safe, static rendering
    # (no Java/JavaScript/plugins/animated images) with the given default
    # font size. The object is created once and cached in module-global
    # `web_prefs`; subsequent calls re-use it, updating only the font size.
    global web_prefs
    if web_prefs == None:
        web_prefs = WebPreferences.alloc().init();
        web_prefs.setUserStyleSheetEnabled_(False);
        web_prefs.setJavaEnabled_(False);
        web_prefs.setJavaScriptEnabled_(False);
        web_prefs.setJavaScriptCanOpenWindowsAutomatically_(False);
        web_prefs.setPlugInsEnabled_(False);
        web_prefs.setAllowsAnimatedImages_(False);
        web_prefs.setAllowsAnimatedImageLooping_(False);
        web_prefs.setLoadsImagesAutomatically_(True);
        web_prefs.setAutosaves_(False);
    # A non-positive size falls back to 12pt.
    if fontSize <= 0.0:
        fontSize = 12.0
    web_prefs.setDefaultFontSize_(fontSize);
    web_prefs.setDefaultFixedFontSize_(fontSize);
    return web_prefs;
def CGContextDrawHTMLTextInRect (context, data, rect, fontSize):
    # Render HTML `data` (an NSData) into `rect` of the CGContext using an
    # offscreen WebView; returns the rect actually inked. A None `context`
    # performs layout only (measurement without drawing).
    # Use a frame with the right width (for wrapping), but a ridiculously
    # small height. This forces WebKit to expand the document frame's
    # view to the real height of the passed in text. We can then use
    # that to calculate the actual size of the drawn content.
    ink_rect = NSZeroRect
    view = WebView.alloc().initWithFrame_(NSMakeRect(0, 0, CGRectGetWidth(rect), 1))
    if view:
        view.setPreferences_(webPrefsWithFontSize(fontSize))
        frame = view.mainFrame()
        frame.loadData_MIMEType_textEncodingName_baseURL_(data, u"text/html", None, None)
        # Spin the run loop (bounded at ~10s) until the content finishes loading.
        date = NSDate.dateWithTimeIntervalSinceNow_(10)
        while view.isLoading():
            NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, date)
        doc_view = frame.frameView().documentView()
        if doc_view:
            if context:
                CGContextSaveGState(context)
                CGContextClipToRect(context, rect)
                CGContextTranslateCTM(context, CGRectGetMinX(rect), CGRectGetMinY(rect))
                # Flip coordinates: AppKit draws top-down, CG is bottom-up.
                CGContextConcatCTM(context, CGAffineTransformMake(1, 0, 0, -1, 0, CGRectGetHeight(rect)))
                setGraphicsContext(context)
            doc_view.drawRect_(NSMakeRect(0, 0, CGRectGetWidth(rect), CGRectGetHeight(rect)))
            if context:
                setGraphicsContext(None)
                CGContextRestoreGState(context)
            # Calculate the area we drew into.
            ink_rect = NSOffsetRect(doc_view.frame(), CGRectGetMinX(rect), CGRectGetMinY(rect))
            ink_rect = NSIntersectionRect(ink_rect, rect)
        del view
    return ink_rect
def CGContextDrawTextInRect(context, data, rect, fontSize, format):
    # Render textual `data` into `rect` using an offscreen NSTextView and
    # return the rect actually inked. `format` selects an NSAttributedString
    # document type (e.g. NSRTFTextDocumentType); None means raw UTF-8 text.
    # A None `context` performs layout/measurement only.
    ink_rect = NSZeroRect
    tx_rect = NSMakeRect(0, 0, CGRectGetWidth(rect), CGRectGetHeight(rect))
    text_view = NSTextView.alloc().initWithFrame_(tx_rect)
    text_view.setRichText_(True)
    text_view.setDrawsBackground_(False)
    text_storage = text_view.textStorage()
    if fontSize > 0.0:
        text_storage.setFont_(NSFont.userFontOfSize_(fontSize))
    text_storage.beginEditing()
    if format:
        formatDict = NSDictionary.dictionaryWithObject_forKey_(format, NSDocumentTypeDocumentOption)
        attr_string = NSAttributedString.alloc().initWithData_options_documentAttributes_error_(data, formatDict, None, None)
        text_storage.setAttributedString_(attr_string)
        del attr_string
    else:
        # Interpret `data` as raw UTF-8 bytes.
        string = NSString.alloc().initWithBytes_length_encoding_(data, len(data), NSUTF8StringEncoding)
        text_storage.replaceCharactersInRange_withString_(NSMakeRange(0, 0), string)
        del string
    text_storage.endEditing()
    if context:
        CGContextSaveGState(context)
        CGContextTranslateCTM(context, rect.origin.x, rect.origin.y)
        # Flip coordinates: AppKit draws top-down, CG is bottom-up.
        CGContextConcatCTM(context, CGAffineTransformMake(1, 0, 0, -1, 0, CGRectGetHeight(rect)))
        setGraphicsContext(context)
    text_view.drawRect_(tx_rect)
    ink_rect = NSOffsetRect(text_view.layoutManager().boundingRectForGlyphRange_inTextContainer_(NSMakeRange(0, text_storage.length()), text_view.textContainer()), CGRectGetMinX(rect), CGRectGetMinY(rect))
    if context:
        setGraphicsContext(None)
        CGContextRestoreGState(context)
    del text_view
    return ink_rect
def CGContextDrawRTFTextInRect(context, data, rect, fontSize):
    # Convenience wrapper: draw `data` as RTF; returns the inked rect.
    return CGContextDrawTextInRect(context, data, rect, fontSize, NSRTFTextDocumentType)
#def CGContextDrawHTMLTextInRect(context, data, rect, fontSize):
# return CGContextDrawTextInRect(context, data, rect, fontSize, NSHTMLTextDocumentType)
def CGContextDrawPlainTextInRect(context, data, rect, fontSize):
    # Convenience wrapper: draw `data` as plain UTF-8 text; returns the inked rect.
    return CGContextDrawTextInRect(context, data, rect, fontSize, None)
def body (c, rect):
    # Draw the entire cover page into `rect`: logo images, then the header
    # table, then the body text (file or literal string) below the header.
    # The logo image or images
    for (f, r) in logos:
        image = createImage(f)
        if image:
            CGContextDrawImage(c, r, image)
    # The header
    html = make_header_html()
    tr = CGContextDrawHTMLTextInRect(c, NSData.dataWithBytes_length_(html, len(html)), rect, header_font_size)
    # The body, whose position is related to the header
    global body_type
    # Adjust body height according to header size
    rect.size.height -= CGRectGetHeight(tr)
    data = None
    if body_file:
        with open(body_file, 'rb') as f:
            data = f.read()
        if not body_type:
            # Infer the body format from the file extension when -Y not given.
            bits = string.split (body_file, '.')
            if len (bits) > 1:
                suff = string.lower (bits[-1])
                if suff == 'rtf':
                    body_type = 'rtf'
                elif suff == 'html' or suff == 'htm':
                    body_type = 'html'
    elif body_string:
        data = body_string
    if data:
        # Dispatch on the (possibly inferred) body type; default is plain text.
        fontSize = 12
        if body_type == 'rtf':
            CGContextDrawRTFTextInRect(c, data, rect, fontSize)
        elif body_type == 'html':
            CGContextDrawHTMLTextInRect(c, data, rect, fontSize)
        else:
            CGContextDrawPlainTextInRect (c, data, rect, fontSize)
# Encoding the header section as HTML is the easiest way to get nice
# alignment of the two columns
def make_header_html ():
    # Build the cover-page header (To / From / Date / Subject / Sheets) as an
    # HTML table string. Resolves the 'now' date and 'auto' sender here.
    # NOTE(review): assumes pages_string was supplied via -n; int(None) would
    # raise TypeError otherwise — confirm callers always pass -n.
    global date_string, from_name
    def make_row (title, body, format=None):
        # One <tr>; `format` optionally wraps the body in a tag, e.g. 'b'.
        f = ''; F = ''
        if format:
            f = '<%(format)s>' % vars ()
            F = '</%(format)s>' % vars ()
        # Escape user-supplied text (quote=True also escapes double quotes).
        body = cgi.escape(body, True);
        return ('<tr><td valign=top align=right>%(title)s</td>'
                '<td>%(f)s%(body)s%(F)s</td></tr>\n' % vars())
    if date_string == 'now':
        # Substitute a localised long-format "now" timestamp.
        localizedDate = NSDateFormatter.localizedStringFromDate_dateStyle_timeStyle_(NSDate.date(), NSDateFormatterLongStyle, NSDateFormatterLongStyle)
        date_string = localizedDate.encode("utf-8")
        #f = os.popen ('date')
        ## read the date, dropping the trailing newline
        #date_string = f.read () [:-1]
        #f.close ()
    if from_name == 'auto':
        # Default the sender to the current user's GECOS (full-name) field.
        p = pwd.getpwuid (os.getuid ())
        from_name = p and p[4];
    # Localise the page count.
    pagesNumber = int(pages_string)
    pagesNumberStr = NSString.localizedStringWithFormat_("%d", pagesNumber)
    pages_string_loc = pagesNumberStr.encode("utf-8")
    return ('<html>\n'
            + '<head><meta http-equiv=Content-Type content="text/html; charset=UTF-8"></head>\n'
            + '<body text=\"#000000\">\n<table>\n'
            + (to_name and make_row (to_title, to_name) or '')
            + (from_name and make_row (from_title, from_name) or '')
            + (date_string and make_row (date_title, date_string) or '')
            + (subject_text and make_row (subject_title, subject_text, 'b') or '')
            + (pages_string_loc and make_row (pages_title, pages_string_loc) or '')
            + '</table>\n</body>\n</html>')
## entry point
def usage ():
    # Print command-line help to stdout.
    print '''
usage: python cover.py [OPTION]... PDF-FILES...
Add a cover page to one or more PDF documents.
-f, --from=STRING
-F, --from-title=STRING
-t, --to=STRING
-T, --to-title=STRING
-s, --subject=STRING
-S, --subject-title=STRING
-d, --date=STRING (use STRING='now' for current date)
-D, --date-title=STRING
-n, --count=STRING
-N, --count-title=STRING
-b, --body=STRING
-B, --body-file=FILENAME
-Y, --body-type=TYPE
-p, --page-rect=X,Y,W,H
-o, --output=FILENAME
-l, --logo=FILENAME[@X,Y,W,H]
-H, --header-font-size=SIZE
'''
def main ():
    # Parse command-line options, then write `output_file`: the generated
    # cover page followed by every page of each input PDF.
    global subject_title, from_title, to_title, pages_title, date_title
    global subject_text, from_name, to_name, pages_string, body_file, body_string
    global body_type, output_file, page_rect, date_string, logos
    global header_font_size
    def parse_rect (s):
        # "X,Y,W,H" -> CGRect
        a = string.split (s, ',')
        return (CGRectMake (float (a[0]), float (a[1]), float (a[2]), float (a[3])))
    try:
        # BUG FIX: the short-option string previously listed 'T:' twice and
        # omitted 'Y:' (--body-type) and 'H:' (--header-font-size), so the
        # -Y and -H short options advertised by usage() were rejected.
        opts,args = getopt.getopt (sys.argv[1:],
                                   'f:F:t:T:s:S:b:B:Y:p:o:l:d:D:n:N:H:',
                                   ['from=', 'from-title=', 'to=', 'to-title=',
                                    'subject=', 'subject-title=', 'body=',
                                    'body-file=', 'body-type=', 'page-rect=',
                                    'output=', 'logo=', 'date=', 'date-title=',
                                    'count=', 'count-title=', 'header-font-size='])
    except getopt.GetoptError:
        usage ()
        sys.exit (1)
    for o,a in opts:
        if o in ('-f', '--from'):
            from_name = a
        elif o in ('-F', '--from-title'):
            from_title = a
        elif o in ('-t', '--to'):
            to_name = a
        elif o in ('-T', '--to-title'):
            to_title = a
        elif o in ('-s', '--subject'):
            subject_text = a
        elif o in ('-S', '--subject-title'):
            subject_title = a
        elif o in ('-b', '--body'):
            body_string = a
        elif o in ('-B', '--body-file'):
            body_file = a
        elif o in ('-Y', '--body-type'):
            body_type = a
        elif o in ('-p', '--page-rect'):
            page_rect = parse_rect (a)
        elif o in ('-o', '--output'):
            output_file = a
        elif o in ('-l', '--logo'):
            # FILENAME[@X,Y,W,H]; default places the logo in the top-right
            # corner inside the page inset.
            a = string.split (a, '@')
            if len (a) > 1:
                r = parse_rect (a[1])
            else:
                r = CGRectMake (page_rect.origin.x + page_rect.size.width - page_inset[0] - default_logo_size[0],
                                page_rect.origin.y + page_rect.size.height - page_inset[1] - default_logo_size[1],
                                default_logo_size[0], default_logo_size[1])
            logos = logos + [(a[0], r)]
        elif o in ('-d', '--date'):
            date_string = a
        elif o in ('-D', '--date-title'):
            date_title = a
        elif o in ('-n', '--count'):
            pages_string = a
        elif o in ('-N', '--count-title'):
            pages_title = a
        elif o in ('-H', '--header-font-size'):
            header_font_size = float (a)
    # Create the output PDF context and draw the cover page.
    c = CGPDFContextCreateWithURL(CFURLCreateFromFileSystemRepresentation(kCFAllocatorDefault, output_file, len(output_file), False), None, None)
    if c:
        CGContextBeginPage(c, page_rect)
        body (c, CGRectInset(page_rect, page_inset[0], page_inset[1]))
        CGContextEndPage(c)
        # Append every page of each input PDF after the cover page, keeping
        # each page's own media box when it has one.
        for f in args:
            pdf = createPDFDocumentWithPath(f)
            if pdf:
                for pageNum in range (1, CGPDFDocumentGetNumberOfPages(pdf) + 1):
                    page = CGPDFDocumentGetPage(pdf, pageNum)
                    if page:
                        rect = CGPDFPageGetBoxRect(page, kCGPDFMediaBox)
                        if CGRectIsEmpty(rect):
                            rect = None
                        CGContextBeginPage(c, rect)
                        CGContextDrawPDFPage(c, page)
                        CGContextEndPage(c)
                del pdf
        CGPDFContextClose(c)
        del c
if __name__ == '__main__':
main ()
|
artful-addict/kh-squared-coming-soon
|
usr/libexec/feedback/filevault.py
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Imports
import json
import plistlib
import subprocess
import sys
# Constants
kCoreStorageLogicalVolumeConversionState = 'CoreStorageLogicalVolumeConversionState'
kCoreStorageLogicalVolumeFamilyEncryptionType = 'CoreStorageLogicalVolumeFamilyEncryptionType'
kCoreStorageLogicalVolumeSparse = 'CoreStorageLogicalVolumeGroupSparse'
kMemberOfCoreStorageLogicalVolumeFamily = 'MemberOfCoreStorageLogicalVolumeFamily'
kMemberOfCoreStorageLogicalVolumeGroup = 'MemberOfCoreStorageLogicalVolumeGroup'
# Implementation
def _diskutil_cs_info(plist=False, target='/'):
    # Run `diskutil coreStorage information [-plist] <target>` and return the
    # parsed plist (dict) when plist=True, otherwise the raw text output.
    # Any failure (command missing, non-zero exit, ...) yields {}.
    cmd = ['diskutil', 'coreStorage', 'information']
    cmd.append('-plist') if plist else None
    # `target` may be a mountpoint, device node, or CoreStorage UUID.
    cmd.append(target) if isinstance(target, (str, unicode)) else None
    try:
        ret = subprocess.check_output(cmd)
    except Exception:
        return {}
    return plistlib.readPlistFromString(ret) if plist else str(ret)
def _diskutil_cs_list(plist=False):
    # Run `diskutil coreStorage list [-plist]`; parsed plist dict when
    # plist=True, else raw text; {} on any failure.
    # NOTE(review): not referenced elsewhere in this file — possibly dead code.
    cmd = ['diskutil', 'coreStorage', 'list']
    cmd.append('-plist') if plist else None
    try:
        ret = subprocess.check_output(cmd)
    except Exception:
        return {}
    return plistlib.readPlistFromString(ret) if plist else str(ret)
def _diskutil_list(plist=False):
    # Run `diskutil list [-plist]`; parsed plist dict when plist=True, else
    # raw text; {} on any failure.
    # NOTE(review): not referenced elsewhere in this file — possibly dead code.
    cmd = ['diskutil', 'list']
    cmd.append('-plist') if plist else None
    try:
        ret = subprocess.check_output(cmd)
    except Exception:
        return {}
    return plistlib.readPlistFromString(ret) if plist else str(ret)
def is_fv_sparse():
    # True when the boot volume's CoreStorage logical volume group is sparse;
    # False on any lookup failure (note: unlike the other probes, this one
    # returns False rather than None on error).
    try:
        lvg_uuid = _diskutil_cs_info(plist=True).get(kMemberOfCoreStorageLogicalVolumeGroup, None)
        lvg = _diskutil_cs_info(plist=True, target=lvg_uuid)
        ret = lvg.get(kCoreStorageLogicalVolumeSparse, None)
        return bool(ret)
    except Exception:
        return False
def is_fv_finised():
    # True when FileVault is enabled AND the encryption conversion state
    # reports 'Complete'; None on any failure.
    # NOTE(review): the "finised" typo is kept — the name is part of the
    # public interface (and mirrored in the JSON key emitted by __main__).
    try:
        state = _diskutil_cs_info(plist=True).get(kCoreStorageLogicalVolumeConversionState, '')
        return bool(is_fv_turned_on() and 'Complete' in state)
    except Exception:
        return None
def is_fv_turned_on():
    # True when the boot volume's logical volume family reports an encryption
    # type other than 'None'; None on any lookup failure.
    try:
        lvf_uuid = _diskutil_cs_info(plist=True).get(kMemberOfCoreStorageLogicalVolumeFamily, None)
        lvf = _diskutil_cs_info(plist=True, target=lvf_uuid)
        ret = lvf.get(kCoreStorageLogicalVolumeFamilyEncryptionType, '')
        return bool(not 'None' in ret)
    except Exception:
        return None
def was_fv_adopted_at_macbuddy():
    # True when the diagnostic log contains a com.apple.macbuddy.fvadopted
    # message, i.e. FileVault was enabled during Setup Assistant; None on any
    # failure (syslog unavailable, parse error, ...).
    try:
        ret = subprocess.check_output(['syslog', '-F', 'xml', '-d', '/var/log/DiagnosticMessages', '-k', 'com.apple.message.domain', 'com.apple.macbuddy.fvadopted'])
        msgs = plistlib.readPlistFromString(ret)
        # Only the first (most relevant) message is inspected.
        msg = msgs[0] if msgs else {}
        return bool(msg.get('com.apple.message.domain', False) == 'com.apple.macbuddy.fvadopted')
    except Exception:
        return None
# Go!
if __name__ == "__main__":
    # Gather all FileVault facts and emit them as one JSON object on stdout.
    try:
        data = {
            'filevault_is_finised': is_fv_finised(),
            'filevault_is_sparse': is_fv_sparse(),
            'filevault_is_turned_on': is_fv_turned_on(),
            'filevault_was_adopted_at_macbuddy': was_fv_adopted_at_macbuddy(),
        }
        out = json.dumps(data)
    except Exception:
        # On any failure: emit an empty JSON object and exit non-zero.
        sys.stdout.write(json.dumps({}))
        sys.stdout.flush()
        sys.exit(1)
    else:
        sys.stdout.write(out)
        sys.stdout.flush()
        sys.exit(0)
|
artful-addict/kh-squared-coming-soon
|
usr/libexec/feedback/appearance.py
|
<filename>usr/libexec/feedback/appearance.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Report macOS appearance settings (interface style, transparency/contrast
# accessibility options) as a JSON object on stdout. Exit 0 on success,
# exit 1 with "{}" on any failure.
import Foundation as NS
import json
import sys

out = ""
try:
    defaults = NS.NSUserDefaults.standardUserDefaults()
    # Read the global domain and the Universal Access preference domain.
    globalDomain = defaults.persistentDomainForName_(NS.NSGlobalDomain)
    universalAccess = defaults.persistentDomainForName_(u"com.apple.universalaccess")
    output = {
        "appearance.interfaceStyle" : globalDomain.get("AppleInterfaceStyle", u"Default"),
        "appearance.reduceTransparency" : universalAccess.get("reduceTransparency", False),
        "appearance.increaseContrast" : universalAccess.get("increaseContrast", False),
    }
    out = json.dumps(output)
    sys.stdout.write(out)
    sys.stdout.flush()
    # SystemExit is not an Exception subclass, so this is not caught below.
    sys.exit(0)
except Exception:
    sys.stdout.write(json.dumps({}))
    sys.stdout.flush()
    sys.exit(1)
|
artful-addict/kh-squared-coming-soon
|
usr/bin/smtpd.py
|
#!/usr/bin/python -R
# Stub installed in place of a tool that the invoking Python version cannot
# run: prints the available versioned alternatives and always exits 1.
import sys, os
import glob, re

partA = """\
python version %d.%d.%d can't run %s. Try the alternative(s):
"""
partB = """
Run "man python" for more information about multiple version support in
Mac OS X.
"""
sys.stderr.write(partA % (sys.version_info[:3] + (sys.argv[0],)))
dir, base = os.path.split(sys.argv[0])
# python-config is special: the version is embedded before the -config suffix.
specialcase = (base == 'python-config')
if specialcase:
    pat = "python*-config"
else:
    pat = base + '*'
g = glob.glob(os.path.join(dir, pat))
# match a single digit, dot and possibly multiple digits, because we might
# have 2to32.6, where the program is 2to3 and the version is 2.6.
vpat = re.compile("\d\.\d+")
n = 0
for i in g:
    vers = vpat.search(i)
    if vers is None:
        # Candidate has no embedded version number; not an alternative.
        continue
    sys.stderr.write("%s (uses python %s)\n" % (i, i[vers.start():vers.end()]))
    n = 1
if n == 0:
    sys.stderr.write("(Error: no alternatives found)\n")
sys.stderr.write(partB)
sys.exit(1)
|
thexdesk/aminator
|
aminator/plugins/volume/linux.py
|
# -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.volume.linux
=============================
basic linux volume allocator
"""
import logging
from aminator.util.linux import resize2fs, fsck, growpart
from aminator.exceptions import VolumeException
from aminator.plugins.volume.base import BaseVolumePlugin
__all__ = ('LinuxVolumePlugin',)
log = logging.getLogger(__name__)
class LinuxVolumePlugin(BaseVolumePlugin):
    # Volume plugin that attaches a cloud block device on context entry,
    # optionally resizes the root filesystem to fill it, and detaches and
    # deletes the volume on exit.
    _name = 'linux'

    def _attach(self, blockdevice):
        # Allocate a device node from the block-device allocator and attach
        # the cloud volume to it; records the (possibly partition-suffixed)
        # device path in the plugin context for later mounting.
        with blockdevice(self._cloud) as dev:
            self._dev = dev
            if blockdevice.partition is not None:
                devpart = '{0}{1}'.format(dev, blockdevice.partition)
                self.context.volume['dev'] = devpart
            else:
                self.context.volume['dev'] = self._dev
            self._cloud.attach_volume(self._dev)

    def _detach(self):
        # Detach the cloud volume from this instance.
        self._cloud.detach_volume(self._dev)

    def _resize(self):
        # fsck the root volume, grow its partition when one is configured,
        # then resize the filesystem to fill the available space.
        # Raises VolumeException on any step failure.
        log.info('Checking and repairing root volume as necessary')
        fsck_op = fsck(self.context.volume.dev)
        if not fsck_op.success:
            raise VolumeException(
                'fsck of {} failed: {}'.format(self.context.volume.dev, fsck_op.result.std_err))
        log.info('Attempting to resize root fs to fill volume')
        if self._blockdevice.partition is not None:
            log.info('Growing partition if necessary')
            growpart_op = growpart(self._dev, self._blockdevice.partition)
            if not growpart_op.success:
                volmsg = 'growpart of {} partition {} failed: {}'
                raise VolumeException(
                    volmsg.format(
                        self._dev, self._blockdevice.partition, growpart_op.result.std_err))
        resize_op = resize2fs(self.context.volume.dev)
        if not resize_op.success:
            raise VolumeException(
                'resize of {} failed: {}'.format(self.context.volume.dev, resize_op.result.std_err))

    def _delete(self):
        # Destroy the cloud volume.
        self._cloud.delete_volume()

    def __enter__(self):
        # Attach (and, when resize_volume is configured, resize) on entry.
        self._attach(self._blockdevice)
        if self.plugin_config.get('resize_volume', False):
            self._resize()
        return self

    def __exit__(self, exc_type, exc_value, trace):
        # Detach and delete the volume unless an exception occurred and
        # preserve_on_error is set. Never suppresses exceptions.
        if exc_type:
            log.debug('Exception encountered in linux volume plugin context manager',
                      exc_info=(exc_type, exc_value, trace))
        if exc_type and self._config.context.get("preserve_on_error", False):
            return False
        self._detach()
        self._delete()
        return False
|
thexdesk/aminator
|
aminator/plugins/distro/linux.py
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
#
#
# Copyright 2013 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
aminator.plugins.distro.linux
==================================
Simple base class for cases where there are small distro-specific corner cases
"""
import abc
import logging
import os.path
from aminator.exceptions import VolumeException
from aminator.plugins.distro.base import BaseDistroPlugin
from aminator.util import retry
from aminator.util.linux import (
lifo_mounts, mount, mounted, MountSpec, unmount, busy_mount)
from aminator.util.linux import install_provision_configs, remove_provision_configs
from aminator.util.linux import short_circuit_files, rewire_files
from aminator.util.metrics import fails, timer, raises
__all__ = ('BaseLinuxDistroPlugin',)
log = logging.getLogger(__name__)
class BaseLinuxDistroPlugin(BaseDistroPlugin):
    """
    Most of what goes on between apt and yum provisioning is the same, so we factored that out,
    leaving the differences in the actual implementations
    """
    __metaclass__ = abc.ABCMeta

    def _activate_provisioning_service_block(self):
        """
        Enable service startup so that things work when the AMI starts
        For RHEL-like systems, we undo the short_circuit
        """
        config = self._config.plugins[self.full_name]
        files = config.get('short_circuit_files', [])
        if files:
            # Best-effort: a partial rewire is logged but not treated as fatal.
            if not rewire_files(self.root_mountspec.mountpoint, files):
                log.warning("Unable to rewire some files")
                return True
            else:
                log.debug('Files rewired successfully')
                return True
        else:
            log.debug('No short circuit files configured, no rewiring done')
            return True

    def _deactivate_provisioning_service_block(self):
        """
        Prevent packages installing the chroot from starting
        For RHEL-like systems, we can use short_circuit which replaces the service call with /bin/true
        """
        config = self._config.plugins[self.full_name]
        files = config.get('short_circuit_files', [])
        if files:
            # Best-effort: a partial short-circuit is logged but not fatal.
            if not short_circuit_files(self.root_mountspec.mountpoint, files):
                log.warning('Unable to short circuit some files')
                return True
            else:
                log.debug('Files short-circuited successfully')
                return True
        else:
            log.debug('No short circuit files configured')
            return True

    @fails("aminator.distro.linux.mount.error")
    def _mount(self, mountspec):
        # Mount `mountspec` unless it is already mounted; False on failure.
        if not mounted(mountspec):
            result = mount(mountspec)
            if not result.success:
                msg = 'Unable to mount {0.dev} at {0.mountpoint}: {1}'.format(mountspec, result.result.std_err)
                log.critical(msg)
                return False
        log.debug('Device {0.dev} mounted at {0.mountpoint}'.format(mountspec))
        return True

    @raises("aminator.distro.linux.umount.error")
    @retry(VolumeException, tries=10, delay=1, backoff=1, logger=log, maxdelay=1)
    def _unmount(self, mountspec):
        # Unmount `mountspec` (recursively when configured). Raises
        # VolumeException on failure so that @retry can re-attempt; the error
        # message includes any open files keeping the mount busy.
        recursive_unmount = self.plugin_config.get('recursive_unmount', False)
        if mounted(mountspec):
            result = unmount(mountspec, recursive=recursive_unmount)
            if not result.success:
                err = 'Failed to unmount {0}: {1}'
                err = err.format(mountspec.mountpoint, result.result.std_err)
                open_files = busy_mount(mountspec.mountpoint)
                if open_files.success:
                    err = '{0}. Device has open files:\n{1}'.format(err, open_files.result.std_out)
                raise VolumeException(err)
        log.debug('Unmounted {0.mountpoint}'.format(mountspec))

    @fails("aminator.distro.linux.configure_chroot.error")
    @timer("aminator.distro.linux.configure_chroot.duration")
    def _configure_chroot(self):
        # Prepare the chroot: mount volumes, install provisioning configs,
        # and optionally block service startup. False on any failure.
        config = self.plugin_config
        log.debug('Configuring chroot at {0.mountpoint}'.format(self.root_mountspec))
        if not self._configure_chroot_mounts():
            log.critical('Configuration of chroot mounts failed')
            return False
        if config.get('provision_configs', True):
            if not self._install_provision_configs():
                log.critical('Installation of provisioning config failed')
                return False
        log.debug("starting short_circuit ")
        # TODO: kvick we should rename 'short_circuit' to something like 'disable_service_start'
        # NOTE(review): default here is False but _teardown_chroot defaults the
        # same key to True — confirm which default is intended.
        if config.get('short_circuit', False):
            if not self._deactivate_provisioning_service_block():
                log.critical('Failure short-circuiting files')
                return False
        log.debug("finished short_circuit")
        log.debug('Chroot environment ready')
        return True

    def _configure_chroot_mounts(self):
        # Mount the root volume, then each configured chroot mount (proc,
        # sys, dev, ...) beneath it. False on the first failure.
        log.debug('Attempting to mount root volume: {0}'.format(self.root_mountspec))
        if not self._mount(self.root_mountspec):
            log.critical('Failed to mount root volume')
            return False
        if self.plugin_config.get('configure_mounts', True):
            for mountdef in self.plugin_config.chroot_mounts:
                dev, fstype, mountpoint, options = mountdef
                # Re-root the configured mountpoint under the chroot.
                mountpoint = mountpoint.lstrip('/')
                mountpoint = os.path.join(self.root_mountspec.mountpoint, mountpoint)
                mountspec = MountSpec(dev, fstype, mountpoint, options)
                log.debug('Attempting to mount {0}'.format(mountspec))
                if not self._mount(mountspec):
                    log.critical('Mount failure, unable to configure chroot')
                    return False
        log.debug('Mounts configured')
        return True

    def _install_provision_configs(self):
        # Copy the configured provisioning files into the chroot.
        config = self.plugin_config
        files = config.get('provision_config_files', [])
        if files:
            if not install_provision_configs(files, self.root_mountspec.mountpoint):
                log.critical('Error installing provisioning configs')
                return False
            else:
                log.debug('Provision config files successfully installed')
                return True
        else:
            log.debug('No provision config files configured')
            return True

    def _unmount_root(self):
        # Boolean wrapper around _unmount for the root volume.
        try:
            self._unmount(self.root_mountspec)
        except VolumeException:
            return False
        else:
            return True

    @fails("aminator.distro.linux.teardown_chroot.error")
    @timer("aminator.distro.linux.teardown_chroot.duration")
    def _teardown_chroot(self):
        # Undo _configure_chroot: re-enable services, remove provisioning
        # configs, and unmount everything. False on any failure.
        log.debug('Tearing down chroot at {0.mountpoint}'.format(self.root_mountspec))
        # TODO: kvick we should rename 'short_circuit' to something like 'disable_service_start'
        if self.plugin_config.get('short_circuit', True):
            if not self._activate_provisioning_service_block():
                log.critical('Failure during re-enabling service startup')
                return False
        if self.plugin_config.get('provision_configs', True):
            if not self._remove_provision_configs():
                log.critical('Removal of provisioning config failed')
                return False
        if not self._teardown_chroot_mounts():
            log.critical('Teardown of chroot mounts failed')
            return False
        log.debug('Chroot environment cleaned')
        return True

    def _teardown_chroot_mounts(self):
        # Unmount the chroot mounts in reverse order, sweep any strays, then
        # unmount the root volume. When recursive_unmount is enabled, the
        # root unmount handles child mounts itself.
        if not self.plugin_config.get('recursive_unmount', False):
            if self.plugin_config.get('configure_mounts', True):
                for mountdef in reversed(self.plugin_config.chroot_mounts):
                    dev, fstype, mountpoint, options = mountdef
                    mountpoint = mountpoint.lstrip('/')
                    mountpoint = os.path.join(self.root_mountspec.mountpoint, mountpoint)
                    mountspec = MountSpec(dev, fstype, mountpoint, options)
                    log.debug('Attempting to unmount {0.mountpoint}'.format(mountspec))
                    try:
                        self._unmount(mountspec)
                    except VolumeException:
                        log.critical('Unable to unmount {0.mountpoint}'.format(mountspec))
                        return False
            log.debug('Checking for stray mounts')
            for mountpoint in lifo_mounts(self.root_mountspec.mountpoint):
                log.debug('Stray mount found: {0}, attempting to unmount'.format(mountpoint))
                try:
                    self._unmount(mountpoint)
                except VolumeException:
                    log.critical('Unable to unmount {0}'.format(mountpoint))
                    return False
        if not self._unmount_root():
            # BUG FIX: the replacement field was written '{0.mountpoint)'
            # (mismatched closing brace), so str.format raised ValueError here
            # instead of logging the root-unmount failure.
            err = 'Unable to unmount root volume at {0.mountpoint}'
            err = err.format(self.root_mountspec)
            log.critical(err)
            return False
        log.debug('Teardown of chroot mounts succeeded!')
        return True

    def _remove_provision_configs(self):
        # Remove the provisioning files previously copied into the chroot.
        config = self.plugin_config
        files = config.get('provision_config_files', [])
        if files:
            if not remove_provision_configs(files, self.root_mountspec.mountpoint):
                log.critical('Error removing provisioning configs')
                return False
            else:
                log.debug('Provision config files successfully removed')
                return True
        else:
            log.debug('No provision config files configured')
            return True

    @property
    def root_mountspec(self):
        # MountSpec for the root volume, set by __enter__.
        return self._root_mountspec

    def __enter__(self):
        # Compute the root mountpoint under the configured volume dir and
        # configure the chroot; cleans up mounts and raises VolumeException
        # if setup fails.
        if self._config.volume_dir.startswith(('~', '/')):
            root_base = os.path.expanduser(self._config.volume_dir)
        else:
            root_base = os.path.join(self._config.aminator_root, self._config.volume_dir)
        root_mountpoint = os.path.join(root_base, os.path.basename(self.context.volume.dev))
        self._root_mountspec = MountSpec(self.context.volume.dev, None, root_mountpoint, None)
        try:
            chroot_setup = self._configure_chroot()
        except Exception:
            chroot_setup = False
            log.critical('Error encountered during chroot setup. Attempting to clean up volumes.')
            self._teardown_chroot_mounts()
        if not chroot_setup:
            raise VolumeException('Error configuring chroot')
        return self

    def __exit__(self, exc_type, exc_value, trace):
        # Tear down the chroot unless an exception occurred and
        # preserve_on_error is set. Never suppresses exceptions.
        if exc_type:
            log.debug('Exception encountered in Linux distro plugin context manager',
                      exc_info=(exc_type, exc_value, trace))
        if exc_type and self._config.context.get("preserve_on_error", False):
            return False
        if not self._teardown_chroot():
            raise VolumeException('Error tearing down chroot')
        return False
|
ilomedia/snips-skill-clock
|
action-heure.py
|
<reponame>ilomedia/snips-skill-clock<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
from hermes_python.hermes import Hermes
from hermes_python.ontology import *
from datetime import datetime
from pytz import timezone
import io
CONFIGURATION_ENCODING_FORMAT = "utf-8"
CONFIG_INI = "config.ini"
class SnipsConfigParser(configparser.ConfigParser):
    """ConfigParser variant that can dump its contents as a plain dict."""

    def to_dict(self):
        """Return {section: {option: value}} for every parsed section."""
        result = {}
        for section_name in self.sections():
            result[section_name] = dict(self.items(section_name))
        return result
def read_configuration_file(configuration_file):
    """Parse *configuration_file* into {section: {option: value}}.

    Returns an empty dict when the file is missing/unreadable or malformed.
    """
    try:
        with io.open(configuration_file, encoding=CONFIGURATION_ENCODING_FORMAT) as config_fh:
            parser = SnipsConfigParser()
            parser.read_file(config_fh)
            return parser.to_dict()
    except (IOError, configparser.Error):
        return dict()
def subscribe_intent_callback(hermes, intentMessage):
    # Intent entry point: end the session silently when the ASR confidence is
    # below the configured threshold, otherwise delegate to action_wrapper.
    conf = read_configuration_file(CONFIG_INI)
    if intentMessage.asr_confidence < float(conf['global']['confidence_threshold']):
        hermes.publish_end_session(intentMessage.session_id)
    else:
        action_wrapper(hermes, intentMessage, conf)
def verbalise_hour(h, m):
    """Return the French wording for hour *h*, given minute *m*.

    For minutes spoken as "moins ..." (40, 45, 50, 55) the announced hour is
    the next one, e.g. 9:45 -> "dix heures moins le quart".
    """
    if m in [40, 45, 50, 55]:
        h += 1
    if h == 0:
        return "minuit"
    elif h == 1:
        return "une heure"
    elif h == 12:
        return "midi"
    elif h == 21:
        return "vingt et une heures"
    else:
        # BUG FIX: previously formatted the undefined name `i`, raising
        # NameError for every hour other than 0, 1, 12 and 21.
        return "{0} heures".format(str(h))
def verbalise_minute(i):
    """Return the French wording for minute *i* (0-59); '' for on-the-hour."""
    if i == 0:
        return ""
    elif i == 1:
        return "une"
    elif i == 21:
        return "vingt et une"
    elif i == 31:
        return "trente et une"
    elif i == 41:
        return "quarante et une"
    elif i == 51:
        return "cinquante et une"
    elif i == 15:
        return "et quart"
    elif i == 30:
        return "et demi"
    elif i == 40:
        return "moins vingt"
    elif i == 45:
        return "moins le quart"
    elif i == 50:
        return "moins dix"
    elif i == 55:
        # BUG FIX: this branch tested `i == 50`, duplicating the previous one
        # and making "moins cinq" unreachable. verbalise_hour bumps the hour
        # for minute 55, so "moins cinq" belongs to x:55.
        return "moins cinq"
    else:
        return "{0}".format(str(i))
def action_wrapper(hermes, intentMessage, conf):
    # Build the spoken "Il est ..." sentence for the current time in
    # Europe/Paris and end the session with it.
    sentence = 'Il est '
    print(intentMessage.intent.intent_name)
    now = datetime.now(timezone('Europe/Paris'))
    minute = verbalise_minute(now.minute)
    if now.hour > 12:
        # Afternoon/evening: speak on a 12-hour basis with a suffix.
        # NOTE(review): "laprès" is missing its apostrophe ("l'après");
        # the TTS output may still be acceptable — confirm before changing.
        heure = verbalise_hour(now.hour - 12, now.minute) + " " + minute + ", de laprès midi"
    else:
        heure = verbalise_hour(now.hour, now.minute) + " " + minute
    sentence += heure
    print(sentence)
    hermes.publish_end_session(intentMessage.session_id, sentence)
if __name__ == "__main__":
mqtt_opts = MqttOptions()
with Hermes(mqtt_options=mqtt_opts) as h:
h.subscribe_intent("duch:askTime", subscribe_intent_callback).start()
|
FeliciaLeaton/Project3a
|
flask_wtforms_tutorial/charts.py
|
<reponame>FeliciaLeaton/Project3a
'''
This web service extends the Alphavantage API with a visualization module,
converting JSON query results returned from the API into charts and other graphics.
This module contains the code that queries the API.
'''
import requests
from datetime import datetime
from datetime import date
import pygal
#Helper function for converting date
def convert_date(str_date):
    """Parse an ISO 'YYYY-MM-DD' string into a datetime.date."""
    parsed = datetime.strptime(str_date, '%Y-%m-%d')
    return parsed.date()
api_key = "<KEY>"
#api docs https://www.alphavantage.co/documentation/
def getData(time_series, symbol, api_key):
    """Query the Alpha Vantage time-series endpoint and return the parsed JSON.

    time_series: 1=intraday, 2=daily, 3=weekly, 4=monthly; any other value is
    passed through unchanged (so an already-named series string also works).
    API docs: https://www.alphavantage.co/documentation/
    """
    series_names = {
        1: "TIME_SERIES_INTRADAY",
        2: "TIME_SERIES_DAILY",
        3: "TIME_SERIES_WEEKLY",
        4: "TIME_SERIES_MONTHLY",
    }
    series = series_names.get(time_series, time_series)
    url = "https://www.alphavantage.co/query?function=" + series + "&symbol=" + symbol
    if series == "TIME_SERIES_INTRADAY":
        # Intraday queries require an explicit interval parameter.
        url += "&interval=30min"
    url += "&apikey=" + api_key
    return requests.get(url).json()
#main
# Interactive CLI loop: prompt for a symbol, chart type, time series and date
# range, then chart the open/high/low/close series with pygal in the browser.
# The loop runs until the process is interrupted (Ctrl-C); `do_program` is
# never cleared, matching the original behavior.
do_program = True
while (do_program):
    print("Stock Data Visualizer\n======================")
    symbol = input("\nEnter the stock symbol are looking for: ")
    # Validate the symbol.  Alpha Vantage answers HTTP 200 even for unknown
    # symbols, so the response body must be inspected: an invalid symbol
    # yields a JSON object containing an "Error Message" key.  (The original
    # looped on status_code != 200, which never triggered, and reassigned
    # `checksym` to a plain string inside the loop, which would have raised
    # AttributeError on the next status_code check.)
    check_url = ("https://www.alphavantage.co/query?function=TIME_SERIES_DAILY"
                 "&symbol=" + symbol + "&apikey=" + api_key)
    while "Error Message" in requests.get(check_url).json():
        print("Unknown Stock.\n")
        symbol = input("\nEnter the stock symbol are looking for: ")
        check_url = ("https://www.alphavantage.co/query?function=TIME_SERIES_DAILY"
                     "&symbol=" + symbol + "&apikey=" + api_key)
    print("\nChart Types\n==================\n1. Bar\n2. Line")
    chart_type = input("Enter the chart type you want (1 , 2):")
    # Re-prompt until a valid choice is made.  (The original condition used
    # `!= '1' or != '2'`, which is always true, and never re-prompted.)
    while chart_type not in ('1', '2'):
        print("\nPlease try again! Pick number 1 or 2")
        chart_type = input("Enter the chart type you want (1 , 2):")
    print("\nSelect the Time Series of chart you want to Generate\n=================================================================")
    print("\n1. Intraday\n2. Daily\n3. Weekly\n4. Monthly")
    time_series = int(input("Enter the time series option (1, 2, 3, 4): "))
    while time_series not in (1, 2, 3, 4):
        print("Enter a 1, 2, 3, or 4 for the time series option")
        time_series = int(input("Enter the time series option (1, 2, 3, 4): "))
    # JSON key under which the API nests the requested time series.
    time = {1: "Time Series (30min)",
            2: "Time Series (Daily)",
            3: "Weekly Time Series",
            4: "Monthly Time Series"}[time_series]
    # Date validation.  The original called timemod.strptime, but `timemod`
    # is never imported anywhere in this module; datetime.strptime (imported
    # at the top of the file) performs the same check.
    valid = False
    while not valid:
        start_date = input("Enter the start Date (YYYY-MM-DD): ")
        try:
            datetime.strptime(start_date, "%Y-%m-%d")
            valid = True
        except ValueError:
            print("Enter a valid start date")
    # The end date must both parse and be strictly after the start date.
    valid = False
    while not valid:
        end_date = input("Enter the end Date (YYYY-MM-DD): ")
        try:
            if datetime.strptime(end_date, "%Y-%m-%d") > datetime.strptime(start_date, "%Y-%m-%d"):
                valid = True
            else:
                print("End date must be later than start date.")
        except ValueError:
            print("Enter a valid end date")
    apidata = getData(time_series, symbol, api_key)
    # Flatten the nested {date: {open/high/low/close}} response into parallel
    # lists.  The API returns entries newest-first; they are reversed before
    # charting so the x-axis runs oldest-to-newest.
    # TODO(review): start_date/end_date only label the chart title; the data
    # is not filtered to that range — confirm whether filtering was intended.
    # (Two stray calls — plt.bar(start_data) / plt.bar(end_data) / plt.show()
    # — were removed here: `plt` was never imported and `start_data` never
    # defined, so they could only raise NameError.)
    datedata = list(apidata[time].keys())
    opendata = []
    highdata = []
    lowdata = []
    closeddata = []
    for value in apidata[time].values():
        opendata.append(value['1. open'])
        highdata.append(value['2. high'])
        lowdata.append(value['3. low'])
        closeddata.append(value['4. close'])
    # Convert a list of numeric strings to floats, in place.
    def convert(data):
        for i in range(0, len(data)):
            data[i] = float(data[i])
        return data
    # Build the selected chart type; everything else is identical between the
    # bar and line variants, so the duplicated branches are merged.
    if (chart_type == '1'):
        chart = pygal.Bar(x_label_rotation=70)
    else:
        chart = pygal.Line(x_label_rotation=70)
    chart.title = ('Stock Data for ' + symbol + ": " + start_date + ' to ' + end_date)
    datedata.reverse()
    chart.x_labels = datedata
    opendata.reverse()
    chart.add('Open', convert(opendata))
    highdata.reverse()
    chart.add('High', convert(highdata))
    lowdata.reverse()
    chart.add('Low', convert(lowdata))
    closeddata.reverse()
    chart.add('Close', convert(closeddata))
    chart.render_in_browser()
|
SkullTech/portfolio-devclub
|
portfolio/projects/urls.py
|
<filename>portfolio/projects/urls.py
from django.conf.urls import url
from . import views
# Map the app's root URL to the projects list view; the name enables
# {% url 'projects' %} reversing in templates.
urlpatterns = [
    url(r'^$', views.projects, name='projects'),
]
|
SkullTech/portfolio-devclub
|
portfolio/blog/models.py
|
from django.db import models
from django.utils import timezone
class Tag(models.Model):
    """A label attachable to blog posts (many-to-many via ``Post.tags``)."""
    name = models.CharField(max_length=50)
    # description is optional: nullable, blank-allowed, and defaults to ''.
    description = models.CharField(max_length=200, null=True, blank=True, default='')
    def __str__(self):
        return self.name
class Post(models.Model):
    """A blog post authored by a Django auth user, with optional tags."""
    # on_delete is optional on this project's Django version (1.10, per the
    # generated migrations) but required from Django 2.0 onwards.  CASCADE is
    # the pre-2.0 implicit default, so behavior is unchanged.
    author = models.ForeignKey('auth.user', on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    text = models.TextField()
    created_date = models.DateTimeField(default=timezone.now)
    # published_date stays NULL until publish() is called.
    published_date = models.DateTimeField(blank=True, null=True)
    tags = models.ManyToManyField(Tag)
    def publish(self):
        """Stamp the post as published now and persist the change."""
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
|
SkullTech/portfolio-devclub
|
portfolio/contact/admin.py
|
from django.contrib import admin
from .models import ContactMessage
# Expose ContactMessage in the Django admin with the default ModelAdmin.
admin.site.register(ContactMessage)
|
SkullTech/portfolio-devclub
|
portfolio/projects/views.py
|
from django.shortcuts import render
from .github import repositories
def projects(request):
    """Render the projects page listing the configured GitHub repositories."""
    context = {'repos': repositories()}
    return render(request, 'projects/projects.html', context)
|
SkullTech/portfolio-devclub
|
portfolio/contact/forms.py
|
from django import forms
from django.urls import reverse
from .models import ContactMessage
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class ContactForm(forms.ModelForm):
    """ModelForm for ContactMessage, rendered/submitted via django-crispy-forms."""
    class Meta:
        model = ContactMessage
        # submitted_date is stamped server-side in ContactMessage.send(),
        # so the user never supplies it.
        exclude = ['submitted_date']
    def __init__(self, *args, **kwargs):
        super(ContactForm, self).__init__(*args, **kwargs)
        # Configure crispy-forms rendering: POST back to the 'contact' URL.
        self.helper = FormHelper()
        self.helper.form_id = 'contactForm'
        self.helper.form_method = 'post'
        self.helper.form_action = reverse('contact')
|
SkullTech/portfolio-devclub
|
portfolio/contact/views.py
|
<reponame>SkullTech/portfolio-devclub
from django.shortcuts import render
from django.utils import timezone
from .forms import ContactForm
from .models import ContactMessage
def contact(request):
    """Render the contact page; on a valid POST, save the submitted message.

    NOTE(review): there is no redirect after a successful POST (no
    POST/redirect/GET), so refreshing the page re-submits the form —
    confirm whether that is intended.
    """
    # All messages submitted up to now, oldest first.
    messages = ContactMessage.objects.filter(submitted_date__lte=timezone.now()).order_by('submitted_date')
    form = ContactForm(request.POST or None)
    if form.is_valid():
        message = form.save(commit=False)
        # send() stamps submitted_date with the current time and saves.
        message.send()
    return render(request, 'contact/contact.html', {'form': form, 'messages': messages})
|
SkullTech/portfolio-devclub
|
portfolio/contact/migrations/0001_initial.py
|
<reponame>SkullTech/portfolio-devclub<filename>portfolio/contact/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-03 15:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the contact_contactmessage
    # table.  Applied migrations should not be edited by hand.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='ContactMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('message', models.TextField()),
                ('name', models.CharField(max_length=50)),
                ('email', models.EmailField(max_length=254)),
                ('submitted_date', models.DateTimeField()),
            ],
        ),
    ]
|
SkullTech/portfolio-devclub
|
portfolio/contact/models.py
|
<reponame>SkullTech/portfolio-devclub
from django.db import models
from django.utils import timezone
class ContactMessage(models.Model):
    """A message submitted through the site's contact form."""
    title = models.CharField(max_length=100)
    message = models.TextField()
    name = models.CharField(max_length=50)
    email = models.EmailField()
    # Stamped in send(); not user-supplied (excluded from ContactForm).
    submitted_date = models.DateTimeField()
    def __str__(self):
        return self.title
    def send(self):
        """Record the submission time and persist the message."""
        self.submitted_date = timezone.now()
        self.save()
|
SkullTech/portfolio-devclub
|
portfolio/projects/github.py
|
import os
from github3 import login
def repositories():
    """Log in to GitHub using credentials from the environment and return
    an iterator over that user's repositories.

    NOTE(review): iter_user_repos and username/password login look like the
    github3.py 0.x API — confirm the pinned library version.
    """
    username = os.environ.get('GITHUB_USERNAME', None)
    password = os.environ.get('GITHUB_PASSWORD', None)
    gh = login(username=username, password=password)
    return gh.iter_user_repos(username)
|
RaymondKirk/cocoapi
|
PythonAPI/setup.py
|
<reponame>RaymondKirk/cocoapi
from setuptools import dist, setup, Extension
# Build-time requirements, fetched eagerly below so Cython and numpy are
# importable before the extension modules are declared.
install_requires = [
    'setuptools>=18.0',
    'cython>=0.27.3',
    'matplotlib>=2.1.0',
    'numpy>=1.16.2,<=1.19'
]
dist.Distribution().fetch_build_eggs(install_requires)
# numpy can only be imported after fetch_build_eggs has ensured it exists.
import numpy as np
# To compile and install locally run "python setup.py build_ext --inplace"
# To install library to Python site-packages run "python setup.py build_ext install"
ext_modules = [
    Extension(
        'rays_pycocotools._mask',
        # C sources: the shared COCO mask API plus its Cython wrapper.
        sources=['../common/maskApi.c', 'rays_pycocotools/_mask.pyx'],
        include_dirs = [np.get_include(), '../common'],
        extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],
    )
]
setup(
    name='rays_pycocotools',
    packages=['rays_pycocotools'],
    package_dir={'rays_pycocotools': 'rays_pycocotools'},
    description="Wrapper of pycocotools that correctly installs with pip.",
    long_description=open("README.md").read(),
    version='2.6.1',
    ext_modules=ext_modules,
    python_requires='>=3.6',
)
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture8/Dictionaries and Sets.py
|
<filename>_build/jupyter_execute/Lecture8/Dictionaries and Sets.py
#!/usr/bin/env python
# coding: utf-8
# # Dictionaries and Sets
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Motivation for associative container
# The following code simulates the outcomes from rolling a dice multiple times.
# In[2]:
import random
dice_rolls = [random.randint(1,6) for i in range(10)]
print(*dice_rolls)
# **What is the distribution, i.e., fractional counts?**
# In[3]:
distribution = [dice_rolls.count(i) / len(dice_rolls) for i in range(7)]
import matplotlib.pyplot as plt
plt.stem(range(7), distribution, use_line_collection=True)
plt.xlabel('Outcomes')
plt.title('Distribution')
plt.ylim(0, 1)
# In the above code, `distribution[i]` stores the fractional count of outcome `i`.
# However, `distribution[0]` is `0` because a dice does not have outcome `0`. Can we avoid such redundancy?
# In[4]:
distinct_outcomes = [
outcome for outcome in range(1, 7) if dice_rolls.count(outcome) > 0
]
distribution = [
dice_rolls.count(distinct_outcomes[i]) / len(dice_rolls)
for i in range(len(distinct_outcomes))
]
import matplotlib.pyplot as plt
plt.stem(distinct_outcomes, distribution, use_line_collection=True)
plt.xlabel('Outcomes')
plt.title('Distribution')
plt.ylim(0, 1)
# In the above code,
# - `distinct_outcomes` stores the list of distinct outcomes, and
# - `distribution[distinct_outcomes[i]]` stores the fractional count of the `i`-th distinct outcome.
# What about finding the distribution of characters in an article?
# There are 1,112,064 unicode characters.
# - How obtain the distribution efficiently without creating an entry for each unicode character?
# - How to compute the set of distinct characters efficiently without iterating over the set of all unicode characters?
# - Can we index `distribution` directly by the set of distinct characters?
# What we need is a composite data type that
# - can keep a set of *unique keys of different types* (such as the characters in our example), and
# - associate to different keys possibly different *values of any types* such as (the fractional counts of the characters).
#
# Such data structure is called an [associative container](https://en.wikipedia.org/wiki/Associative_containers).
# **How to use associative containers in Python?**
# There are two built-in classes for associative containers:
# - `set` can store a set of unique keys of possibly different types.
# - `dict`ionary can store a set of key-value pairs.
# We have already used sets and dictionaries before.
# In[5]:
get_ipython().run_cell_magic('mytutor', '-h 400', 'a = (lambda **kwargs: kwargs)(start=0, stop=5, step=1)\nb = set([1,1,2,3,3,3])\nassert len(a) == len(b)')
# Both `set` and `dict`
# - implement `len` method that returns the number of keys, and
# - are mutable, so we can mutate their keys and values.
# ## Constructing associative containers
# **How to create set/dictionary?**
# Similar to tuple/list, we can use enclosure, constructors, and comprehension.
# **How to create a set/dict by enumerating its keys/values?**
# For `dict`, enclose a comma-separated sequence of `key : value` pairs by braces `{` and `}`.
# In[6]:
get_ipython().run_cell_magic('mytutor', '-h 350', "empty_dictionary = {}\na = {'a': 0, 'b': 1}\nb = {**a, 'c': 0, 'd': 1}")
# For `set`, omit `: value`.
# In[7]:
get_ipython().run_cell_magic('mytutor', '-h 300', "a = {(1, 2.0), print, *range(2), *'23'}\nempty_set = {*()} # Why not use {}?")
# We can also create a set/dictionary from other objects using their constructors `set`/`dict`.
# In[8]:
get_ipython().run_cell_magic('mytutor', '-h 550', "empty_set = set()\nstring2set = set('abc')\nrange2set = set(range(2))\nlist2set = set(['abc',range(2)])\nset2set = set(list2set)")
# In[9]:
get_ipython().run_cell_magic('mytutor', '-h 650', "empty_dict = dict()\nenumerate2dict = dict(enumerate('abc'))\nzip2dict = dict(zip('abc','123'))\nkwargs2dict = dict(one=1,two=2)\ndict2dict = dict(kwargs2dict)")
# **Exercise** `dict` also has a [*class method* `fromkeys`](https://docs.python.org/3/library/stdtypes.html#dict.fromkeys) to construct a dictionary with keys from iterable pointing to a default value. Create a dictionary using `fromkeys` with keys being the non-negative integers smaller than `100` and values being `0`.
#
# *Hint:* Use `dict.fromkeys` since a class method is bound to the class rather than an object of the class.
# In[10]:
get_ipython().run_line_magic('pinfo', 'dict.fromkeys')
### BEGIN SOLUTION
fromkeys_dict = dict.fromkeys(range(100),0)
### END SOLUTION
# test
assert all(fromkeys_dict[k] == 0 for k in fromkeys_dict)
# **How to use a rule to construct a set/dictionary?**
# The following function uses a one-line dictionary comprehension to return the distribution of items in a sequence:
# In[11]:
def distribute(seq):
    """Map each distinct item of *seq* to its fractional count."""
    length = len(seq)
    return dict((item, seq.count(item) / length) for item in set(seq))
# In[12]:
import matplotlib.pyplot as plt
def plot_distribution(seq):
    """Stem-plot the fractional count of each distinct item in *seq*."""
    dist = distribute(seq)
    plt.stem(dist.keys(),  # set-like view of the keys
             dist.values(),  # view of the values
             use_line_collection=True)
    plt.xlabel('Items')
    plt.title('Distribution')
    plt.ylim(0, 1)
plot_distribution('What is the distribution of different characters?')
# - The object methods `keys` and `values` provide a dynamic [view](https://docs.python.org/3/glossary.html#term-dictionary-view) of the keys.
# - Unlike a copy, subsequent changes to the dictionary are also reflected in a previously returned view.
# - `items` provides a set-like view of the key-value pairs.
# In[13]:
get_ipython().run_cell_magic('mytutor', '-h 500', "a = dict(enumerate('abc'))\nviews = a.keys(), a.values(), a.items()\na.pop(1) # remove the key 1 and its associated value\na.popitem() # remove and return a key-value pair\na.clear() # clear the dictionary")
# `set` has `pop` and `clear` but not `popitem`. However, `set.pop` behaves like `dict.popitem` instead of `dict.pop`. (Why?)
# In[14]:
get_ipython().run_cell_magic('mytutor', '-h 250', "a = set('abc')\na.pop() # remove and return an element\na.clear() # clear the set")
# **Exercise** Use one-line comprehension to return a set of composite numbers smaller than `stop`.
#
# *Hint:* You do not need to worry about duplicate elements for `set`.
# In[15]:
def composite_set(stop):
    """Return the set of composite numbers smaller than *stop* (sieve-style)."""
    ### BEGIN SOLUTION
    composites = set()
    for factor in range(2, stop):
        for multiple in range(factor * 2, stop, factor):
            composites.add(multiple)
    return composites
    ### END SOLUTION
print(*sorted(composite_set(100)))
# ## Hashability
# For `set` and `dict`,
# - identical keys are merged to the same entry even though
# - values associated with different keys can be the same.
# In[16]:
get_ipython().run_cell_magic('mytutor', '-h 350', "a = {0: 'a', 0.0: 'b', 2: 'b'}\nb = {0j, 0, 0.0, '', False}\nassert 0 == 0.0 == 0j == False != ''")
# This is implemented efficiently by [*hashing*](https://docs.python.org/3/glossary.html#term-hashable). A key must be a hashable object which:
#
# - has a hash value (returned by `__hash__` method) that never changes during its lifetime, and
# - can be compared (using `__eq__` method) to other objects.
# *Hashable objects which compare equal must have the same hash value.*
# In[17]:
import collections
for i in 0, 0.0, 0j, '', False, (), [], {}, set(), frozenset():
if isinstance(i, collections.abc.Hashable):
print('{} is hashable. E.g., hash({!r}) == {}'.format(type(i),i,hash(i)))
else:
print('{} is NOT hashable.'.format(type(i)))
# **Why the key should be hashable?**
# **What is the use of a hash value?**
# Associative containers are implemented as *hash tables* for efficient lookup of key values.
# In[18]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/LPzN8jgbnvA" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# Most mutable objects are not hashable. Why?
# Mutating a key makes it a different key, which is [hard to track](https://hynek.me/articles/hashes-and-equality/).
# `set` has an immutable counterpart called `frozenset`, but `dict` does not have any immutable counterpart. Why?
# While elements of a set must be hashable and therefore mostly immutable, dictionary values may be of mutable types.
# Python also uses dictionary for its global/local frames.
# Indeed, [hash collisions can slow down the lookup process](https://stackoverflow.com/questions/8271139/why-is-early-return-slower-than-else).
# **Exercise** Why equal objects must have the same hash but different objects may have the same hash? An example is given below:
# In[19]:
assert hash(0) == hash(0.0) == hash(0j) == hash(False) == hash('') and False != ''
# 1. To avoid duplicate keys occupying different entries in a hash table.
# 2. Hash collision can be detected by `==` and handled by [collision resolution](https://en.wikipedia.org/wiki/Hash_table#Collision_resolution) techniques. To keep the hash table small, hash collision is unavoidable.
# ## Accessing keys/values
# **How to traverse a set/dictionary?**
# Set and dictionaries are iterable.
# The for loop iterates over the keys.
# In[20]:
a = set('abcde')
b = dict(enumerate('abcde'))
print(*(element for element in a))
print(*((key,b[key]) for key in b))
a[0] # TypeError
# - For the dictionary `b`, we used subscription `b[key]` to access the value associated with `key`.
# - Unlike dictionary, set does not implement [`__getitem__`](https://docs.python.org/3/reference/datamodel.html#object.__getitem__) and is therefore not subscriptable.
# Unlike tuple/list, `b[-1]` does not refer to the value of the last entry. (Dictionary is not ordered.)
# In[21]:
b[-1] # KeyError
# The above raises a key error because `-1` is not a key in the dictionary `b`.
# Dictionary implements the [`__setitem__`](https://docs.python.org/3/reference/simple_stmts.html#assignment-statements) method so we can enter a key value pair to a dictionary using the assignment operator.
# In[22]:
b[-1] = 'f'
b[-1]
# To delete a key, we can use the function `del`.
# In[23]:
del b[-1]
b[-1]
# To avoid key error, we can check if a key is in a dictionary efficiently (due to hashing) using the `in` operator.
# The following is a different implementation of `distribute`.
# In[24]:
def distribute(seq):
    """Build the distribution incrementally with a single pass over *seq*."""
    dist = {}
    for item in seq:
        if item in dist:
            dist[item] = dist[item] + 1 / len(seq)
        else:
            dist[item] = 1 / len(seq)
    return dist
plot_distribution('What is the distribution of different characters?')
# **Exercise** Unlike the previous implementation using one-line dictionary comprehension, the above alternative implementation uses multiple lines of code to build the dictionary incrementally starting from an empty dictionary.
# ```Python
# def distribute(seq):
# return {k : seq.count(k)/len(seq) for k in set(seq)}
# ```
# Explain whether the alternative is more efficient.
# It is more efficient because
# - the alternative implementation traverses `seq` once with near constant time lookup of the key, but
# - the list comprehension can traverse `seq` a multiple times linear in `len(seq)`, since every call to `seq.count` has to traverse `seq` once.
#
# Shorter code needs not be more efficient.
# **Exercise** `dict` also has a getter method `get` that conveniently returns a default value if the key does not exist. Rewrite the alternative implementation of `distribute` to use `get` instead of `in`.
# In[25]:
get_ipython().run_line_magic('pinfo', 'dict.get')
def distribute(seq):
    """Compute the fractional count of each item in *seq* via dict.get."""
    dist = {}
    for element in seq:
        ### BEGIN SOLUTION
        dist[element] = dist.get(element, 0) + 1 / len(seq)
        ### END SOLUTION
    return dist
plot_distribution('What is the distribution of different characters?')
# **How to traverse in ascending order of the keys?**
# We can apply the function `sorted` to a set/dictionary to return a sorted list of the keys.
# In[26]:
get_ipython().run_cell_magic('mytutor', '-h 600', "a = set(reversed('abcde'))\nb = dict(reversed([*enumerate('abcde')]))\nsorted_elements = sorted(a)\nsorted_keys = sorted(b)")
# **Exercise** Re-implement `plot_distribution` to plot the distribution in ascending order of the keys.
# In[27]:
def plot_distribution(seq):
    """Stem-plot the distribution of items in *seq* in ascending key order.

    Fix: this module imports matplotlib.pyplot as ``plt`` (not ``pyplot``),
    so the original ``pyplot.*`` calls raised NameError on every invocation.
    """
    dist = distribute(seq)
    ### BEGIN SOLUTION
    # Sort the (key, value) pairs by key so the x-axis is ascending.
    dist_list = sorted(dist.items(), key=lambda p: p[0])
    plt.stem([p[0] for p in dist_list], [p[1] for p in dist_list],
             use_line_collection=True)
    ### END SOLUTION
    plt.xlabel('Items')
    plt.title('Distribution')
    plt.ylim(0, 1)
plot_distribution('What is the distribution of different characters?')
# **How to add an element to a set and remove an element from it?**
# Instead of subscription, `set` has the `add`/`discard`/`remove` methods for adding/removing elements.
# In[28]:
get_ipython().run_cell_magic('mytutor', '-h 400', "a = set('abc')\na.add('d') \na.discard('a') \na.remove('b')\na.clear()\na.discard('a') # no error\na.remove('b') # KeyError")
# ## Other operators and methods
# Unlike `str`/`tuple`/`list`, `set` and `dict` do not implement addition `+` and multiplication `*`:
# In[29]:
any(hasattr(container, attr) for attr in ('__add__', '__mult__')
for container in (dict, set, frozenset))
# **Exercise** Use the unpacking operators `*` and `**` to concatenate two sets/dictionaries below into a new set/dictionary.
# In[30]:
set1 = set('abc')
set2 = set('cde')
### BEGIN SOLUTION
concatenated_set = {*set1,*set2}
### END SOLUTION
concatenated_set
# In[31]:
dict1 = dict(enumerate('abc'))
dict2 = dict(enumerate('def',start=2))
### BEGIN SOLUTION
concatenated_dict = {**dict1,**dict2}
### END SOLUTION
concatenated_dict
# `set` overloads many other operators:
# In[32]:
get_ipython().run_cell_magic('mytutor', '-h 550', 'a, b = {1,2}, {2,3}\n\nunion = a | b\nassert all(i in union for i in a) and all(i in union for i in b)\n\nintersection = a & b\nassert all(i in a and i in b for i in intersection)\n\nassert intersection <= a <= union # subset\nassert union > b > intersection # proper superset\nassert len(a) + len(b) == len(intersection) + len(union) \n\nsymmetric_difference = a ^ b\nassert all((i in a or i in b) and not (i in a and i in b) \n for i in symmetric_difference)\nassert symmetric_difference == union - intersection\nassert set.isdisjoint(intersection, symmetric_difference)\nassert len(union) == len(intersection) + len(symmetric_difference)')
# The following uses `&` and `-` to compare the sets of public attributes for `set` and `dict`:
# In[33]:
set_attributes = {attr for attr in dir(set) if attr[0] != '_'}
dict_attributes = {attr for attr in dir(dict) if attr[0] != '_'}
print('Common attributes:',', '.join(set_attributes & dict_attributes))
print('dict-specific attributes:',', '.join(dict_attributes - set_attributes))
print('set-specific attributes:',', '.join(set_attributes - dict_attributes))
# For `set`, the intersection operation `&` can also be performed by
# - the class method `intersection` which returns the intersection of its arguments, and
# - the object method `intersection_update` which mutates a set object by intersecting the set with the arguments.
# In[34]:
get_ipython().run_cell_magic('mytutor', '-h 300', 'a = {0,1,2}\nb = {1,2,3}\nc = set.intersection(a,b,{2,3,4})\na.intersection_update(b,c)')
# - All other set-specific methods have an associated operator except `isdisjoint` as shown below.
# - The object method for `union` is `update` not `union_update`.
# | class method | object method | operator |
# | ---------------------- | ----------------------------- | ------------ |
# | `union` | `update` | `\| ` |
# | `intersection` | `intersection_update` | `&` |
# | `symmetric_difference` | `symmetric_difference_update` | `^` |
# | `issubset` | | `<=` |
# | `issuperset` | | `>=` |
# | `isdisjoint` | | |
# `dict` also has an `update` method that can update a dictionary using dictionary, iterables and keyword arguments:
# In[35]:
get_ipython().run_cell_magic('mytutor', '-h 300', "a = {}\na.update(enumerate('a'),b=2)\nb = a.copy()\na.update(b,c=3)")
# **Exercise** For `dict`, there is also a method called [`setdefault`](https://stackoverflow.com/questions/3483520/use-cases-for-the-setdefault-dict-method). Use it to define a function `group_by_type` that
# - takes a sequence `seq` of objects and
# - returns a dictionary `d` such that `d[repr(t)]` returns the list of objects in `seq` of type `t`
#
# If there is no objects of type `t`, raise a key error.
# In[36]:
def group_by_type(seq):
    """Group the objects in *seq* into lists keyed by the repr of their type.

    Looking up a type with no objects in the result raises KeyError.
    """
    group = {}
    for obj in seq:
        ### BEGIN SOLUTION
        key = repr(type(obj))
        if key not in group:
            group[key] = []
        group[key].append(obj)
        ### END SOLUTION
    return group
group_by_type([*range(3),
*'abc',
*[i/2 for i in range(3)],
*[(i,) for i in range(3)],
*[[i] for i in range(3)],
*[{i} for i in range(3)],
*[{i:i} for i in range(3)],
print,hash,
int,str,float,set,dict,
(i for i in range(10)),
enumerate('abc'),
range(3),
zip(),
set.add,
dict.copy])
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture3/Iteration.py
|
<filename>_build/jupyter_execute/Lecture3/Iteration.py
#!/usr/bin/env python
# coding: utf-8
# # Iteration
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
from ipywidgets import interact
# ## Motivation
# Many tasks are repetitive:
# - To print from 1 up to a user-specified number, which can be arbitrarily large.
# - To compute the maximum of a sequence of numbers, which can be arbitrarily long.
# - To repeatedly ask users for input until the input is within the right range.
# **How to write code to perform repetitive tasks?**
# E.g., can you complete the following code to print from 1 up to a user-specified number?
# In[2]:
get_ipython().run_cell_magic('mytutor', '-h 300', "num = int(input('>'))\nif 1 < num: print(1)\nif 2 < num: print(2)\nif 3 < num: print(3)\n# YOUR CODE HERE ")
# *code duplication* is not good because:
# - Duplicate code is hard to read/write/maintain.
# Imagine there is a small change needed to every duplicate code.
# - The number of repetitions may not be known before runtime.
# Instead, programmers write a *loop* which specifies a piece of code to be executed iteratively.
# ## For Loop
# ### Iterate over a sequence
# **How to print from 1 up to 4?**
# We can use a [`for` statement](https://docs.python.org/3.3/tutorial/controlflow.html#for-statements) as follows:
# In[3]:
get_ipython().run_cell_magic('mytutor', '-h 300', 'for i in 1, 2, 3, 4:\n print(i)')
# - `i` is automatically assigned to each element in the sequence `1, 2, 3, 4` one-by-one from left to right.
# - After each assignment, the body `print(i)` is executed.
#
# N.b., if `i` is defined before the for loop, its value will be overwritten.
# The assignment is not restricted to integers and can also be a tuple assignment.
# In[4]:
tuples = (0,'l'), (1,'o'), (2,'o'), (3,'p')
for i,c in tuples: print(i,c) # one-liner
# An even shorter code...
# In[5]:
for i,c in enumerate('loop'): print(i,c)
# ### Iterate over a range
# **How to print up to a user-specified number?**
# We can use [`range`](https://docs.python.org/3/library/stdtypes.html#range):
# In[6]:
stop = int(input('>')) + 1
for i in range(stop):
print(i)
# **Why add 1 to the user input number?**
# `range(stop)` generates a sequence of integers from `0` up to *but excluding* `stop`.
# **How to start from a number different from `0`?**
# In[7]:
for i in range(1,5): print(i)
# **What about a step size different from `1`?**
# In[8]:
for i in range(0,5,2): print(i) # starting number must also be specified. Why?
# **Exercise** How to count down from 4 to 0? Do it without addition or subtraction.
# In[9]:
### BEGIN SOLUTION
for i in range(4,-1,-1): print(i)
### END SOLUTION
# **Exercise** Print from `0` to a user-specified number but in steps of `0.5`.
# E.g., if the user inputs `2`, the program should print:
# ```
# 0.0
# 0.5
# 1.0
# 1.5
# 2.0
# ```
#
# *Note:* `range` only accepts integer arguments.
# In[10]:
num = int(input('>'))
### BEGIN SOLUTION
for i in range(0, 2 * num + 1, 1):
print(i / 2)
### END SOLUTION
# **Exercise** How to print the character `'*'` repeatedly for `m` rows and `n` columns?
# *Hint:* Use a *nested for loop*, i.e., write a for loop (called *inner loop*) inside the body of another for loop (*outer loop*).
# In[11]:
@interact(m=(0, 10), n=(0, 10))
def draw_rectangle(m=5, n=5):
    """Print an m-row by n-column rectangle of '*' characters."""
    ### BEGIN SOLUTION
    for i in range(m):  # one pass per row
        for j in range(n):  # print one row without newlines
            print('*', end='')
        print()  # terminate the row
    ### END SOLUTION
# ### Iterate over a string
# **What does the following do?**
# In[12]:
get_ipython().run_cell_magic('mytutor', '-h 300', "for character in 'loop': print(character)")
# A string is *iterable* because it can be regarded as a sequence of characters.
# - The function [`len`](https://docs.python.org/3/library/functions.html#len) can return the length of a string.
# - The indexing operator `[]` can return the character of a string at a specified location.
# In[13]:
message = "loop"
print('length:', len(message))
print('characters:', message[0], message[1], message[2], message[3])
# We can also iterate over a string as follows although it is less elegant:
# In[14]:
for i in range(len('loop')): print('loop'[i])
# **Exercise** Print a string assigned to `message` in reverse.
# E.g., `'loop'` should be printed as `'pool'`.
# In[15]:
@interact(message='loop')
def reverse_print(message):
    """Print *message* reversed, with no trailing newline."""
    ### BEGIN SOLUTION
    for i in range(len(message)):
        # message[-i - 1] walks from the last character back to the first
        print(message[-i - 1], end='')
    ### END SOLUTION
# ## While Loop
# **How to repeatedly ask the user to enter an input until the user input is not empty?**
# Python provides the [`while` statement](https://docs.python.org/3/reference/compound_stmts.html#while) to loop until a specified condition is false.
# In[16]:
# Keep prompting until input() returns a non-empty string:
# an empty string is falsy, so `not input(...)` stays True until then.
while not input('Input something please:'): pass
# As long as the condition after `while` is true, the body gets executed repeatedly. In the above example,
# - if user press enter without inputting anything,
# - `input` returns an empty string `''`, which is [regarded as `False`](https://docs.python.org/3/reference/expressions.html#booleans), and so
# - the looping condition `not input('...')` is `True`.
# **Is it possible to use a for loop instead of a while loop?**
# - Not without hacks because the for loop is a *definite loop* which has a definite number of iterations before the execution of the loop.
# - `while` statement is useful for an *indefinite loop* where the number of iterations is unknown before the execution of the loop.
# It is possible, however, to replace a for loop by a while loop.
# E.g., the following code prints from `0` to `4` using a while loop instead of a for loop.
# In[17]:
# While-loop equivalent of `for i in range(5): print(i)`:
# initialize, test, and increment the counter manually.
i = 0
while i <= 4:
    print(i)
    i += 1
# - A while loop may not be as elegant (short), c.f., `for i in range(5): print(i)`, but
# - it can always be as efficient.
# **Should we just use while loop?**
# Consider using the following while loop to print from `0` to a user-specified value.
# In[18]:
# Count from 0 up to num using an inequality as the loop condition.
num = int(input('>'))
i = 0
# NOTE(review): for inputs <= -2, `i != num + 1` never becomes False and
# this loops forever -- deliberate here to motivate the discussion below.
while i!=num+1:
    print(i)
    i += 1
# **Exercise** Is the above while loop doing the same thing as the for loop below?
# In[19]:
# For-loop version: prints 0 up to the user input inclusive.
# For negative input the range is empty, so nothing is printed.
for i in range(int(input('>')) + 1): print(i)
# When user input negative integers smaller than or equal to -2,
# - the while loop becomes an infinite loop, but
# - the for loop terminates without printing any number.
# We have to be careful not to create unintended *infinite loops*.
# The computer can't always detect whether there is an infinite loop. ([Why not?](https://en.wikipedia.org/wiki/Halting_problem))
# ## Break/Continue/Else Constructs of a Loop
# ### Breaking out of a loop
# **Is the following an infinite loop?**
# In[20]:
get_ipython().run_cell_magic('mytutor', '-h 300', "while True:\n message = input('Input something please:')\n if message: break\nprint('You entered:', message)")
# The loop is terminated by the [`break` statement](https://docs.python.org/3/tutorial/controlflow.html#break-and-continue-statements-and-else-clauses-on-loops) when user input is non-empty.
# **Why is the `break` statement useful?**
# Recall the earlier `while` loop:
# In[21]:
get_ipython().run_cell_magic('mytutor', '-h 300', "while not input('Input something please:'): pass ")
# This while loop is not useful because it does not store the user input.
# **Is the `break` statement strictly necessary?**
# We can avoid `break` statement by using *flags*, which are boolean variables for flow control:
# In[22]:
get_ipython().run_cell_magic('mytutor', '-h 350', "has_no_input = True\nwhile has_no_input:\n message = input('Input something please:')\n if message: has_no_input = False\nprint('You entered:', message)")
# Using flags makes the program more readable, and we can use multiple flags for more complicated behavior.
# The variable names for flags are often `is_...`, `has_...`, etc.
# ### Continue to Next Iteration
# **What does the following program do?
# Is it an infinite loop?**
# In[23]:
get_ipython().run_cell_magic('mytutor', '-h 300', "while True:\n message = input('Input something please:')\n if not message: continue\n print('You entered:', message)")
# - The program repeatedly ask the user for input.
# - If the input is empty, the `continue` statement will skip to the next iteration.
# - The loop can only be terminated by interrupting the kernel.
# - Such an infinite loop can be useful. E.g., your computer clock continuously updates the current time.
# **Exercise** Is the `continue` statement strictly necessary? Can you rewrite the above program without the `continue` statement?
# In[24]:
get_ipython().run_cell_magic('mytutor', '-h 350', "while True:\n message = input('Input something please:')\n ### BEGIN SOLUTION\n if message:\n print('You entered:', message)\n ### END SOLUTION")
# ### Else construct for a loop
# The following program
# - checks whether the user input is a positive integer using `isdigit`, and if so,
# - check if the positive integer is a composite number, i.e., a product of two smaller positive integers.
# In[25]:
@interact(num='1')
def check_composite(num):
    """Print whether the string num is a composite positive integer.

    Demonstrates continue, break, and the else clause of a for loop.
    """
    if num.isdigit():
        num = int(num)
        for divisor in range(2,num):
            if num % divisor:
                # Non-zero remainder: divisor does not divide num; try the next.
                continue
            else:
                # divisor divides num exactly, so num is a product of two
                # smaller positive integers.
                print('It is composite.')
                break
        else:
            # Runs only when the for loop finishes WITHOUT break,
            # i.e., no divisor in [2, num) divides num.
            print('It is not composite.')
    else:
        print('Not a positive integer.')
# In[26]:
get_ipython().run_cell_magic('mytutor', '-h 500 ', "def check_composite(num):\n if num.isdigit():\n num = int(num)\n for divisor in range(2,num):\n if num % divisor:\n continue\n else:\n print('It is composite.')\n break\n else:\n print('It is not composite.')\n else:\n print('Not a positive integer.')\n \ncheck_composite('1')\ncheck_composite('2')\ncheck_composite('3')\ncheck_composite('4')")
# In addition to using `continue` and `break` in an elegant way,
# the code also uses an else clause that is executed only when the loop terminates *normally* not by `break`.
# **Exercise** There are three else clauses in the earlier code. Which one is for the loop?
# - The second else clause that `print('It is not composite.')`.
# - The clause is called when there is no divisor found in the range from `2` to `num`.
# **Exercise** Convert the for loop to a while loop.
# Can you improve the code to use fewer iterations?
# In[27]:
@interact(num='1')
def check_composite(num):
    """Print whether the string num is a composite positive integer.

    While-loop version that only tests divisors up to sqrt(num): a
    composite number must have a factor no larger than its square root.
    """
    if num.isdigit():
        num = int(num)
        # for divisor in range(2,num): # use while instead
        divisor = 2
        while divisor <= num**0.5:
            if num % divisor:
                divisor += 1
            else:
                print('It is composite.')
                break
        else:
            # Runs only when the while condition became False (no break):
            # no divisor up to sqrt(num) divides num.
            print('It is not composite.')
    else:
        print('Not a positive integer.')
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab1/Card.py
|
<reponame>ccha23/CS1302ICP<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# # Card guessing game
# **CS1302 Introduction to Computer Programming**
# ___
# ## Rules of the game
# Consider a deck of 52 cards:
# <table>
# <tr>
# <td></td>
# <th>1 (A)</th>
# <th>2</th>
# <th>3</th>
# <th>4</th>
# <th>5</th>
# <th>6</th>
# <th>7</th>
# <th>8</th>
# <th>9</th>
# <th>10</th>
# <th>11 (J)</th>
# <th>12 (Q)</th>
# <th>13 (K)</th>
# </tr>
# <tr>
# <th style="transform: rotate(-90deg);">Diamond</th>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-A-Diamond.svg"><img width="50" alt="Cards-A-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/e/e9/Cards-A-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-2-Diamond.svg"><img width="50" alt="Cards-2-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/9/99/Cards-2-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-3-Diamond.svg"><img width="50" alt="Cards-3-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/4/44/Cards-3-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-4-Diamond.svg"><img width="50" alt="Cards-4-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/a/af/Cards-4-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-5-Diamond.svg"><img width="50" alt="Cards-5-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/d/dd/Cards-5-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-6-Diamond.svg"><img width="50" alt="Cards-6-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/4/44/Cards-6-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-7-Diamond.svg"><img width="50" alt="Cards-7-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/2/2b/Cards-7-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-8-Diamond.svg"><img width="50" alt="Cards-8-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/9/90/Cards-8-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-9-Diamond.svg"><img width="50" alt="Cards-9-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/2/25/Cards-9-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-10-Diamond.svg"><img width="50" alt="Cards-10-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/c/c2/Cards-10-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-J-Diamond.svg"><img width="50" alt="Cards-J-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/7/78/Cards-J-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-Q-Diamond.svg"><img width="50" alt="Cards-Q-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/c/c3/Cards-Q-Diamond.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-K-Diamond.svg"><img width="50" alt="Cards-K-Diamond" src="https://upload.wikimedia.org/wikipedia/commons/5/55/Cards-K-Diamond.svg"></a></td>
# </tr>
# <tr>
# <th style="transform: rotate(-90deg);">Club</th>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-A-Club.svg"><img width="50" alt="Cards-A-Club" src="https://upload.wikimedia.org/wikipedia/commons/c/c4/Cards-A-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-2-Club.svg"><img width="50" alt="Cards-2-Club" src="https://upload.wikimedia.org/wikipedia/commons/b/b0/Cards-2-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-3-Club.svg"><img width="50" alt="Cards-3-Club" src="https://upload.wikimedia.org/wikipedia/commons/e/e0/Cards-3-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-4-Club.svg"><img width="50" alt="Cards-4-Club" src="https://upload.wikimedia.org/wikipedia/commons/6/69/Cards-4-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-5-Club.svg"><img width="50" alt="Cards-5-Club" src="https://upload.wikimedia.org/wikipedia/commons/7/7e/Cards-5-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-6-Club.svg"><img width="50" alt="Cards-6-Club" src="https://upload.wikimedia.org/wikipedia/commons/a/af/Cards-6-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-7-Club.svg"><img width="50" alt="Cards-7-Club" src="https://upload.wikimedia.org/wikipedia/commons/8/8e/Cards-7-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-8-Club.svg"><img width="50" alt="Cards-8-Club" src="https://upload.wikimedia.org/wikipedia/commons/f/fd/Cards-8-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-9-Club.svg"><img width="50" alt="Cards-9-Club" src="https://upload.wikimedia.org/wikipedia/commons/a/ac/Cards-9-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-10-Club.svg"><img width="50" alt="Cards-10-Club" src="https://upload.wikimedia.org/wikipedia/commons/2/25/Cards-10-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-J-Club.svg"><img width="50" alt="Cards-J-Club" src="https://upload.wikimedia.org/wikipedia/commons/c/c7/Cards-J-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-Q-Club.svg"><img width="50" alt="Cards-Q-Club" src="https://upload.wikimedia.org/wikipedia/commons/3/37/Cards-Q-Club.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-K-Club.svg"><img width="50" alt="Cards-K-Club" src="https://upload.wikimedia.org/wikipedia/commons/9/9e/Cards-K-Club.svg"></a></td>
# </tr>
# <tr>
# <th style="transform: rotate(-90deg);">Heart</th>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-A-Heart.svg"><img width="50" alt="Cards-A-Heart" src="https://upload.wikimedia.org/wikipedia/commons/6/60/Cards-A-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-2-Heart.svg"><img width="50" alt="Cards-2-Heart" src="https://upload.wikimedia.org/wikipedia/commons/6/6e/Cards-2-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-3-Heart.svg"><img width="50" alt="Cards-3-Heart" src="https://upload.wikimedia.org/wikipedia/commons/5/57/Cards-3-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-4-Heart.svg"><img width="50" alt="Cards-4-Heart" src="https://upload.wikimedia.org/wikipedia/commons/3/39/Cards-4-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-5-Heart.svg"><img width="50" alt="Cards-5-Heart" src="https://upload.wikimedia.org/wikipedia/commons/9/91/Cards-5-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-6-Heart.svg"><img width="50" alt="Cards-6-Heart" src="https://upload.wikimedia.org/wikipedia/commons/5/55/Cards-6-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-7-Heart.svg"><img width="50" alt="Cards-7-Heart" src="https://upload.wikimedia.org/wikipedia/commons/d/d4/Cards-7-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-8-Heart.svg"><img width="50" alt="Cards-8-Heart" src="https://upload.wikimedia.org/wikipedia/commons/5/55/Cards-8-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-9-Heart.svg"><img width="50" alt="Cards-9-Heart" src="https://upload.wikimedia.org/wikipedia/commons/d/d2/Cards-9-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-10-Heart.svg"><img width="50" alt="Cards-10-Heart" src="https://upload.wikimedia.org/wikipedia/commons/7/76/Cards-10-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-J-Heart.svg"><img width="50" alt="Cards-J-Heart" src="https://upload.wikimedia.org/wikipedia/commons/e/e7/Cards-J-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-Q-Heart.svg"><img width="50" alt="Cards-Q-Heart" src="https://upload.wikimedia.org/wikipedia/commons/2/28/Cards-Q-Heart.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-K-Heart.svg"><img width="50" alt="Cards-K-Heart" src="https://upload.wikimedia.org/wikipedia/commons/b/bf/Cards-K-Heart.svg"></a></td>
# </tr>
# <tr>
# <th style="transform: rotate(-90deg);">Spade</th>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-A-Spade.svg"><img width="50" alt="Cards-A-Spade" src="https://upload.wikimedia.org/wikipedia/commons/9/9d/Cards-A-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-2-Spade.svg"><img width="50" alt="Cards-2-Spade" src="https://upload.wikimedia.org/wikipedia/commons/e/e7/Cards-2-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-3-Spade.svg"><img width="50" alt="Cards-3-Spade" src="https://upload.wikimedia.org/wikipedia/commons/d/d0/Cards-3-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-4-Spade.svg"><img width="50" alt="Cards-4-Spade" src="https://upload.wikimedia.org/wikipedia/commons/4/4e/Cards-4-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-5-Spade.svg"><img width="50" alt="Cards-5-Spade" src="https://upload.wikimedia.org/wikipedia/commons/b/b1/Cards-5-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-6-Spade.svg"><img width="50" alt="Cards-6-Spade" src="https://upload.wikimedia.org/wikipedia/commons/6/68/Cards-6-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-7-Spade.svg"><img width="50" alt="Cards-7-Spade" src="https://upload.wikimedia.org/wikipedia/commons/c/c6/Cards-7-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-8-Spade.svg"><img width="50" alt="Cards-8-Spade" src="https://upload.wikimedia.org/wikipedia/commons/7/7e/Cards-8-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-9-Spade.svg"><img width="50" alt="Cards-9-Spade" src="https://upload.wikimedia.org/wikipedia/commons/0/0a/Cards-9-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-10-Spade.svg"><img width="50" alt="Cards-10-Spade" src="https://upload.wikimedia.org/wikipedia/commons/6/67/Cards-10-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-J-Spade.svg"><img width="50" alt="Cards-J-Spade" src="https://upload.wikimedia.org/wikipedia/commons/e/ea/Cards-J-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-Q-Spade.svg"><img width="50" alt="Cards-Q-Spade" src="https://upload.wikimedia.org/wikipedia/commons/e/ef/Cards-Q-Spade.svg"></a></td>
# <td><a title="GW Simulations / Public domain" href="https://commons.wikimedia.org/wiki/File:Cards-K-Spade.svg"><img width="50" alt="Cards-K-Spade" src="https://upload.wikimedia.org/wikipedia/commons/1/18/Cards-K-Spade.svg"></a></td>
# </tr>
# </table>
# - Each card is in one of the four suits: **Diamond**, **Club**, **Heart**, and **Spade**.
# - Each card has a value $1 \text{ (A)} < 2 < 3 < 4 < 5 < 6 < 7 < 8 < 9 < 10 < 11 \text{ (J)} < 12 \text{ (Q)} < 13 \text{ (K)}$.
# The following code creates a deck of cards. (You do not need to understand the code for now.)
# In[ ]:
# Create a deck of cards
from collections import namedtuple

# A card is a (value, suit) pair; values run 1 (A) through 13 (K).
Card = namedtuple('Card', ['value', 'suit'])
suits = ("Diamond", "Club", "Heart", "Spade")
values = range(1, 14)
# Value-major order: all four suits of value 1, then of value 2, and so on.
deck = [Card(v, s) for v in values for s in suits]
print(deck)
# To play the game, a dealer randomly pick a card without letting you know, and you're going to guess what exactly that card is.
# In[ ]:
# Randomly draw a card from the deck with replacement
# (the deck itself is unchanged, so repeated runs can repeat cards).
import random
print(random.choice(deck))
# You are allowed to make an informed guess after the dealer answers some of your **yes/no** questions.
# For instance, you may ask:
# - Is the suit club?
# - Is the card diamond 1 (ace)?
# - Is the value at least 10?
# However, you cannot ask:
# - What is the value?
# - What is the suit?
# **Exercise** You win if you can **guess the card correctly with no more than 6 questions**. What is the winning strategy?
# YOUR ANSWER HERE
# Hint 1: <span style="color:white">Obviously, you should not ask whether the card is precisely certain card, e.g., Is it Diamond Ace? Is it Diamond 2? ... Why not? The card may be one of the remaining $52-6=46$ possibilities you did not ask.</span>
# Hint 2: <span style="color:white">Think of each **Yes/No** question as splitting the set of possible cards into two smaller groups of possible cards corresponding to each possible answer **Yes/No**.</span>
# Hint 3: <span style="color:white">How many questions is required to split the set of 52 cards into groups of size $1$, i.e., with only one possible card?</span>
# ## Challenge the computer
# Play the role of the dealer and test if the program below can guess the card correctly after 6 questions.
# In[ ]:
# Guess the card with 6 yes/no questions: 2 questions binary-search the
# suit index into `suits` and 4 questions binary-search the value (1-13).
# Fix: the prompts misspelled "suit" as "suite".
suitIdx = 0
number = 0
# Bit 1 of the suit index: suits[2] (Heart) and suits[3] (Spade) have it set.
if "y" == input(
        "Is the suit either heart or spade? (y/[n]) ").strip().lower():
    suitIdx += 2
# Bit 0 of the suit index: suits[1] (Club) and suits[3] (Spade) have it set.
if "y" == input("Is the suit either club or spade? (y/[n]) ").strip().lower():
    suitIdx += 1
# Each question below decides one bit of the value, from 8 down to 1.
if "y" == input(
        f"Is the number {number+8} or above? (y/[n]) ").strip().lower():
    number += 8
if "y" == input(
        f"Is the number {number+4} or above? (y/[n]) ").strip().lower():
    number += 4
if "y" == input(
        f"Is the number {number+2} or above? (y/[n]) ").strip().lower():
    number += 2
if "y" == input(
        f"Is the number {number+1} or above? (y/[n]) ").strip().lower():
    number += 1
print(f"The card is {suits[suitIdx]} {number}")
# **Exercise** Does the above program always win? Explain your answer?
# YOUR ANSWER HERE
# ## Challenge your understanding
# The following table gives the binary representions of unsigned decimal integers from 0 to 7.
# <table>
# <tr><th>Binary</th><th>Decimal</th></tr>
# <tr><td>000</td><td>0</td></tr>
# <tr><td>001</td><td>1</td></tr>
# <tr><td>010</td><td>2</td></tr>
# <tr><td>011</td><td>3</td></tr>
# <tr><td><b style="color:magenta">1</b>00</td><td style="color:magenta">4</td></tr>
# <tr><td><b style="color:magenta">1</b>01</td><td style="color:magenta">5</td></tr>
# <tr><td><b style="color:magenta">1</b>10</td><td style="color:magenta">6</td></tr>
# <tr><td><b style="color:magenta">1</b>11</td><td style="color:magenta">7</td></tr>
# </table><br>
# To convert binary to decimal, think of the conversion as a guessing game where
# - the binary sequence is a sequence of **yes (1)** or **no (0)** answers to certain **yes/no** questions, and
# - the informed guess is the integer represented by the binary sequence.
# For instance, observe that the binary representation of 4, 5, 6, and 7 actually have <b style="color:magenta">1</b> in the leftmost (most significant) bit. Therefore we can consider that bit as the answer to the following **yes/no** question:
#
# > Is the integer 4 or above?
# **Exercise** What are the **yes/no** questions corresponding to the 2nd bit and 3rd bit?
# YOUR ANSWER HERE
# <h2>References</h2>
# <ul>
# <li><a href=https://www.mathsisfun.com/binary-number-system.html>Binary Number Sytem</a></li>
# <li><a href=https://www.purplemath.com/modules/numbbase.htm>Binary Number Conversions</a></li>
# </ul>
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture1/Introduction to Computer Programming.py
|
#!/usr/bin/env python
# coding: utf-8
# # Introduction to Computer Programming
# **CS1302 Introduction to Computer Programming**
# ___
# ## Computer
# ### What is a computer?
# Is computer a calculator that is bigger and more advanced?
# <center><figure>
# <a title="Ccha23 / CC BY-SA (https://creativecommons.org/licenses/by-sa/4.0)" href="https://commons.wikimedia.org/wiki/File:Calculator_app.png"><img width="400" alt="Calculator app" src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/48/Calculator_app.png/512px-Calculator_app.png"></a>
# <figcaption>A calculator on a computer.</figcaption>
# </figure>
# </center>
# If computer is a calculator, then, is [abacus](https://en.wikipedia.org/wiki/Abacus) the first computer invented?
# <center><figure>
# <a title="Encyclopædia Britannica / Public domain" href="https://commons.wikimedia.org/wiki/File:Abacus_6.png"><img width="400" alt="Abacus 6" src="https://upload.wikimedia.org/wikipedia/commons/a/af/Abacus_6.png"></a>
# <figcaption>Abacus - an ancient mechanical computing device.</figcaption>
# </figure>
# </center>
# Is your [smartphone](https://en.wikipedia.org/wiki/Samsung_DeX) a computer?
# What defines a computer?
# - In addition to performing arithmetic calculations, a computer is designed in such a way that
# - we can write different programs (in a process called **programming** or **software development**)
# - for the computer to execute to perform different tasks.
# ### What is the architecture of a computer?
# A computer contains three main hardware components:
# - Input device
# - Processing unit
# - Output device
# #### Peripherals
# <center><figure>
# <a title="Unsplash" href="https://unsplash.com/photos/gyRa86ExKTw"><img width="600" alt="Computer peripherals" src="https://images.unsplash.com/flagged/photo-1551954810-43cd6aef5b1f?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=3580&q=80"></a>
# <figcaption>Computer Peripherals.</figcaption>
# </figure>
# </center>
# Input and output devices connected to a computer are called *peripherals*.
# They allow users to interact with the computer in different ways.
# **Exercise** Some examples of output devices are:
# - Monitor
# - Speaker
#
# Can you give an awesome example in the following box?
# - 3D printer available at [CityU](https://www.cityu.edu.hk/lib/create/3dprint.htm)
# **Exercise** Some examples of input devices are:
# - Keyboard
# - Mouse
#
# Can you give an awesome example?
# - 3D scanner available at [CityU](https://www.cityu.edu.hk/lib/create/3dscan.htm)
# **Exercise** Many devices are both input and output device. Can you give at least 3 examples?
# - hard disk
# - CD/DVD Rom (writable)
# - touch screen
# #### Central Processing Unit
# <center><figure>
# <a title="Unsplash" href="https://unsplash.com/photos/CKpBhTXvLis"><img width="600" alt="CPU" src="https://images.unsplash.com/photo-1555617981-dac3880eac6e?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1950&q=80"></a>
# <figcaption>An Intel CPU.</figcaption>
# </figure>
# </center>
# The brain of a computer is its processor unit, or the [**C**entral **P**rocesisng **U**nit (CPU)](https://en.wikipedia.org/wiki/Central_processing_unit).
# It is located on the [*motherboard*](https://en.wikipedia.org/wiki/Motherboard) and connects to different peripherals using different [*connectors*](https://en.wikipedia.org/wiki/Category:Computer_connectors).
# Two important components in the CPU are:
# - **A**rithmetic and **L**ogic **U**nit (**ALU**): Performs arithmetics like a calculator (but for binary numbers)
# - **C**ontrol **U**nit (**CU**): Directs the operations of the processor in executing a program.
# Let's run a CPU Simulator below from a [GitHub project](https://github.com/pddring/cpu-simulator).
# - Note that all values are zeros in the RAM (**R**andom **A**cess **M**emory) initially.
# - Under Settings, click `Examples->Add two numbers`. Observe that the values in the RAM have changed.
# - Click `Run` at the bottom right-hand corner.
# In[1]:
get_ipython().run_cell_magic('html', '', '<iframe src="https://tools.withcode.uk/cpu" width="800" height="800">\n</iframe>')
# ## Programming
# ### What is programming?
# Programming is the process of writing programs.
# But what is a program?
# **Exercise** You have just seen a program written in [machine language](https://en.wikipedia.org/wiki/Machine_code). Where is it?
# The first six lines of binary sequences in the RAM. The last line `Ends` the program.
# - The CPU is capable of carrying out
# - a set of instructions such as *addition*, *negation*, *Copy*, etc.
# - some numbers stored in the RAM.
# - Both the instructions and the numbers are represented as binary sequences.
# - E.g., in Intel-based CPU, the command for addition is like **00000011 00000100**
# ### Why computer uses binary representation?
# In[2]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/Xpk67YzOn5w" allowfullscreen></iframe>')
# **Exercise** The first electronic computer, called [Electronic Numerical Integrator and Computer (ENIAC)](https://en.wikipedia.org/wiki/ENIAC), was programmed using binary circuitries, namely *switches* that can be either `On` or `Off`.
#
# <center>
# <figure>
# <a title="United States Army / Public domain" href="https://commons.wikimedia.org/wiki/File:Two_women_operating_ENIAC.gif"><img width="512" alt="Two women operating ENIAC" src="https://upload.wikimedia.org/wikipedia/commons/8/8c/Two_women_operating_ENIAC_%28full_resolution%29.jpg"></a>
# <figcaption>Programmers controlling the switches of ENIAC.</figcaption>
# </figure>
# </center>
#
# However, it did not represent values efficiently in binary: 10 binary digits (bits) were used to represent a decimal number from 0 to 9.
# Indeed, how many decimals can be represented by 10 bits?
# In[3]:
2 ** 10 # because there are that many binary sequences of length 10.
# As mentioned in the video, there are *International Standards* for representing characters:
# - [ASCII](https://en.wikipedia.org/wiki/ASCII) (American Standard Code for Information Interchange) maps English letters and some other symbols to 8-bits (8 binary digits, also called a byte). E.g., `A` is `01000001`.
# - [Unicode](https://en.wikipedia.org/wiki/Unicode) can also represent characters in different languages such as Chinese, Japanese...etc.
# There are additional standards to represent numbers other than non-negative integers:
# - [2's complement format](https://en.wikipedia.org/wiki/Two%27s_complement) for negative integers (e.g. -123)
# - [IEEE floating point format](https://en.wikipedia.org/wiki/IEEE_754) for floating point numbers such as $1.23 \times 10^{-4}$.
# **Why define different standards?**
# - Different standards have different benefits. ASCII requires less storage for a character, but it represents less characters.
# - Although digits are also represented in ASCII, the 2's complement format is designed for arithmetic operations.
# ## Different generations of programming languages
# Machine language is known as the **1st Generation Programming Language**.
# **Are we going to start with machine language?**
# Start with learning 2's complement and the binary codes for different instructions?
# No. Programmers do not write machine codes directly because it is too hard to think in binary representations.
# Instead, programmers write human-readable **mnemonics** such as **ADD**, **SUB**...,
# called **Assembly language**, or the **2nd Generation Programming Language**.
# <center>
# <figure>
# <a title="Swtpc6800 en:User:Swtpc6800 <NAME> / Public domain" href="https://commons.wikimedia.org/wiki/File:Motorola_6800_Assembly_Language.png"><img width="600" alt="Motorola 6800 Assembly Language" src="https://upload.wikimedia.org/wikipedia/commons/f/f3/Motorola_6800_Assembly_Language.png"></a>
# <figcaption>
# A Code written in an assembly language.
# </figcaption>
# </figure>
# </center>
# **Are you going to learn an assembly language?**
# Both machine language and assembly language are low-level language which
# - are difficult to write for complicated tasks (requiring many lines of code), and
# - are platform specific:
# - the sets of instructions and their binary codes can be different for different [types of CPUs](https://en.wikipedia.org/wiki/Comparison_of_CPU_microarchitectures), and
# - different operating systems use [different assembly languages/styles](https://en.wikipedia.org/wiki/X86_assembly_language).
# Anyone want to learn assembly languages, and write a program in many versions to support different platforms?
# Probably for programmers who need to write fast or energy-efficient code such as
# - a driver that controls a 3D graphics card, and
# - a program that control a microprocessor with limited power supply.
# But even in the above cases, there are often better alternatives. Play with the following microprocessor simulator:
# - Click `CHOOSE A DEMO->LED`.
# - Click `RUN SCRIPT` and observes the LED of the board.
# - Run the demos `ASSEMBLY` and `MATH` respectively and compare their capabilities.
#
# In[4]:
get_ipython().run_cell_magic('html', '', '<iframe width="900", height="1000" src="https://micropython.org/unicorn/"></iframe>')
# ## High-level Language
# In[5]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/QdVFvsCWXrA" allowfullscreen></iframe>')
# Programmer nowadays use human-readable language known as the **3rd generation language (3GL)** or **high-level language.**
# - Examples includes: C, C++, Java, JavaScript, Basic, Python, PHP, ...
# ### What is a high-level language?
# - A code written in high-level language gets converted automatically to a low-level machine code for the desired platform.
# - Hence, it *abstracts* away details that can be handled by the computer (low-level code) itself.
# For instance, a programmer needs not care where a value should be physically stored if the computer can find a free location automatically to store the value.
# Different high-level languages can have different implementations of the conversion processes:
# - **Compilation** means converting a program well before executing of the program. E.g., C++ and Java programs are compiled.
# - **Interpretation** means converting a program on-the-fly during the execution of a program. E.g., JavaScript and Python programs are often interpreted.
#
# Roughly speaking, compiled programs run faster but interpreted programs are more flexible and can be modified at run time.
# (The [truth](https://finematics.com/compiled-vs-interpreted-programming-languages/) is indeed more complicated than required for this course.)
# ### What programming language will you learn?
# You will learn to program using **Python**. The course covers:
# - Basic topics including *values*, *variables*, *conditional*, *iterations*, *functions*, *composite data types*.
# - Advanced topics that touch on functional and object-oriented programming.
# - Engineering topics such as *numerical methods*, *optimizations*, and *machine learning*.
#
# See the [course homepage](https://canvas.cityu.edu.hk/courses/36768) for details.
# **Why Python?**
# In[6]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/Y8Tko2YC5hA?end=200" allowfullscreen></iframe>')
# In summary:
# - Python is expressive and can get things done with fewer lines of code as compared to other languages.
# - Python is one of the most commonly used languages. It has an extensive set of libraries for Mathematics, graphics, AI, Machine Learning, etc.
# - Python is Free and Open Source, so you get to see everything and use it without restrictions.
# - Python is portable. The same code runs in different platforms without modifications.
# **How does a Python program look like?**
# In[7]:
# for step-by-step execution using mytutor
get_ipython().run_line_magic('reload_ext', 'mytutor')
# In[8]:
get_ipython().run_cell_magic('mytutor', '-h 400', '# The program here reads the cohort and reports which year the user is in\n# Assumption: Input is integer no greater than 2020\nimport datetime # load a library to tell the current year\ncohort = input("In which year did you join CityU? ")\nyear = datetime.datetime.now().year - int(cohort) + 1\nprint("So you are a year", year, "student.")')
# A Python program contains *statements* just like sentences in natural languages.
# E.g., `cohort = input("In which year did you join CityU? ")` is a statement that gives some value a name called `cohort`.
# For the purpose of computations, a statement often contains *expressions* that evaluate to certain values.
# E.g., `input("In which year did you join CityU? ")` is an expression with the value equal to what you input to the prompt.
# That value is then given the name `cohort`.
# Expressions can be composed of:
# - *Functions* such as `input`, `now`, and `int`, etc., which are like math functions the return some values based on its arguments, if any.
# - *Literals* such as the string `"In which year did you join CityU? "` and the integer `1`. They are values you type out literally.
# - *Variables* such as `cohort` and `year`, which are meaningful names to values.
# To help others understand the code, there are also *comments* that start with `#`.
# These are descriptions meant for human to read but not to be executed by the computer.
# **Exercise** What do you think the next generation programming should be?
# Perhaps programming using natural languages. Write programs that people enjoy reading, like [literate programming](https://www.youtube.com/watch?v=bTkXg2LZIMQ).
# Indeed, Jupyter notebook is arguably a step towards this direction. See [nbdev](https://github.com/fastai/nbdev).
# In[9]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/bTkXg2LZIMQ" allowfullscreen></iframe>')
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture6/More on Functions.py
|
<reponame>ccha23/CS1302ICP<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# # More on Functions
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Recursion
# Consider computing the [Fibonacci number](https://en.wikipedia.org/wiki/Fibonacci_number) of order $n$:
#
# $$
# F_n :=
# \begin{cases}
# F_{n-1}+F_{n-2} & n>1 \kern1em \text{(recurrence)}\\
# 1 & n=1 \kern1em \text{(base case)}\\
# 0 & n=0 \kern1em \text{(base case)}.
# \end{cases}$$
# Fibonacci numbers have practical applications in generating [pseudorandom numbers](https://en.wikipedia.org/wiki/Lagged_Fibonacci_generator).
# **Can we define the function by calling the function itself?**
# In[2]:
get_ipython().run_cell_magic('mytutor', '-r -h 450', 'def fibonacci(n):\n if n > 1:\n return fibonacci(n - 1) + fibonacci(n - 2) # recursion\n elif n == 1:\n return 1\n else:\n return 0\n\nfibonacci(2)')
# [*Recursion*](https://en.wikipedia.org/wiki/Recursion_(computer_science)) is a function that calls itself (*recurs*).
# **Exercise** Write a function `gcd` that implements the [Euclidean algorithm for the greatest common divisor](https://en.wikipedia.org/wiki/Euclidean_algorithm):
#
# $$\operatorname{gcd}(a,b)=\begin{cases}a & b=0\\ \operatorname{gcd}(b, a\operatorname{mod}b) & \text{otherwise} \end{cases}$$
# In[3]:
get_ipython().run_cell_magic('mytutor', '-r -h 550', 'def gcd(a, b):\n ### BEGIN SOLUTION\n return gcd(b, a % b) if b else a\n ### END SOLUTION\n\n\ngcd(3 * 5, 5 * 7)')
# **Is recursion strictly necessary?**
# No. We can always convert a recursion to an iteration.
# E.g., the following computes the Fibonacci number of order n using a while loop instead.
# In[4]:
get_ipython().run_cell_magic('mytutor', '-r -h 550', 'def fibonacci_iteration(n):\n if n > 1:\n _, F = 0, 1 # next two Fibonacci numbers\n while n > 1:\n _, F, n = F, F + _, n - 1\n return F\n elif n == 1:\n return 1\n else:\n return 0\n \nfibonacci_iteration(3)')
# In[5]:
# more tests
for n in range(5):
assert fibonacci(n) == fibonacci_iteration(n)
# **Exercise** Implement `gcd_iteration` using a while loop instead of a recursion.
# In[6]:
get_ipython().run_cell_magic('mytutor', '-r -h 550', 'def gcd_iteration(a, b):\n ### BEGIN SOLUTION\n while b:\n a, b = b, a % b\n return a\n ### END SOLUTION\n\n\ngcd_iteration(3 * 5, 5 * 7)')
# In[7]:
# test
# NOTE(review): this test cell follows the gcd_iteration exercise but asserts
# fibonacci(n) == fibonacci_iteration(n), identical to the earlier test cell —
# looks like a copy-paste slip; presumably it should compare gcd with
# gcd_iteration. Confirm against the notebook source before changing.
for n in range(5):
    assert fibonacci(n) == fibonacci_iteration(n)
# **What is the benefit of recursion?**
# - Recursion is often shorter and easier to understand.
# - It is also easier to write code by *wishful thinking* or *[declarative programming](https://en.wikipedia.org/wiki/Declarative_programming)*.
# **Is recursion more efficient than iteration?**
# **Exercise** Find the smallest values of `n` for`fibonacci(n)` and `fibonacci_iteration(n)` respectively to run for more than a second.
# In[8]:
# Assign n
### BEGIN SOLUTION
n = 33
### END SOLUTION
fib_recursion = fibonacci(n)
# In[9]:
# Assign n
### BEGIN SOLUTION
n = 300000
### END SOLUTION
fib_iteration = fibonacci_iteration(n)
# To see why recursion is slow, we will modify `fibonacci` to print each function call as follows.
# In[10]:
def fibonacci(n):
    '''Returns the Fibonacci number of order n, printing every call so the
    recursive call tree (and its redundant work) becomes visible.'''
    print('fibonacci({!r})'.format(n))
    if n > 1:
        # Recurrence: F(n) = F(n-1) + F(n-2)
        return fibonacci(n - 1) + fibonacci(n - 2)
    if n == 1:
        return 1
    return 0
fibonacci(5)
# `fibonacci(5)` calls `fibonacci(3)` and `fibonacci(4)`, which in turn call `fibonacci(2)` and `fibonacci(3)`. `fibonacci(3)` is called twice.
# ## Global Variables
# Consider the problem of generating a sequence of Fibonacci numbers.
# In[11]:
for n in range(5):
print(fibonacci_iteration(n))
# **Is the above loop efficient?**
# No. Each call to `fibonacci_iteration(n)` recomputes the last two Fibonacci numbers $F_{n-1}$ and $F_{n-2}$ for $n\geq 2$.
# **How to avoid redundant computations?**
# One way is to store the last two computed Fibonacci numbers.
# In[12]:
get_ipython().run_cell_magic('mytutor', '-h 600', "def next_fibonacci():\n '''Returns the next Fibonacci number.'''\n global _Fn, _Fn1, _n # global declaration\n value = _Fn\n _Fn, _Fn1, _n = _Fn1, _Fn + _Fn1, _n + 1\n return value\n\ndef print_fibonacci_state():\n print('''States:\n _Fn : Next Fibonacci number = {}\n _Fn1 : Next next Fibonacci number = {}\n _n : Next order = {}'''.format(_Fn,_Fn1,_n))\n\n# global variables for next_fibonacci and print_fibonacci_state\n_Fn, _Fn1, _n = 0, 1, 0\n\nfor n in range(5):\n print(next_fibonacci())\nprint_fibonacci_state()")
# Rules for [*global/local variables*](https://docs.python.org/3/faq/programming.html#what-are-the-rules-for-local-and-global-variables-in-python):
# 1. A local variable must be defined within a function.
# 1. An assignment defines a local variable except in a [`global` statement](https://docs.python.org/3/reference/simple_stmts.html#the-global-statement).
# **Why `global` is NOT needed in `print_fibonacci_state`?**
# Without ambiguity, `_Fn, _Fn1, _n` in `print_fibonacci_state` are not local variables by Rule 1 because they are not defined within the function.
# **Why `global` is needed in `next_fibonacci`?**
# What happens otherwise:
# In[13]:
def next_fibonacci():
    '''Returns the next Fibonacci number.

    NOTE: this version deliberately omits the `global` declaration (commented
    out below) to demonstrate the resulting UnboundLocalError: the assignment
    statement makes _Fn, _Fn1, _n local variables of this function, so reading
    _Fn before it has been assigned locally fails.
    '''
    # global _Fn, _Fn1, _n
    value = _Fn  # raises UnboundLocalError: _Fn is local (assigned below) but not yet bound
    _Fn, _Fn1, _n = _Fn1, _Fn + _Fn1, _n + 1
    return value
next_fibonacci()
# Why is there an `UnboundLocalError`?
# - The assignment defines `_Fn` as a local variable by Rule 2.
# - However, the assignment requires first evaluating `_Fn`, which is not yet defined.
# **Are global variables preferred over local ones?**
# Suppose for aesthetic reasons we remove the underscores in global variable names?
# In[14]:
get_ipython().run_cell_magic('mytutor', '-h 600', "def next_fibonacci():\n '''Returns the next Fibonacci number.'''\n global Fn, Fn1, n\n value = Fn\n Fn, Fn1, n = Fn1, Fn + Fn1, n + 1\n return value\n\ndef print_fibonacci_state():\n print('''States:\n Fn : Next Fibonacci number = {}\n Fn1 : Next next Fibonacci number = {}\n n : Next order = {}'''.format(Fn,Fn1,n))\n\n# global variables renamed without underscores\nFn, Fn1, n = 0, 1, 0\n\nn = 0\nwhile n < 5:\n print(next_fibonacci())\n n += 1\nprint_fibonacci_state()")
# **Exercise** Why does the while loop prints only 3 instead of 5 Fibonacci numbers?
# There is a name collision. `n` is also incremented by `next_fibonacci()`, and so the while loop is only executed 3 times in total.
# With global variables
# - codes are less predictable, more difficult to reuse/extend, and
# - tests cannot be isolated, making debugging difficult.
# **Is it possible to store the function states without using global variables?**
# Yes. We can use nested functions and [`nonlocal` variables](https://docs.python.org/3/reference/simple_stmts.html#grammar-token-nonlocal-stmt).
# In[15]:
def fibonacci_closure(Fn, Fn1):
    '''Return a pair (next_fibonacci, print_fibonacci_state) of functions whose
    shared state (Fn, Fn1, n) is encapsulated in this enclosing scope instead
    of module-level globals. Fn and Fn1 are the first two (generalized)
    Fibonacci numbers.'''
    def next_fibonacci():
        '''Returns the next (generalized) Fibonacci number starting with
        Fn and Fn1 as the first two numbers.'''
        nonlocal Fn, Fn1, n  # declare nonlocal variables
        value = Fn
        Fn, Fn1, n = Fn1, Fn + Fn1, n + 1
        return value
    def print_fibonacci_state():
        # Read-only access to the captured state: no nonlocal statement needed.
        print('''States:
    Next Fibonacci number = {}
    Next next Fibonacci number = {}
    Next order = {}'''.format(Fn, Fn1, n))
    n = 0  # Fn and Fn1 specified in the function arguments
    return next_fibonacci, print_fibonacci_state
next_fibonacci, print_fibonacci_state = fibonacci_closure(0, 1)
n = 0
while n < 5:
print(next_fibonacci())
n += 1
print_fibonacci_state()
# The state variables `Fn, Fn1, n` are now *encapsulated*, and so
# the functions returned by `fibonacci_closure` no longer depends on any global variables.
# Another benefit of using nested functions is that we can also create different Fibonacci sequence with different base cases.
# In[16]:
my_next_fibonacci, my_print_fibonacci_state = fibonacci_closure('cs', '1302')
for n in range(5):
print(my_next_fibonacci())
my_print_fibonacci_state()
# `next_fibonacci` and `print_fibonacci_state` are *local functions* of `fibonacci_closure`.
# - They can access (*capture*) the other local variables of `fibonacci_closure` by forming the so-called *closures*.
# - Similar to the use of `global` statement, a [`non-local` statement](https://docs.python.org/3/reference/simple_stmts.html#the-nonlocal-statement) is needed for assigning nonlocal variables.
# Each local function has an attribute named `__closure__` that stores the captured local variables.
# In[17]:
def print_closure(f):
    '''Print every cell captured in the closure of the function f.'''
    print('closure of ', f.__name__)
    for captured in f.__closure__:
        print(' {} content: {!r}'.format(captured, captured.cell_contents))
print_closure(next_fibonacci)
print_closure(print_fibonacci_state)
# ## Generator
# Another way to generate a sequence of objects one-by-one is to write a *generator*.
# In[18]:
fibonacci_generator = (fibonacci_iteration(n) for n in range(3))
fibonacci_generator
# The above uses a [*generator expression*](https://docs.python.org/3/reference/expressions.html#grammar-token-generator-expression) to define `fibonacci_generator`.
# **How to obtain items from a generator?**
# We can use the [`next` function](https://docs.python.org/3/library/functions.html#next).
# In[19]:
while True:
print(next(fibonacci_generator)) # raises StopIterationException eventually
# A generator object is [*iterable*](https://www.programiz.com/python-programming/iterator), i.e., it implements both `__iter__` and `__next__` methods that are automatically called in a `for` loop as well as the `next` function.
# In[20]:
fibonacci_generator = (fibonacci_iteration(n) for n in range(5))
for fib in fibonacci_generator: # StopIterationException handled by for loop
print(fib)
# **Is `fibonacci_generator` efficient?**
# No again due to redundant computations.
# A better way to define the generator is to use the keyword [`yield`](https://docs.python.org/3/reference/expressions.html?highlight=yield#yield-expressions):
# In[21]:
get_ipython().run_cell_magic('mytutor', '-h 450', "def fibonacci_sequence(Fn, Fn1, stop):\n '''Return a generator that generates Fibonacci numbers\n starting from Fn and Fn1 until stop (exclusive).'''\n while Fn < stop:\n yield Fn # return Fn and pause execution\n Fn, Fn1 = Fn1, Fn1 + Fn\n\n\nfor fib in fibonacci_sequence(0, 1, 5):\n print(fib)")
# 1. `yield` causes the function to return a *generator* without executing the function body.
# 1. Calling `__next__` resumes the execution, which
# - pauses at the next `yield` expression, or
# - raises the `StopIterationException` at the end.
# **Exercise** The yield expression `yield ...` is mistaken in [Halterman17] to be a statement. It is actually an expression because
# - The value of a `yield` expression is `None` by default, but
# - it can be set by the `generator.send` method.
#
# Add the document string to the following function. In particular, explain the effect of calling the method `send` on the returned generator.
# In[22]:
get_ipython().run_cell_magic('mytutor', '-r -h 500', "def fibonacci_sequence(Fn, Fn1, stop):\n ### BEGIN SOLUTION\n '''Return a generator that generates Fibonacci numbers\n starting from Fn and Fn1 to stop (exclusive). \n generator.send(value) sets next number to value.'''\n ### END SOLUTION\n while Fn < stop:\n value = yield Fn\n if value is not None: \n Fn1 = value # set next number to the value of yield expression\n Fn, Fn1 = Fn1, Fn1 + Fn ")
# ## Optional Arguments
# **How to make function arguments optional?**
# In[23]:
def fibonacci_sequence(Fn=0, Fn1=1, stop=None):
    '''Return a generator of Fibonacci numbers starting from Fn and Fn1.

    Numbers are generated while Fn < stop; with the default stop=None the
    generator is unbounded, so callers must limit iteration themselves.
    '''
    while stop is None or Fn < stop:
        # The original bound the sent value (`value = yield Fn`) but never
        # used it; this version drops the dead binding. Values sent via
        # generator.send() are ignored either way.
        yield Fn
        Fn, Fn1 = Fn1, Fn1 + Fn
# In[24]:
for fib in fibonacci_sequence(0,1,5):
print(fib) # with all arguments specified
# In[25]:
for fib in fibonacci_sequence(stop=5):
print(fib) # with default Fn=0, Fn1=1
# `stop=5` is called a [keyword argument](https://docs.python.org/3/glossary.html#term-keyword-argument). Unlike `positional arguments`, it specifies the name of the argument explicitly.
# **Exercise** `stop` is an [optional argument](https://docs.python.org/3/tutorial/controlflow.html#default-argument-values) with the *default value* `None`. What is the behavior of the following code?
# In[26]:
for fib in fibonacci_sequence(5):
print(fib)
if fib > 10:
break # Will this be executed?
# With the default value of `None`, the while loop becomes an infinite loop. The generator will keep generating the next Fibonacci number without any bound on the order. In particular, `fibonacci_sequence(5)` creates an unstoppable (default) generator with base case `Fn=5` (specified) and `Fn1=1` (default).
# Rules for specifying arguments:
# 1. Keyword arguments must be after all positional arguments.
# 1. Duplicate assignments to an argument are not allowed.
# E.g., the following results in error:
# In[27]:
fibonacci_sequence(stop=10, 1)
# In[28]:
fibonacci_sequence(1, Fn=1)
# The following shows that the behavior of `range` is different.
# In[29]:
for count in range(1, 10, 2):
print(count, end=' ') # counts from 1 to 10 in steps of 2
print()
for count in range(1, 10):
print(count, end=' ') # default step=1
print()
for count in range(10):
print(count, end=' ') # default start=0, step=1
range(stop=10) # fails
# `range` takes only positional arguments.
# However, the first positional argument has different interpretations (`start` or `stop`) depending on the number of arguments (2 or 1).
# `range` is indeed NOT a generator.
# In[30]:
print(type(range),type(range(10)))
# ## Variable number of arguments
# We can simulate the behavior of range by having a [variable number of arguments](https://docs.python.org/3.4/tutorial/controlflow.html#arbitrary-argument-lists).
# In[31]:
def print_arguments(*args, **kwargs):
    '''Print the tuple of positional arguments and the dict of keyword
    arguments received, together with their types.'''
    positional, keyword = args, kwargs
    print('args ({}): {}'.format(type(positional), positional))
    print('kwargs ({}): {}'.format(type(keyword), keyword))
print_arguments(0, 10, 2, start=1, stop=2)
print("{k}".format(greeting="Hello",k=8),"*" )
# - `args` is a tuple of positional arguments.
# - `kwargs` is a dictionary of keyword arguments.
# `*` and `**` are *unpacking operators* for tuple/list and dictionary respectively:
# In[32]:
args = (0, 10, 2)
kwargs = {'start': 1, 'stop': 2}
print_arguments(*args, **kwargs)
# The following function converts all the arguments to a string.
# It will be useful later on.
# In[33]:
def argument_string(*args, **kwargs):
    '''Return a call-style string listing all arguments, e.g. "(1, 2, k=3)".

    Positional arguments come first in repr form, followed by keyword
    arguments as name=value pairs.'''
    parts = ['{!r}'.format(v) for v in args]                      # arguments
    parts += ['{}={!r}'.format(k, v) for k, v in kwargs.items()]  # keyword arguments
    return '({})'.format(', '.join(parts))
argument_string(0, 10, 2, start=1, stop=2)
# **Exercise** Redefine `fibonacci_sequence` so that the positional arguments depend on the number of arguments:
# In[34]:
def fibonacci_sequence(*args):
    '''Return a generator that generates Fibonacci numbers
    starting from Fn and Fn1 to stop (exclusive).
    generator.send(value) sets next number to value.
    fibonacci_sequence(stop)
    fibonacci_sequence(Fn,Fn1)
    fibonacci_sequence(Fn,Fn1,stop)
    '''
    Fn, Fn1, stop = 0, 1, None  # default values
    # Dispatch on the number of positional arguments (mimics range()).
    # Fix: compare with ==, not `is` — identity comparison with int literals
    # relies on CPython's small-int cache and emits SyntaxWarning on 3.8+.
    if len(args) == 1:
        stop = args[0]
    elif len(args) == 2:
        Fn, Fn1 = args[0], args[1]
    elif len(args) > 2:
        Fn, Fn1, stop = args[0], args[1], args[2]
    while stop is None or Fn < stop:
        value = yield Fn
        if value is not None:
            Fn1 = value  # set next number to the value of yield expression
        Fn, Fn1 = Fn1, Fn1 + Fn
# In[35]:
for fib in fibonacci_sequence(5): # default Fn=0, Fn=1
print(fib)
# In[36]:
for fib in fibonacci_sequence(1, 2): # default stop=None
print(fib)
if fib>5:
break
# In[37]:
args = (1, 2, 5)
for fib in fibonacci_sequence(*args): # default stop=None
print(fib)
# ## Decorator
# **What is function decoration?**
# **Why decorate a function?**
# In[38]:
def fibonacci(n):
    '''Returns the Fibonacci number of order n, printing each call with its
    sequence number and call-stack depth (as a bar of "|" characters).

    Uses module-level globals: count (calls so far) and depth (current
    recursion depth, -1 when no recursion is in progress).
    '''
    global count, depth
    count += 1
    depth += 1
    print('{:>3}: {}fibonacci({!r})'.format(count, '|' * depth, n))
    value = fibonacci(n - 1) + fibonacci(n - 2) if n > 1 else 1 if n == 1 else 0
    depth -= 1
    # Fix: use ==, not `is` — int identity is a CPython implementation detail
    # and identity comparison with literals is a SyntaxWarning on 3.8+.
    if depth == -1:  # recursion done
        print('Done')
        count = 0  # reset count for subsequent recursions
    return value
count, depth = 0, -1
for n in range(6):
    print(fibonacci(n))
# The code decorates the `fibonacci` function by printing each recursive call and the depth of the call stack.
# The decoration is useful in showing the efficiency of the function, but it rewrites the function definition.
# **How to decorate a function without changing its code?**
# - What if the decorations are temporary and should be removed later?
# - Go through the source codes of all decorated functions to remove the decorations?
# - When updating a piece of code, switch back and forth between original and decorated codes?
# What about defining a new function that calls and decorates the original function?
# In[39]:
def fibonacci(n):
    '''Returns the Fibonacci number of order n.'''
    # Fix: compare with ==, not `is` — `n is 1` only works because CPython
    # caches small ints, and emits SyntaxWarning on Python 3.8+.
    return fibonacci(n - 1) + fibonacci(n - 2) if n > 1 else 1 if n == 1 else 0
def fibonacci_decorated(n):
    '''Returns the Fibonacci number of order n, printing the call before
    delegating to the undecorated fibonacci.

    Relies on module-level globals: count (calls printed so far) and
    depth (current call-stack depth, -1 when idle).
    '''
    global count, depth
    count += 1
    depth += 1
    print('{:>3}: {}fibonacci({!r})'.format(count, '|' * depth, n))
    value = fibonacci(n)
    depth -= 1
    # Fix: use ==, not `is`, to compare integer values.
    if depth == -1:  # recursion done
        print('Done')
        count = 0  # reset count for subsequent recursions
    return value
count, depth = 0, -1
for n in range(6):
print(fibonacci_decorated(n))
# We want `fibonacci` to call `fibonacci_decorated` instead.
# What about renaming `fibonacci_decorated` to `fibonacci`?
#
# ```Python
# fibonacci = fibonacci_decorated
# count, depth = 0, -1
# fibonacci_decorated(10)
# ```
#
# (If you are faint-hearted, don't run the above code.)
# We want `fibonacci_decorated` to call the original `fibonacci`.
# The solution is to capture the original `fibonacci` in a closure:
# In[40]:
import functools
def print_function_call(f):
    '''Return a decorator that prints function calls.

    The returned wrapper prints, for every call, a running call count, the
    current call-stack depth (as a bar of "|" characters), and the call
    expression rendered by argument_string. State (count, depth) lives in
    this closure rather than in globals.
    '''
    @functools.wraps(f)  # give wrapper the identity of f and more
    def wrapper(*args, **kwargs):
        nonlocal count, depth
        count += 1
        depth += 1
        call = '{}{}'.format(f.__name__, argument_string(*args, **kwargs))
        print('{:>3}:{}{}'.format(count, '|' * depth, call))
        value = f(*args, **kwargs)  # wrapper calls f
        depth -= 1
        # Fix: use ==, not `is` — int identity comparison is unreliable and
        # a SyntaxWarning on Python 3.8+.
        if depth == -1:
            print('Done')
            count = 0
        return value
    count, depth = 0, -1  # captured by wrapper as nonlocal state
    return wrapper  # return the decorated function
# `print_function_call` takes in `f` and returns `wrapper`, which captures and decorates `f`:
# - `wrapper` expects the same set of arguments for `f`,
# - returns the same value returned by `f` on the arguments, but
# - can execute additional codes before and after calling `f` to print the function call.
# By redefining `fibonacci` as the returned `wrapper`, the original `fibonacci` captured by `wrapper` calls `wrapper` as desired.
# In[41]:
def fibonacci(n):
    '''Returns the Fibonacci number of order n (plain recursion).'''
    # Fix: == rather than `is` for comparing integer values; `n is 1` only
    # works via CPython's small-int cache and warns on 3.8+.
    return fibonacci(n - 1) + fibonacci(n - 2) if n > 1 else 1 if n == 1 else 0
# Rebind the name `fibonacci` to the wrapper: the recursive calls inside the
# original function body now resolve to the wrapper too, so every call is traced.
fibonacci = print_function_call(
    fibonacci)  # so original fibonacci calls wrapper
fibonacci(5)
# The redefinition does not change the original `fibonacci` captured by `wrapper`.
# In[42]:
import inspect
for cell in fibonacci.__closure__:
if callable(cell.cell_contents):
print(inspect.getsource(cell.cell_contents))
# Python provides the syntatic sugar below to simplify the redefinition.
# In[43]:
@print_function_call
def fibonacci(n):
    '''Returns the Fibonacci number of order n (decorated via @ syntax).'''
    # Fix: == rather than `is` for integer comparison (int identity is a
    # CPython implementation detail; SyntaxWarning on 3.8+).
    return fibonacci(n - 1) + fibonacci(n - 2) if n > 1 else 1 if n == 1 else 0
fibonacci(5)
# There are many techniques used in the above decorator.
# **Why use a variable number of arguments in `wrapper`**
# To decorate any function with possibly different number of arguments.
# **Why decorate the wrapper with `@functools.wraps(f)`?**
# - Ensures some attributes (such as `__name__`) of the wrapper function is the same as those of `f`.
# - Add useful attributes. E.g., `__wrapped__` stores the original function so we can undo the decoration.
#
# In[44]:
fibonacci, fibonacci_decorated = fibonacci.__wrapped__, fibonacci # recover
print('original fibonacci:')
print(fibonacci(5))
fibonacci = fibonacci_decorated # decorate
print('decorated fibonacci:')
print(fibonacci(5))
# **How to use decorator to improve recursion?**
# We can also use a decorator to make recursion more efficient by caching the return values.
# `cache` is a dictionary where `cache[n]` stores the computed value of $F_n$ to avoid redundant computations.
# In[45]:
def caching(f):
    '''Return a decorator that caches a function with a single argument.

    The wrapped function gains a clear_cache() attribute that empties the
    cache; a message is printed whenever a cached value is reused.'''
    memo = {}
    @functools.wraps(f)
    def wrapper(n):
        if n in memo:
            print('read from cache')
        else:
            memo[n] = f(n)
        return memo[n]
    wrapper.clear_cache = lambda : memo.clear()  # add method to clear cache
    return wrapper
@print_function_call
@caching
def fibonacci(n):
return fibonacci(n - 1) + fibonacci(n - 2) if n > 1 else 1 if n == 1 else 0
# In[46]:
fibonacci(5)
fibonacci(5)
fibonacci.clear_cache()
fibonacci(5)
# A method `clear_cache` is added to the wrapper to clear the cache.
# `lambda <argument list> : <expression>`is called a [*lambda* expression](https://docs.python.org/3/reference/expressions.html#lambda), which conveniently defines an *anonymous function*.
# In[47]:
type(fibonacci.clear_cache), fibonacci.clear_cache.__name__
# ## Module
# **How to create a module?**
# To create a module, simply put the code in a python source file `<module name>.py` in
# - the current directory, or
# - a python *site-packages* directory in system path.
# In[48]:
import sys
print(sys.path)
# For example, to create a module for generating Fibonacci numbers:
# In[49]:
get_ipython().run_line_magic('more', 'fibonacci.py')
# In[50]:
import fibonacci as fib # as statement shortens name
help(fib)
# In[51]:
print(fib.fibonacci(5))
print(fib.fibonacci_iteration(5))
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab4/Mastermind.py
|
#!/usr/bin/env python
# coding: utf-8
# # Mastermind
# **CS1302 Introduction to Computer Programming**
# ___
# In this notebook, you will write a game called [*Mastermind*](https://en.wikipedia.org/wiki/Mastermind_(board_game)).
# Play the video below to learn about the rule of the game.
# In[ ]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/wsYPsrzCKiA" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# 1. **Mastermind** first creates a hidden `code` of length `code_length` consisting code pegs with possibly duplicate colors chosen from a sequence of `colors`.
# 1. **Codebreaker** provides a `guess` of the `code`.
# 1. **Mastermind** generates a `feedback` consisting of key pegs of black and white colors:
# - The number of black pegs (`black_key_pegs_count`) is the number of code pegs that are the correct colors in the correct positions.
# - The number of white pegs (`white_key_pegs_count`) is the number of code pegs that are the correct colors but in incorrect positions.
# - Each code peg should be counted only once, i.e., a code peg cannot be awarded more than one key peg. E.g.,
# - If the `code` is `'RBGG'` and `guess` is `'BGGG'`, then
# - the feedback should be `'bbw'` with
# - `black_key_pegs_count == 2` because of `__GG` in the guess, and
# - `white_key_pegs_count == 1` because of `_B__` in the guess.
# - `_G__` in the `guess` should not be awarded an additional white peg because `__GG` in the `code` has been counted.
# 1. **Codebreaker** wins if the code is correctly guessed within certain number (`max_num_guesses`) of guesses.
# ## Random Code Generation
# The first exercise is to generate a random hidden code so even one person can play the game as Codebreaker.
# Watch the following video to understand how computers generate random objects.
# In[ ]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/GtOt7EBNEwQ" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# To generate random content in Python, we can import the **random** module, which provides some useful functions to generate random objects as follows.
# In[ ]:
import random
for i in range(10): print(random.random()) # random floating point numbers in [0,1)
# In[ ]:
for i in range(10): print(random.randint(3,10),end=' ') # random integer in range [3,10]
# In[ ]:
for i in range(10): print(random.choice('RBG'),end='') # random element in the sequence 'RBG'
# We can generate a reproducible pseudo-random sequence by specifying the seed.
# By default Python uses the system time as seed.
# In[ ]:
# repeatedly run the cell to see new sequences.
random.seed(123456)
for i in range(10): print(random.randint(3,10),end=' ')
# **Exercise** Define a function that generates a random `code`. The functions takes in
# - a string `colors` whose characters represent distinct colors to choose from, and
# - a positive integer `code_length` representing the length of the code.
#
# For instance, `get_code('ROYGBP',4)` returns a code of `4` code pegs randomly with colors chosen from `'R'`ed, `'O'`range, `'Y'`ellow, `'G'`reen, `'B'`lue, and `'P'`urple. One possible outcome is `'ROYY'`.
# In[ ]:
import random
def get_code(colors, code_length):
    '''Return a random hidden code for the Mastermind game.

    Parameters
    ----------
    colors (str): distinct characters representing the colors to choose from.
    code_length (int): number of code pegs in the code.

    Returns
    -------
    str: a string of code_length characters, each drawn uniformly at
    random (with replacement) from colors, e.g. get_code('ROYGBP', 4)
    may return 'ROYY'.
    '''
    code = ''
    for _ in range(code_length):
        # Pick a random index into colors and append that color peg.
        code += colors[random.randint(0, len(colors) - 1)]
    return code
# In[ ]:
# ## Guess Validation
# **Exercise** Define a function `valid_code` that
# - takes `colors`, `code_length`, and `guess` as the first, second, and third arguments respectively, and
# - return `True` if `guess` is a valid code, i.e., a string of length `code_length` with characters from those of `colors`, and
# - `False` otherwise.
# *Hint:* Solution template:
# ```Python
# def __________(colors, code_length, guess):
# if len(guess) __ code_length:
# is_valid = ____
# else:
# for peg in guess:
# for color in colors:
# if peg == color: ____
# else:
# is_valid = _____
# ____
# else:
# is_valid = ____
# return is_valid
# ```
# In[ ]:
def valid_code(colors, code_length, guess):
    '''Return whether guess is a valid code.

    Parameters
    ----------
    colors (str): distinct characters representing the allowed colors.
    code_length (int): required length of a valid code.
    guess (str): the string to validate.

    Returns
    -------
    bool: True iff guess has exactly code_length characters and every
    character is one of the characters of colors.
    '''
    if len(guess) != code_length:
        return False
    # Every peg must be one of the allowed colors; vacuously True for ''.
    return all(peg in colors for peg in guess)
# In[ ]:
# tests
assert valid_code('RBG',1,'R') == True
assert valid_code('RBG',2,'B') == False
assert valid_code('RBG',2,'RP') == False
assert valid_code('RBG',0,'') == True
# ## Feedback Generation
# According to the rules of Mastermind, double-counting of a single peg (as black and white) is not allowed. To facilitate this check, we have written a new module `markposition` that allows you to mark any non-negative integer position as counted.
# **Exercise** Write an `import` statement to import from the module `markposition` the functions
# - `mark_as_counted`
# - `check_if_counted`, and
# - `reset_all_to_not_counted`.
# In[ ]:
# YOUR CODE HERE
raise NotImplementedError()
# In[ ]:
# Tests
reset_all_to_not_counted()
mark_as_counted(3)
assert check_if_counted(3) and not check_if_counted(0)
# **Exercise** Using the functions imported from `markposition`, mark only the positions `0`, `2`, `4`, `6`, `8`, and `10` as counted. All other positions are not counted.
# *Hint*: Use `help` to learn how to use the imported functions.
# In[ ]:
# YOUR CODE HERE
raise NotImplementedError()
# In[ ]:
# Tests
for i in range(11):
assert not check_if_counted(i) if i % 2 else check_if_counted(i)
# **Exercise** Define a function `get_feedback` that
# - takes `code` and `guess` as the first and second arguments respectively, and
# - returns a feedback string that starts with the appropriate number of characters `'b'` (for black key pegs) followed by the appropriate number of characters `'w'` (for white key pegs).
# *Hint:* Solution template:
# ```Python
# def get_feedback(code, guess):
# black_key_pegs_count = white_key_pegs_count = counted = 0
# reset_all_to_not_counted()
# for i in _________________:
# if ___________________:
# black_key_pegs_count += 1
# mark_as_counted(i)
# for i in range(len(guess)):
# for j in range(len(code)):
# if __________________________________________________________:
# white_key_pegs_count += 1
# mark_as_counted(j)
# break
# key = 'b' * black_key_pegs_count + 'w' * white_key_pegs_count
# return key
# ```
# In[ ]:
def get_feedback(code, guess):
    '''Return the Mastermind feedback string for guess against code.

    The feedback starts with one 'b' (black key peg) per guess peg of the
    right color in the right position, followed by one 'w' (white key peg)
    per remaining guess peg of a right color in a wrong position. Each code
    peg is counted at most once, so no peg is awarded both a black and a
    white key peg (no double counting).

    Parameters
    ----------
    code (str): the hidden code.
    guess (str): the codebreaker's guess.

    Returns
    -------
    str: e.g. 'bbw' for two black and one white key pegs.
    '''
    black_key_pegs_count = 0
    white_key_pegs_count = 0
    counted = set()  # positions of code pegs already awarded a key peg
    # First pass: exact (position and color) matches earn black key pegs.
    for i in range(min(len(code), len(guess))):
        if guess[i] == code[i]:
            black_key_pegs_count += 1
            counted.add(i)
    # Second pass: color-only matches earn white key pegs. Each uncounted
    # code peg can back at most one white key peg (hence the break).
    for i in range(len(guess)):
        if i < len(code) and guess[i] == code[i]:
            continue  # this guess peg already earned a black key peg
        for j in range(len(code)):
            if j not in counted and guess[i] == code[j]:
                white_key_pegs_count += 1
                counted.add(j)
                break
    return 'b' * black_key_pegs_count + 'w' * white_key_pegs_count
# In[ ]:
# tests
def test_get_feedback(feedback, code, guess):
    '''Assert that get_feedback(code, guess) equals the expected feedback,
    printing a diagnostic message first when the two disagree.'''
    feedback_ = get_feedback(code, guess)
    if feedback != feedback_:
        print(
            f'With code="{code}" and guess="{guess}", feedback should be "{feedback}", not "{feedback_}".'
        )
    assert feedback == feedback_
test_get_feedback(10*'b'+'w'*0,"RGBRGBRGBY","RGBRGBRGBY")
test_get_feedback(0*'b'+'w'*10,"RGBRGBRGBY","YRGBRGBRGB")
test_get_feedback(8*'b'+'w'*0,"RGRGRGRG","RGRGRGRG")
test_get_feedback(0*'b'+'w'*8,"RGRGRGRG","GRGRGRGR")
test_get_feedback(0*'b'+'w'*6,"RRRRGGG","GGGGRRR")
test_get_feedback(1*'b'+'w'*6,"RRRRGGG","GGGRRRR")
test_get_feedback(5*'b'+'w'*2,"RRRRGGG","RRRGGGR")
test_get_feedback(1*'b'+'w'*0,"RRRRGGG","RYYPPBB")
test_get_feedback(0*'b'+'w'*1,"RRRRG","GBBBB")
test_get_feedback(0*'b'+'w'*0,"RRRRG","YBBBB")
# ## Play the Game
# After finishing the previous exercises, you can play the game as a code breaker against a random mastermind.
# In[ ]:
# mastermind
import ipywidgets as widgets
from IPython.display import display, HTML
def main():
    '''The main function that runs the mastermind game.

    Builds an ipywidgets UI (sliders for the number of guesses and the code
    length, a password box for an optional hidden code, a guess box, and two
    output areas) and registers the button callbacks start_new_game and
    submit_guess. Relies on the exercise functions get_code, valid_code and
    get_feedback defined earlier in this notebook.
    '''
    # Shared game state; the callbacks below mutate it via `nonlocal`.
    max_num_guesses = code_length = code = num_guesses_left = None
    is_game_ended = True
    colors = 'ROYGBP'
    # Radial-gradient color stops (highlight, mid, shadow) per peg color;
    # lowercase 'b'/'w' render the black/white feedback key pegs.
    color_code = {
        "R": "#F88,#F00,#800",
        "O": "#FD8,#F80,#840",
        "Y": "#FF8,#FF0,#AA0",
        "G": "#8F8,#0F0,#080",
        "B": "#88F,#00F,#008",
        "P": "#F8F,#F0F,#808",
        "b": "#888,#000,#000",
        "w": "#FFF,#EEE,#888"
    }
    # returns the HTML code for a colored peg.
    def getPeg(color, size=30):
        return '''<div style='display:inline-block;
        background-image: radial-gradient(circle, {0});
        width:{1}px; height:{1}px; border-radius:50%;'>
        </div>'''.format(color_code[color], size)
    colors_display = widgets.HBox([widgets.Label(value='Color codes:')] + [
        widgets.HBox([widgets.Label(value=color),
                      widgets.HTML(getPeg(color))]) for color in colors
    ])
    max_num_guesses_input = widgets.IntSlider(min=5,
                                              max=15,
                                              value=10,
                                              description="# guesses:")
    code_length_input = widgets.IntSlider(min=2,
                                          max=10,
                                          value=4,
                                          description="Code length:")
    code_input = widgets.Password(description="Code:")
    start_new_game_button = widgets.Button(description="Start a new game")
    guess_input = widgets.Text(description="Guess:")
    submit_guess_button = widgets.Button(description="Submit guess")
    board = widgets.Output()
    message = widgets.Output()
    display(
        widgets.VBox([
            max_num_guesses_input, code_length_input, colors_display,
            widgets.HBox([code_input, start_new_game_button]),
            widgets.HBox([guess_input, submit_guess_button]), board, message
        ]))
    # A listener that starts a new game
    def start_new_game(button):
        nonlocal code, num_guesses_left, is_game_ended, max_num_guesses, code_length
        max_num_guesses = max_num_guesses_input.value
        code_length = code_length_input.value
        board.clear_output()
        message.clear_output()
        # An empty code box means: let the computer pick a random code.
        code = code_input.value or get_code(colors, code_length)
        with message:
            if not valid_code(colors, code_length, code):
                display(
                    HTML('''<p>The code {} is invalid.<br>
                    Leave the code box empty to randomly generated a code.
                    </p>'''.format(code)))
                is_game_ended = True
            else:
                num_guesses_left = max_num_guesses
                is_game_ended = num_guesses_left <= 0
                display(
                    HTML('<p>Game started! {} Guesses left.</p>'.format(
                        num_guesses_left)))
    # A listener that submits a guess
    def submit_guess(button):
        nonlocal num_guesses_left, is_game_ended
        guess = guess_input.value
        with message:
            message.clear_output()
            if is_game_ended:
                display(
                    HTML('''<p>Game has not started.<br>
                    Please start a new game.</p>'''))
                return
            if not valid_code(colors, code_length, guess):
                display(HTML('<p>Invalid guess.</p>'))
                return
        # NOTE(review): "Feeback" in the board HTML below is a typo in the
        # displayed label ("Feedback"); left unchanged in this
        # documentation-only pass since it is runtime output.
        feedback = get_feedback(code, guess)
        num_guesses_left -= 1
        with board:
            content = ""
            for k in guess:
                content += getPeg(k)
            content += '''<div style='display:inline-block;
            margin: 0px 5px 0px 30px;
            position:relative; top:5px;'>Feeback:</div>
            <div style='display:inline-block;
            border: 1px solid; width:120px; height:30px;'>'''
            for k in feedback:
                content += getPeg(k, 28)
            content += "</div>"
            display(HTML(content))
        with message:
            message.clear_output()
            # All-black feedback means the guess matches the code exactly.
            if feedback == 'b' * code_length:
                is_game_ended = True
                display(
                    HTML('<p>You won with {} guesses left!</p>'.format(
                        num_guesses_left)))
                return
            is_game_ended = num_guesses_left <= 0
            if is_game_ended:
                display(HTML('<p>Game over...</p>'))
                return
            display(HTML('<p>{} Guesses left.</p>'.format(num_guesses_left)))
    start_new_game_button.on_click(start_new_game)
    submit_guess_button.on_click(submit_guess)
main()
# In[ ]:
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab7/Cybersecurity.py
|
<filename>_build/jupyter_execute/Lab7/Cybersecurity.py
#!/usr/bin/env python
# coding: utf-8
# # Cybersecurity
# **CS1302 Introduction to Computer Programming**
# ___
# Python is a popular tool among hackers and engineers. In this lab, you will learn Cryptology in cybersecurity, which covers
# - [Cryptography](https://en.wikipedia.org/wiki/Cryptography): Encryption and decryption using a cipher.
# - [Cryptanalysis](https://en.wikipedia.org/wiki/Cryptanalysis): Devising an attack to break a cipher.
# ## Caesar symmetric key cipher
# We first implements a simple cipher called the [Caesar cipher](https://en.wikipedia.org/wiki/Caesar_cipher).
# In[ ]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/sMOZf4GN3oc" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# ### Encrypt/decrypt a character
# **How to encrypt a character?**
# The following code encrypts a character `char` using a non-negative integer `key`.
# In[ ]:
cc_n = 1114112  # number of Unicode code points: chr() accepts 0 .. cc_n - 1
def cc_encrypt_character(char, key):
    '''
    Return the encryption of a character by an integer key using Caesar cipher.

    The character's code point is cyclically shifted forward by key over the
    full Unicode range, so any character (not just letters) can be encrypted.

    Parameters
    ----------
    char (str): a unicode (UTF-8) character to be encrypted.
    key (int): secret key to encrypt char.
    '''
    return chr((ord(char) + key) % cc_n)
# For example, to encrypt the letter `'A'` using a secret key `5`:
# In[ ]:
cc_encrypt_character('A', 5)
# The character `'A'` is encrypted to the character `'F'` as follows:
#
# 1. `ord(char)` return the integer `65` that is the code point (integer representation) of the unicode of `'A'`.
# 2. `(char_code + key) % cc_n` cyclic shifts the code by the key `5`.
# 3. `chr(shifted_char_code)` converts the shifted code back to a character, which is `'F'`.
#
# | Encryption | | | | | | | | |
# | ------------------------------- | --- | ----- | --- | --- | --- | --- | --- | --- |
# | `char` | ... | **A** | B | C | D | E | F | ... |
# | `ord(char)` | ... | **65**| 66 | 67 | 68 | 69 | 70 | ... |
# | `(ord(char) + key) % cc_n` | ... | **70**| 71 | 72 | 73 | 74 | 75 | ... |
# | `(chr(ord(char) + key) % cc_n)` | ... | **F** | G | H | I | J | K | ... |
# You may learn more about `ord` and `chr` from their docstrings:
# In[ ]:
help(ord)
help(chr)
# **How to decrypt a character?**
# Mathematically, we define the encryption and decryption of a character for Caesar cipher as
#
# $$ \begin{aligned} E(x,k) &:= x + k \mod n & \text{(encryption)} \\
# D(x,k) &:= x - k \mod n & \text{(decryption),} \end{aligned}
# $$
# where $x$ is the character code in $\{0,\dots,n\}$ and $k$ is the secret key. `mod` operator above is the modulo operator. In Mathematics, it has a lower precedence than addition and multiplication and is typeset with an extra space accordingly.
# The encryption and decryption satisfies the recoverability condition
#
# $$ D(E(x,k),k) = x $$
# so two people with a common secret key can encrypt and decrypt a character, but others not knowing the key cannot. This is a defining property of a [symmetric cipher](https://en.wikipedia.org/wiki/Symmetric-key_algorithm).
# The following code decrypts a character using a key.
# In[ ]:
def cc_decrypt_character(char, key):
    '''
    Return the decryption of a character by the key using Caesar cipher.

    Undoes cc_encrypt_character by cyclically shifting the character's code
    point backward by key modulo cc_n.

    Parameters
    ----------
    char (str): a unicode (UTF-8) character to be decrypted.
    key (int): secret key to decrypt char.
    '''
    return chr((ord(char) - key) % cc_n)
# For instance, to decrypt the letter `'F'` by the secret key `5`:
# In[ ]:
cc_decrypt_character('F',5)
# The character `'F'` is decrypted back to `'A'` because
# `(char_code - key) % cc_n` reverse cyclic shifts the code by the key `5`.
#
# | Encryption | | | | | | | | | Decryption |
# | ------------------------------- | --- | ----- | --- | --- | --- | --- | --- | --- | ------------------------------- |
# | `char` | ... | **A** | B | C | D | E | F | ... | `(chr(ord(char) - key) % cc_n)` |
# | `ord(char)` | ... | **65**| 66 | 67 | 68 | 69 | 70 | ... | `(ord(char) - key) % cc_n` |
# | `(ord(char) + key) % cc_n` | ... | **70**| 71 | 72 | 73 | 74 | 75 | ... | `ord(char)` |
# | `(chr(ord(char) + key) % cc_n)` | ... | **F** | G | H | I | J | K | ... | `char` |
# **Exercise** Why did we set `cc_n = 1114112`? Explain whether the recoverability property may fail if we set `cc_n` to a bigger number or remove `% cc_n` for both `cc_encrypt_character` and `cc_decrypt_character`.
# YOUR ANSWER HERE
# ### Encrypt a plaintext and decrypt a ciphertext
# Of course, it is more interesting to encrypt a string instead of a character. The following code implements this in one line.
# In[ ]:
def cc_encrypt(plaintext, key):
    '''
    Return the ciphertext of a plaintext by the key using Caesar cipher.

    Substitution cipher: each character is encrypted independently by a
    cyclic shift of its code point by key modulo cc_n.

    Parameters
    ----------
    plaintext (str): a unicode (UTF-8) message in to be encrypted.
    key (int): secret key to encrypt plaintext.
    '''
    encrypted_chars = [chr((ord(char) + key) % cc_n) for char in plaintext]
    return ''.join(encrypted_chars)
# The above function encrypts a message, referred to as the *plaintext*, by replacing each character with its encryption.
# This is referred to as a [*substitution cipher*](https://en.wikipedia.org/wiki/Substitution_cipher).
# **Exercise** Define a function `cc_decrypt` that
# - takes a string `ciphertext` and an integer `key`, and
# - returns the plaintext that encrypts to `ciphertext` by the key using Caesar cipher.
# In[ ]:
def cc_decrypt(ciphertext, key):
    '''
    Return the plaintext that encrypts to ciphertext by the key using Caesar cipher.

    Parameters
    ----------
    ciphertext (str): message to be decrypted.
    key (int): secret key to decrypt the ciphertext.
    '''
    # Reverse the cyclic shift applied by cc_encrypt, character by character,
    # so that cc_decrypt(cc_encrypt(s, key), key) == s for any key.
    return ''.join(chr((ord(char) - key) % cc_n) for char in ciphertext)
# In[ ]:
# tests
assert cc_decrypt(r'bcdefghijklmnopqrstuvwxyz{',1) == 'abcdefghijklmnopqrstuvwxyz'
assert cc_decrypt(r'Mjqqt1%\twqi&',5) == 'Hello, World!'
# ## Brute-force attack
# ### Create an English dictionary
# You will launch a brute-force attack to guess the key that encrypts an English text. The idea is simple:
#
# - You try decrypting the ciphertext with different keys, and
# - see which of the resulting plaintexts make most sense (most english-like).
# To check whether a plaintext is English-like, we need to have a list of English words. One way is to type them out
# but this is tedious. Alternatively, we can obtain the list from the *Natural Language Toolkit (NLTK)*:
# In[ ]:
import nltk
nltk.download('words')
from nltk.corpus import words
# `words.words()` returns a list of words. We can check whether a string is in the list using the operator `in`.
# In[ ]:
for word in 'Ada', 'ada', 'Hello', 'hello':
print('{!r} in dictionary? {}'.format(word, word in words.words()))
# However there are two issues:
# - Checking membership is slow for a long list.
# - Both 'Hello' and 'ada' are English-like, but they are not in the word list.
# **Exercise** Using the method `lower` of `str` and the constructor `set`, assign `dictionary` to a set of lowercase English words from `words.words()`.
# In[ ]:
# Build the dictionary once as a set (not a list) of lowercase words:
# - membership tests with `in` are O(1) on average for a set, versus O(n)
#   for the ~234k-item list returned by words.words(), and
# - lowercasing every word lets lookups match tokens case-insensitively.
dictionary = set(word.lower() for word in words.words())
# In[ ]:
# tests
assert isinstance(dictionary,set) and len(dictionary) == 234377
assert all(word in dictionary for word in ('ada', 'hello'))
assert all(word not in dictionary for word in ('Ada', 'hola'))
### BEGIN TESTS
assert 'world' in dictionary
assert not 'mundo' in dictionary
### END TESTS
# ### Identify English-like text
# To determine how English-like a text is, we calculate the following score:
#
# $$
# \frac{\text{number of English words in the text}}{\text{number of tokens in the text}}
# $$
# where tokens are substrings (not necessarily an English word) separated by white space characters in the text.
# In[ ]:
def tokenizer(text):
    '''Returns the list of tokens of the text.'''
    # str.split() with no argument splits on runs of whitespace, so tokens
    # may still carry punctuation and uppercase letters (refined in a later
    # exercise).
    return text.split()
def get_score(text):
    '''Return the fraction of tokens which appear in dictionary.'''
    tokens = tokenizer(text)
    # Count dictionary hits directly instead of building an intermediate list
    # (which also shadowed the module-level name `words`).
    num_words = sum(1 for token in tokens if token in dictionary)
    return num_words / len(tokens)
# tests
get_score('hello world'), get_score('Hello, World!')
# As shown in tests above, the code fails to handle text with punctuations and uppercase letters properly.
# In particular,
# - while `get_score` recognizes `hello world` as English-like and returns the maximum score of 1,
# - it fails to recognize `Hello, World!` as English-like and returns the minimum score of 0.
# Why? This is because every word in `dictionary`
# - are in lowercase, and
# - have no leading/trailing punctuations.
# **Exercise** Define a function `tokenizer` that
# - takes a string `text` as an argument, and
# - returns a `list` of tokens obtained by
# 1. splitting `text` into a list using `split()`;
# 2. removing leading/trailing punctuations in `string.punctuation` using the `strip` method; and
# 3. converting all items of the list to lowercase using `lower()`.
# In[ ]:
import string
def tokenizer(text):
    '''Returns the list of tokens of the text such that
    1) each token has no leading or trailing spaces/punctuations, and
    2) all letters in each token are in lowercase.'''
    # split() discards the surrounding whitespace; strip() removes only
    # leading/trailing punctuation, so internal characters such as the
    # hyphen in 'jean-pierre' survive.
    return [token.strip(string.punctuation).lower() for token in text.split()]
# In[ ]:
# tests
assert tokenizer('Hello, World!') == ['hello', 'world']
assert get_score('Hello, World!') >= 0.99999
assert tokenizer('Do you know Jean-Pierre?') == ['do', 'you', 'know', 'jean-pierre']
assert get_score('Do you know Jean-Pierre?') >= 0.99999
# ### Launch a brute-force attack
# **Exercise** Define the function `cc_attack` that
# - takes as arguments
# - a string `ciphertext`,
# - a floating point number `threshold` in the interval $(0,1)$ with a default value of $0.6$, and
# - returns a generator that
# - generates one-by-one in ascending order guesses of the key that
# - decrypt `ciphertext` to texts with scores at least the `threshold`.
# In[ ]:
def cc_attack(ciphertext, threshold=0.6):
    '''Returns a generator that generates the next guess of the key that
    decrypts the ciphertext to a text with get_score(text) at least the threshold.

    Parameters
    ----------
    ciphertext (str): the Caesar-encrypted message to attack.
    threshold (float): minimum get_score value, in (0,1), for a decryption
        to be considered English-like (default 0.6).
    '''
    # Brute force: there are only cc_n possible keys, so try each one in
    # ascending order and yield those whose decryption looks English-like.
    for key in range(cc_n):
        if get_score(cc_decrypt(ciphertext, key)) >= threshold:
            yield key
# In[ ]:
# tests
ciphertext = cc_encrypt("Hello, World!",12345)
key_generator = cc_attack(ciphertext)
key_guess = next(key_generator)
assert key_guess == 12345
text = cc_decrypt(ciphertext, key_guess)
print('guess of the key: {}\nscore: {}\ntext :{}'.format(key_guess,get_score(text),text))
# ## Challenge
# Another symmetric key cipher is [columnar transposition cipher](https://en.wikipedia.org/wiki/Transposition_cipher#Columnar_transposition). A transposition cipher encrypts a text by permuting instead of substituting characters.
# **Exercise** Study and implement the irregular case of the [columnar transposition cipher](https://en.wikipedia.org/wiki/Transposition_cipher#Columnar_transposition) as described in Wikipedia page. Define the functions
# - `ct_encrypt(plaintext, key)` for encryption, and
# - `ct_decrypt(ciphertext, key)` for decryption.
#
# You can assume the plaintext is in uppercase and has no spaces/punctuations.
# *Hints:* See the test cases for an example of `plaintext`, `key`, and the corresponding `ciphertext`. You can but are not required to follow the solution template below:
#
# ```Python
# def argsort(seq):
# '''A helper function that returns the tuple of indices that would sort the
# sequence seq.'''
# return tuple(x[0] for x in sorted(enumerate(seq), key=lambda x: x[1]))
#
#
# def ct_idx(length, key):
# '''A helper function that returns the tuple of indices that would permute
# the letters of a message according to the key using the irregular case of
# columnar transposition cipher.'''
# seq = tuple(range(length))
# return [i for j in argsort(key) for i in _______________]
#
#
# def ct_encrypt(plaintext, key):
# '''
# Return the ciphertext of a plaintext by the key using the irregular case
# of columnar transposition cipher.
#
# Parameters
# ----------
# plaintext (str): a message in uppercase without punctuations/spaces.
# key (str): secret key to encrypt plaintext.
# '''
# return ''.join([plaintext[i] for i in ct_idx(len(plaintext), key)])
#
#
# def ct_decrypt(ciphertext, key):
# '''
# Return the plaintext of the ciphertext by the key using the irregular case
# of columnar transposition cipher.
#
# Parameters
# ----------
# ciphertext (str): a string in uppercase without punctuations/spaces.
# key (str): secret key to decrypt ciphertext.
# '''
# return _______________________________________________________________________
# ```
# In[ ]:
def argsort(seq):
    '''A helper function that returns the tuple of indices that would sort the
    sequence seq.'''
    return tuple(x[0] for x in sorted(enumerate(seq), key=lambda x: x[1]))


def ct_idx(length, key):
    '''A helper function that returns the list of indices that would permute
    the letters of a message according to the key using the irregular case of
    columnar transposition cipher.'''
    seq = tuple(range(length))
    # Writing the message row by row into len(key) columns, seq[j::len(key)]
    # is the j-th column; read the columns in the alphabetical order of the
    # key letters (given by argsort).
    return [i for j in argsort(key) for i in seq[j::len(key)]]


def ct_encrypt(plaintext, key):
    '''
    Return the ciphertext of a plaintext by the key using the irregular case
    of columnar transposition cipher.

    Parameters
    ----------
    plaintext (str): a message in uppercase without punctuations/spaces.
    key (str): secret key to encrypt plaintext.
    '''
    return ''.join([plaintext[i] for i in ct_idx(len(plaintext), key)])


def ct_decrypt(ciphertext, key):
    '''
    Return the plaintext of the ciphertext by the key using the irregular case
    of columnar transposition cipher.

    Parameters
    ----------
    ciphertext (str): a string in uppercase without punctuations/spaces.
    key (str): secret key to decrypt ciphertext.
    '''
    # Invert the permutation: position k of the ciphertext holds the
    # plaintext character at position ct_idx(...)[k], so scatter each
    # ciphertext character back to its original place.
    plaintext = [''] * len(ciphertext)
    for k, i in enumerate(ct_idx(len(ciphertext), key)):
        plaintext[i] = ciphertext[k]
    return ''.join(plaintext)
# In[ ]:
# tests
key = 'ZEBRAS'
plaintext = 'WEAREDISCOVEREDFLEEATONCE'
ciphertext = 'EVLNACDTESEAROFODEECWIREE'
assert ct_encrypt(plaintext, key) == ciphertext
assert ct_decrypt(ciphertext, key) == plaintext
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture7/Lists and Tuples.py
|
<reponame>ccha23/CS1302ICP<filename>_build/jupyter_execute/Lecture7/Lists and Tuples.py
#!/usr/bin/env python
# coding: utf-8
# # Lists and Tuples
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Motivation of composite data type
# The following code calculates the average of five numbers:
# In[2]:
def average_five_numbers(n1, n2, n3, n4, n5):
    '''Return the arithmetic mean of the five given numbers.'''
    total = n1 + n2 + n3 + n4 + n5
    return total / 5
average_five_numbers(1, 2, 3, 4, 5)
# What about using the above function to compute the average household income in Hong Kong.
# The labor size in Hong Kong in 2018 is close to 4 million.
# - Should we create a variable to store the income of each individual?
# - Should we recursively apply the function to groups of five numbers?
# What we need is
# - a *composite data type* that can keep a variable numbers of items, so that
# - we can then define a function that takes an object of the *composite data type*,
# - and returns the average of all items in the object.
# **How to store a sequence of items in Python?**
# `tuple` and `list` are two built-in classes for ordered collections of objects of possibly different types.
# Indeed, we have already used tuples and lists before.
# In[3]:
get_ipython().run_cell_magic('mytutor', '-h 300', "a_list = '1 2 3'.split()\na_tuple = (lambda *args: args)(1,2,3)\na_list[0] = 0\na_tuple[0] = 0")
# **What is the difference between tuple and list?**
# - List is [*mutable*](https://docs.python.org/3/library/stdtypes.html#index-21) so programmers can change its items.
# - Tuple is [*immutable*](https://docs.python.org/3/glossary.html#term-immutable) like `int`, `float`, and `str`, so
# - programmers can be certain the content stay unchanged, and
# - Python can preallocate a fixed amount of memory to store its content.
# ## Constructing sequences
# **How to create tuple/list?**
# Mathematicians often represent a set of items in two different ways:
# 1. [Roster notation](https://en.wikipedia.org/wiki/Set_(mathematics)#Roster_notation), which enumerates the elements in the sequence. E.g.,
# $$ \{0, 1, 4, 9, 16, 25, 36, 49, 64, 81\} $$
# 2. [Set-builder notation](https://en.wikipedia.org/wiki/Set-builder_notation), which describes the content using a rule for constructing the elements.
# $$ \{x^2| x\in \mathbb{N}, x< 10 \}, $$
# namely the set of perfect squares less than 100.
# Python also provides two corresponding ways to create a tuple/list:
# 1. [Enclosure](https://docs.python.org/3/reference/expressions.html?highlight=literals#grammar-token-enclosure)
# 2. [Comprehension](https://docs.python.org/3/reference/expressions.html#index-12)
# **How to create a tuple/list by enumerating its items?**
# To create a tuple, we enclose a comma separated sequence by parentheses:
# In[4]:
get_ipython().run_cell_magic('mytutor', '-h 450', "empty_tuple = ()\nsingleton_tuple = (0,) # why not (0)?\nheterogeneous_tuple = (singleton_tuple,\n (1, 2.0),\n print)\nenclosed_starred_tuple = (*range(2),\n *'23')")
# Note that:
# - If the enclosed sequence has one term, there must be a comma after the term.
# - The elements of a tuple can have different types.
# - The unpacking operator `*` can unpack an iterable into a sequence in an enclosure.
# To create a list, we use square brackets to enclose a comma separated sequence of objects.
# In[5]:
get_ipython().run_cell_magic('mytutor', '-h 450', "empty_list = []\nsingleton_list = [0] # no need to write [0,]\nheterogeneous_list = [singleton_list, \n (1, 2.0), \n print]\nenclosed_starred_list = [*range(2),\n *'23']")
# We can also create a tuple/list from other iterables using the constructors `tuple`/`list` as well as addition and multiplication similar to `str`.
# In[6]:
get_ipython().run_cell_magic('mytutor', '-h 950', "str2list = list('Hello')\nstr2tuple = tuple('Hello')\nrange2list = list(range(5))\nrange2tuple = tuple(range(5))\ntuple2list = list((1, 2, 3))\nlist2tuple = tuple([1, 2, 3])\nconcatenated_tuple = (1,) + (2, 3)\nconcatenated_list = [1, 2] + [3]\nduplicated_tuple = (1,) * 2\nduplicated_list = 2 * [1]")
# **Exercise** Explain the difference between following two expressions. Why a singleton tuple must have a comma after the item.
# In[7]:
print((1+2)*2,
(1+2,)*2, sep='\n')
# `(1+2)*2` evaluates to `6` but `(1+2,)*2` evaluates to `(3,3)`.
# - The parentheses in `(1+2)` indicate the addition needs to be performed first, but
# - the parentheses in `(1+2,)` creates a tuple.
#
# Hence, singleton tuple must have a comma after the item to differentiate these two use cases.
# **How to use a rule to construct a tuple/list?**
# We can specify the rule using a [comprehension](https://docs.python.org/3/reference/expressions.html#index-12),
# which we have used in a generator expression.
# E.g., the following is a python one-liner that returns a generator for prime numbers.
# In[8]:
get_ipython().run_line_magic('pinfo', 'all')
def prime_sequence(stop):
    '''Return a generator of the prime numbers in [2, stop).'''
    def is_prime(n):
        # n is prime iff no integer in [2, n) divides it (vacuously true for 2).
        return all(n % divisor != 0 for divisor in range(2, n))
    return (n for n in range(2, stop) if is_prime(n))
print(*prime_sequence(100))
# There are two comprehensions used:
# - In `all(x % divisor for divisor in range(2, x))`, the comprehension creates a generator of remainders to the function `all`, which returns true if all the remainders are `True` in boolean expression.
# - In the return value `(x for x in range(2, stop) if ...)` of the anonymous function, the comprehension creates a generator of numbers from 2 to `stop-1` that satisfy the condition of the `if` clause.
# **Exercise** Use comprehension to define a function `composite_sequence` that takes a non-negative integer `stop` and returns a generator of composite numbers strictly smaller than `stop`. Use `any` instead of `all` to check if a number is composite.
# In[9]:
get_ipython().run_line_magic('pinfo', 'any')
### BEGIN SOLUTION
def composite_sequence(stop):
    '''Return a generator of the composite numbers in [2, stop).'''
    # A number is composite when some integer in [2, n) divides it exactly.
    return (n for n in range(2, stop)
            if any(n % divisor == 0 for divisor in range(2, n)))
### END SOLUTION
print(*composite_sequence(100))
# We can construct a list instead of a generator using comprehension:
# In[10]:
print(list(x**2 for x in range(10))) # Use the list constructor
print([x**2 for x in range(10)]) # Enclose comprehension by brackets
# We can also use comprehension to construct a tuple:
# In[11]:
print(tuple(x**2 for x in range(10))) # Use the tuple constructor
# **Exercise** Explain the difference between the following expressions.
# In[12]:
print((x**2 for x in range(10)),
(*(x**2 for x in range(10)),), sep='\n')
# - The first is a generator expression, not a tuple.
# - The second is a tuple constructed by enclosing the sequence from unpacking the generator.
# There must be a comma after the generator since there is only one enclosed term, even though that term generates multiple items.
# **Exercise** Explain the difference between the following expressions.
# In[13]:
print([x for x in range(10)],
[(lambda arg: arg)(x for x in range(10))], sep='\n')
# - In the second expression, the comprehension provided as an argument to a function becomes a generator object,
# which is returned by the anonymous function and enclosed to form the singleton list.
# - In the first expression, the comprehension is not converted to a generator.
# With list comprehension, we can simulate a sequence of biased coin flips.
# In[14]:
from random import random as rand
p = rand() # unknown bias
coin_flips = ['H' if rand() <= p else 'T' for i in range(1000)]
print('Chance of head:', p)
print('Coin flips:',*coin_flips)
# We can then estimate the bias by the fraction of heads coming up.
# In[15]:
def average(seq):
    '''Return the arithmetic mean of the items in seq.'''
    total, count = sum(seq), len(seq)
    return total / count
head_indicators = [1 if outcome == 'H' else 0 for outcome in coin_flips]
fraction_of_heads = average(head_indicators)
print('Fraction of heads:', fraction_of_heads)
# Note that `sum` and `len` returns the sum and length of the sequence.
# **Exercise** Define a function `variance` that takes in a sequence `seq` and returns the [variance](https://en.wikipedia.org/wiki/Variance) of the sequence.
# In[16]:
def variance(seq):
    ### BEGIN SOLUTION
    # Population variance via the identity Var(X) = E[X^2] - (E[X])^2;
    # the mean is computed inline as sum(seq)/len(seq).
    mean_of_squares = sum(x ** 2 for x in seq) / len(seq)
    square_of_mean = (sum(seq) / len(seq)) ** 2
    return mean_of_squares - square_of_mean
    ### END SOLUTION
delta = (variance(head_indicators)/len(head_indicators))**0.5
print('95% confidence interval: [{:.2f},{:.2f}]'.format(p-2*delta,p+2*delta))
# ## Selecting items in a sequence
# **How to traverse a tuple/list?**
# Instead of calling the dunder method directly, we can use a for loop to iterate over all the items in order.
# In[17]:
a = (*range(5),)
for item in a: print(item, end=' ')
# To do it in reverse, we can use the `reversed` function.
# In[18]:
get_ipython().run_line_magic('pinfo', 'reversed')
a = [*range(5)]
for item in reversed(a): print(item, end=' ')
# We can also traverse multiple tuples/lists simultaneously by `zip`ping them.
# In[19]:
get_ipython().run_line_magic('pinfo', 'zip')
a = (*range(5),)
b = reversed(a)
for item1, item2 in zip(a,b):
print(item1,item2)
# **How to select an item in a sequence?**
# Sequence objects such as `str`/`tuple`/`list` implements the [*getter method* `__getitem__`](https://docs.python.org/3/reference/datamodel.html#object.__getitem__) to return their items.
# We can select an item by [subscription](https://docs.python.org/3/reference/expressions.html#subscriptions)
# ```Python
# a[i]
# ```
# where `a` is a list and `i` is an integer index.
# A non-negative index indicates the distance from the beginning.
# $$\boldsymbol{a} = (a_0, ... , a_{n-1})$$
# In[20]:
a = (*range(10),)
print(a)
print('Length:', len(a))
print('First element:',a[0])
print('Second element:',a[1])
print('Last element:',a[len(a)-1])
print(a[len(a)]) # IndexError
# `a[i]` with `i >= len(a)` results in an `IndexError`.
# A negative index represents a negative offset from an imaginary element one past the end of the sequence.
# $$\begin{aligned} \boldsymbol{a} &= (a_0, ... , a_{n-1})\\
# & = (a_{-n}, ..., a_{-1})
# \end{aligned}$$
# In[21]:
a = [*range(10)]
print(a)
print('Last element:',a[-1])
print('Second last element:',a[-2])
print('First element:',a[-len(a)])
print(a[-len(a)-1]) # IndexError
# `a[i]` with `i < -len(a)` results in an `IndexError`.
# **How to select multiple items?**
# We can use a [slicing](https://docs.python.org/3/reference/expressions.html#slicings) to select a range of items:
# ```Python
# a[start:stop]
# a[start:stop:step]
# ```
# where `a` is a list;
# - `start` is an integer representing the index of the starting item in the selection;
# - `stop` is an integer that is one larger than the index of the last item in the selection; and
# - `step` is an integer that specifies the step/stride size through the list.
# In[22]:
a = (*range(10),)
print(a[1:4])
print(a[1:4:2])
# The parameters take their default values if missing or equal to None.
# In[23]:
a = [*range(10)]
print(a[:4]) # start defaults to 0
print(a[1:]) # stop defaults to len(a)
print(a[1:4:]) # step defaults to 1
# They can take negative values.
# In[24]:
print(a[-1:])
print(a[:-1])
print(a[::-1])
# They can also take a mixture of negative and positive values.
# In[25]:
print(a[-1:1]) # equal [a[-1], a[0]]?
print(a[1:-1]) # equal []?
print(a[1:-1:-1]) # equal [a[1], a[0]]?
print(a[-100:100]) # result in IndexError like subscription?
# We can now implement a practical sorting algorithm called [quicksort](https://en.wikipedia.org/wiki/Quicksort) to sort a sequence.
# In[26]:
import random
def quicksort(seq):
    """Return a new list with the items of seq sorted in ascending order.

    Uses quicksort with a uniformly random pivot, so the expected running
    time is O(n log n) regardless of the input order.
    """
    if len(seq) <= 1:
        return list(seq)  # base case: already sorted
    pivot_index = random.randint(0, len(seq) - 1)
    pivot = seq[pivot_index]
    rest = [*seq[:pivot_index], *seq[pivot_index + 1:]]
    smaller = [item for item in rest if item < pivot]
    larger_or_equal = [item for item in rest if item >= pivot]
    return [*quicksort(smaller), pivot, *quicksort(larger_or_equal)]
seq = [random.randint(0, 99) for i in range(10)]
print(seq, quicksort(seq), sep='\n')
# The above recursion creates a sorted list as `[*left, pivot, *right]` where
# - `pivot` is a randomly picked an item in `seq`,
# - `left` is the sorted list of items smaller than `pivot`, and
# - `right` is the sorted list of items no smaller than `pivot`.
#
# The base case happens when `seq` contains at most one item, in which case `seq` is already sorted.
# There is a built-in function `sorted` for sorting a sequence. It uses the [Timsort](https://en.wikipedia.org/wiki/Timsort) algorithm.
# In[27]:
get_ipython().run_line_magic('pinfo', 'sorted')
sorted(sorted(seq))
# ## Mutating a list
# For list (but not tuple), subscription and slicing can also be used as the target of an assignment operation to mutate the list.
# In[28]:
get_ipython().run_cell_magic('mytutor', '-h 300', 'b = [*range(10)] # aliasing\nb[::2] = b[:5]\nb[0:1] = b[:5]\nb[::2] = b[:5] # fails')
# Last assignment fails because `[::2]` with step size not equal to `1` is an *extended slice*, which can only be assigned to a list of equal size.
# **What is the difference between mutation and aliasing?**
# In the previous code:
# - The first assignment `b = [*range(10)]` is aliasing, which gives the list the target name/identifier `b`.
# - Other assignments such as `b[::2] = b[:5]` are mutations that [calls `__setitem__`](https://docs.python.org/3/reference/simple_stmts.html#assignment-statements) because the target `b[::2]` is not an identifier.
# **Exercise** Explain the outcome of the following checks of equivalence?
# In[29]:
get_ipython().run_cell_magic('mytutor', '-h 400', "a = [10, 20, 30, 40]\nb = a\nprint('a is b? {}'.format(a is b))\nprint('{} == {}? {}'.format(a, b, a == b))\nb[1:3] = b[2:0:-1]\nprint('{} == {}? {}'.format(a, b, a == b))")
# - `a is b` and `a == b` returns `True` because the assignment `b = a` makes `b` an alias of the same object `a` points to.
# - In particular, the operation`b[1:3] = b[2:0:-1]` affects the same list `a` points to.
# **Why mutate a list?**
# The following is another implementation of `composite_sequence` that takes advantage of the mutability of list.
# In[30]:
def sieve_composite_sequence(stop):
    """Return a generator of the composite numbers in range(4, stop).

    Sieve of Eratosthenes: mark every multiple of each prime as composite,
    then yield the marked numbers in order.
    """
    is_composite = [False] * stop  # is_composite[x] iff x is known composite
    for factor in range(2, stop):
        if is_composite[factor]: continue  # composite factors add no new marks
        # Start at factor*factor: smaller multiples were already marked by a
        # smaller prime factor (this was factor*2 before, doing redundant work).
        for multiple in range(factor * factor, stop, factor):
            is_composite[multiple] = True
    return (x for x in range(4, stop) if is_composite[x])
for x in sieve_composite_sequence(100): print(x, end=' ')
# The algorithm
# 1. changes `is_composite[x]` from `False` to `True` if `x` is a multiple of a smaller number `factor`, and
# 2. returns a generator that generates composite numbers according to `is_composite`.
# **Exercise** Is `sieve_composite_sequence` more efficient than your solution `composite_sequence`? Why?
# In[31]:
for x in composite_sequence(10000): pass
# In[32]:
for x in sieve_composite_sequence(1000000): pass
# The line `if is_composite[factor]: continue` avoids the redundant computations of checking composite factors.
# **Exercise** Note that the multiplication operation `*` is the most efficient way to [initialize a 1D list with a specified size](https://www.geeksforgeeks.org/python-which-is-faster-to-initialize-lists/), but we should not use it to initialize a 2D list. Fix the following code so that `a` becomes `[[1, 0], [0, 1]]`.
# In[33]:
get_ipython().run_cell_magic('mytutor', '-h 250', 'a = [[0] * 2] * 2\na[0][0] = a[1][1] = 1\nprint(a)')
# In[34]:
### BEGIN SOLUTION
a = [[0] * 2 for i in range(2)]
### END SOLUTION
a[0][0] = a[1][1] = 1
print(a)
# ## Different methods to operate on a sequence
# The following compares the lists of public attributes for `tuple` and `list`.
# - We determine membership using the [operator `in` or `not in`](https://docs.python.org/3/reference/expressions.html#membership-test-operations).
# - Different from the [keyword `in` in a for loop](https://docs.python.org/3/reference/compound_stmts.html#the-for-statement), operator `in` calls the method `__contains__`.
# In[35]:
list_attributes = dir(list)
tuple_attributes = dir(tuple)
print(
'Common attributes:', ', '.join([
attr for attr in list_attributes
if attr in tuple_attributes and attr[0] != '_'
]))
print(
'Tuple-specific attributes:', ', '.join([
attr for attr in tuple_attributes
if attr not in list_attributes and attr[0] != '_'
]))
print(
'List-specific attributes:', ', '.join([
attr for attr in list_attributes
if attr not in tuple_attributes and attr[0] != '_'
]))
# - There are no public tuple-specific attributes, and
# - all the list-specific attributes are methods that mutate the list, except `copy`.
# The common attributes
# - `count` method returns the number of occurrences of a value in a tuple/list, and
# - `index` method returns the index of the first occurrence of a value in a tuple/list.
# In[36]:
get_ipython().run_cell_magic('mytutor', '-h 300', 'a = (1,2,2,4,5)\nprint(a.index(2))\nprint(a.count(2))')
# `reverse` method reverses the list instead of returning a reversed list.
# In[37]:
get_ipython().run_cell_magic('mytutor', '-h 300', 'a = [*range(10)]\nprint(reversed(a))\nprint(*reversed(a))\nprint(a.reverse())')
# - `copy` method returns a copy of a list.
# - `tuple` does not have the `copy` method but it is easy to create a copy by slicing.
# In[38]:
get_ipython().run_cell_magic('mytutor', '-h 400', 'a = [*range(10)]\nb = tuple(a)\na_reversed = a.copy()\na_reversed.reverse()\nb_reversed = b[::-1]')
# `sort` method sorts the list *in place* instead of returning a sorted list.
# In[39]:
get_ipython().run_cell_magic('mytutor', '-h 300', 'import random\na = [random.randint(0,10) for i in range(10)]\nprint(sorted(a))\nprint(a.sort())')
# - `extend` method that extends a list instead of creating a new concatenated list.
# - `append` method adds an object to the end of a list.
# - `insert` method insert an object to a specified location.
# In[40]:
get_ipython().run_cell_magic('mytutor', '-h 300', "a = b = [*range(5)]\nprint(a + b)\nprint(a.extend(b))\nprint(a.append('stop'))\nprint(a.insert(0,'start'))")
# - `pop` method deletes and return the last item of the list.
# - `remove` method removes the first occurrence of a value in the list.
# - `clear` method clears the entire list.
#
# We can also use the function `del` to delete a selection of a list.
# In[41]:
get_ipython().run_cell_magic('mytutor', '-h 300', 'a = [*range(10)]\ndel a[::2]\nprint(a.pop())\nprint(a.remove(5))\nprint(a.clear())')
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Review/Review Questions.py
|
<reponame>ccha23/CS1302ICP
#!/usr/bin/env python
# coding: utf-8
# # Review Questions
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Dictionaries and Sets
# **Exercise (Concatenate two dictionaries with precedence)** Define a function `concat_two_dicts` that accepts two arguments of type `dict` such that `concat_two_dicts(a, b)` will return a new dictionary containing all the items in `a` and the items in `b` that have different keys than those in `a`. The input dictionaries should not be mutated.
# In[2]:
def concat_two_dicts(a, b):
    """Return a new dict with all items of a, plus items of b whose keys are not in a.

    Neither input dictionary is mutated.
    """
    ### BEGIN SOLUTION
    merged = dict(b)   # start from a copy of b ...
    merged.update(a)   # ... then let a's items take precedence
    return merged
    ### END SOLUTION
# In[3]:
#tests
a={'x':10, 'z':30}; b={'y':20, 'z':40}
a_copy = a.copy(); b_copy = b.copy()
assert concat_two_dicts(a, b) == {'x': 10, 'z': 30, 'y': 20}
assert concat_two_dicts(b, a) == {'x': 10, 'z': 40, 'y': 20}
assert a == a_copy and b == b_copy
### BEGIN HIDDEN TESTS
a={'x':10, 'z':30}; b={'y':20}
a_copy = a.copy(); b_copy = b.copy()
assert concat_two_dicts(a, b) == {'x': 10, 'z': 30, 'y': 20}
assert concat_two_dicts(b, a) == {'x': 10, 'z': 30, 'y': 20}
assert a == a_copy and b == b_copy
### END HIDDEN TESTS
# - `{**dict1,**dict2}` creates a new dictionary by unpacking the dictionaries `dict1` and `dict2`.
# - By default, `dict2` overwrites `dict1` if they have identical keys.
# **Exercise (Count characters)** Define a function `count_characters` which
# - accepts a string and counts the numbers of each character in the string, and
# - returns a dictionary that stores the results.
# In[4]:
def count_characters(string):
    """Return a dict mapping each character of string to its number of occurrences."""
    ### BEGIN SOLUTION
    # collections.Counter does the per-character tally at C speed; convert
    # back to a plain dict so the return type matches the original contract.
    from collections import Counter
    return dict(Counter(string))
    ### END SOLUTION
# In[5]:
# tests
assert count_characters('abcbabc') == {'a': 2, 'b': 3, 'c': 2}
assert count_characters('aababcccabc') == {'a': 4, 'b': 3, 'c': 4}
### BEGIN HIDDEN TESTS
assert count_characters('abcdefgabc') == {'a': 2, 'b': 2, 'c': 2, 'd': 1, 'e': 1, 'f': 1, 'g': 1}
assert count_characters('ab43cb324abc') == {'2': 1, '3': 2, '4': 2, 'a': 2, 'b': 3, 'c': 2}
### END HIDDEN TESTS
# - Create an empty dictionary `counts`.
# - Use a `for` loop to iterate over each character of `string` to count their numbers of occurrences.
# - The `get` method of `dict` can initialize the count of a new character before incrementing it.
# **Exercise (Count non-Fibonacci numbers)** Define a function `count_non_fibs` that
# - accepts a container as an argument, and
# - returns the number of items in the container that are not [fibonacci numbers](https://en.wikipedia.org/wiki/Fibonacci_number).
# In[6]:
def count_non_fibs(container):
    """Return the number of distinct items in container that are not Fibonacci numbers."""
    ### BEGIN SOLUTION
    # Build the set of Fibonacci numbers up to the largest item.
    fibs = set()
    current, following = 0, 1
    limit = max(container)
    while current <= limit:
        fibs.add(current)
        current, following = following, current + following
    # Whatever is left after removing the Fibonacci numbers is the answer.
    return len(set(container) - fibs)
    ### END SOLUTION
# In[7]:
# tests
assert count_non_fibs([0, 1, 2, 3, 5, 8]) == 0
assert count_non_fibs({13, 144, 99, 76, 1000}) == 3
### BEGIN HIDDEN TESTS
assert count_non_fibs({5, 8, 13, 21, 34, 100}) == 1
assert count_non_fibs({0.1, 0}) == 1
### END HIDDEN TESTS
# - Create a set of Fibonacci numbers up to the maximum of the items in the container.
# - Use `difference_update` method of `set` to create a set of items in the container but not in the set of Fibonacci numbers.
# **Exercise (Calculate total salaries)** Suppose `salary_dict` contains information about the name, salary, and working time about employees in a company. An example of `salary_dict` is as follows:
# ```Python
# salary_dict = {
# 'emp1': {'name': 'John', 'salary': 15000, 'working_time': 20},
# 'emp2': {'name': 'Tom', 'salary': 16000, 'working_time': 13},
# 'emp3': {'name': 'Jack', 'salary': 15500, 'working_time': 15},
# }
# ```
#
# Define a function `calculate_total` that accepts `salary_dict` as an argument, and returns a `dict` that uses the same keys in `salary_dict` but the total salaries as their values. The total salary of an employee is obtained by multiplying his/her salary and his/her working_time.
# E.g.,, for the `salary_dict` example above, `calculate_total(salary_dict)` should return
# ```Python
# {'emp1': 300000, 'emp2': 208000, 'emp3': 232500}.
# ```
# where the total salary of `emp1` is $15000 \times 20 = 300000$.
# In[8]:
def calculate_total(salary_dict):
    """Return a dict mapping each employee key in salary_dict to salary * working_time."""
    ### BEGIN SOLUTION
    totals = {}
    for employee, record in salary_dict.items():
        totals[employee] = record['salary'] * record['working_time']
    return totals
    ### END SOLUTION
# In[9]:
# tests
salary_dict = {
'emp1': {'name': 'John', 'salary': 15000, 'working_time': 20},
'emp2': {'name': 'Tom', 'salary': 16000, 'working_time': 13},
'emp3': {'name': 'Jack', 'salary': 15500, 'working_time': 15},
}
assert calculate_total(salary_dict) == {'emp1': 300000, 'emp2': 208000, 'emp3': 232500}
### BEGIN HIDDEN TESTS
salary_dict = {
'emp1': {'name': 'John', 'salary': 15000, 'working_time': 20},
'emp2': {'name': 'Tom', 'salary': 16000, 'working_time': 13},
'emp3': {'name': 'Jack', 'salary': 15500, 'working_time': 15},
'emp4': {'name': 'Bob', 'salary': 20000, 'working_time': 10}
}
assert calculate_total(salary_dict) == {'emp1': 300000, 'emp2': 208000, 'emp3': 232500, 'emp4': 200000}
### END HIDDEN TESTS
# - Use `items` method of `dict` to return the list of key values pairs, and
# - use a dictionary comprehension to create the desired dictionary by iterating through the list of items.
# **Exercise (Delete items with value 0 in dictionary)** Define a function `zeros_removed` that
# - takes a dictionary as an argument,
# - mutates the dictionary to remove all the keys associated with values equal to `0`,
# - and return `True` if at least one key is removed else `False`.
# In[10]:
def zeros_removed(d):
    """Remove, in place, every key of d whose value equals 0.

    Returns True if at least one key was removed, False otherwise.
    """
    ### BEGIN SOLUTION
    # Snapshot the doomed keys first: deleting entries while iterating d
    # directly raises "dictionary changed size during iteration".
    zero_keys = [key for key, val in d.items() if val == 0]
    for key in zero_keys:
        del d[key]
    return bool(zero_keys)
    ### END SOLUTION
# In[11]:
# tests
d = {'a':0, 'b':1, 'c':0, 'd':2}
assert zeros_removed(d) == True
assert zeros_removed(d) == False
assert d == {'b': 1, 'd': 2}
### BEGIN HIDDEN TESTS
d = {'a':0, 'b':1, 'c':0, 'd':2, 'e':0, 'f':'0'}
assert zeros_removed(d) == True
assert zeros_removed(d) == False
assert d == {'b': 1, 'd': 2, 'f':'0'}
### END HIDDEN TESTS
# - The main issue is that, for any dicionary `d`,
# ```Python
# for k in d:
# if d[k] == 0: del d[k]
# ```
# raises the [`RuntimeError: dictionary changed size during iteration`](https://www.geeksforgeeks.org/python-delete-items-from-dictionary-while-iterating/).
# - One solution is to duplicate the list of keys, but this is memory inefficient especially when the list of keys is large.
# - Another solution is to record the list of keys to delete before the actual deletion. This is memory efficient if the list of keys to delete is small.
# **Exercise (Fuzzy search a set)** Define a function `search_fuzzy` that accepts two arguments `myset` and `word` such that
# - `myset` is a `set` of `str`s;
# - `word` is a `str`; and
# - `search_fuzzy(myset, word)` returns `True` if `word` is in `myset` by changing at most one character in `word`, and returns `False` otherwise.
# In[12]:
def search_fuzzy(myset, word):
    """Return True iff some string in myset has the same length as word and
    differs from it in at most one character position."""
    ### BEGIN SOLUTION
    for candidate in myset:
        if len(candidate) != len(word):
            continue  # different lengths can never match within one edit
        mismatches = sum(1 for a, b in zip(candidate, word) if a != b)
        if mismatches <= 1:
            return True
    return False
    ### END SOLUTION
# In[13]:
# tests
assert search_fuzzy({'cat', 'dog'}, 'car') == True
assert search_fuzzy({'cat', 'dog'}, 'fox') == False
### BEGIN HIDDEN TESTS
myset = {'cat', 'dog', 'dolphin', 'rabbit', 'monkey', 'tiger'}
assert search_fuzzy(myset, 'lion') == False
assert search_fuzzy(myset, 'cat') == True
assert search_fuzzy(myset, 'cat ') == False
assert search_fuzzy(myset, 'fox') == False
assert search_fuzzy(myset, 'ccc') == False
### END HIDDEN TESTS
# - Iterate over each word in `myset`.
# - Check whether the length of the word is the same as that of the word in the arguments.
# - If the above check passes, use a list comprehension check if the words differ by at most one character.
# **Exercise (Get keys by value)** Define a function `get_keys_by_value` that accepts two arguments `d` and `value` where `d` is a dictionary, and returns a set containing all the keys in `d` that have `value` as its value. If no key has the query value `value`, then return an empty set.
# In[14]:
def get_keys_by_value(d, value):
    """Return the set of keys in d whose associated value equals value (possibly empty)."""
    ### BEGIN SOLUTION
    return set(key for key, val in d.items() if val == value)
    ### END SOLUTION
# In[15]:
# tests
d = {'Tom':'99', 'John':'88', 'Lucy':'100', 'Lily':'90', 'Jason':'89', 'Jack':'100'}
assert get_keys_by_value(d, '99') == {'Tom'}
### BEGIN HIDDEN TESTS
d = {'Tom':'99', 'John':'88', 'Lucy':'100', 'Lily':'90', 'Jason':'89', 'Jack':'100'}
assert get_keys_by_value(d, '100') == {'Jack', 'Lucy'}
d = {'Tom':'99', 'John':'88', 'Lucy':'100', 'Lily':'90', 'Jason':'89', 'Jack':'100'}
assert get_keys_by_value(d, '0') == set()
### END HIDDEN TESTS
# - Use set comprehension to create the set of keys whose associated values is `value`.
# **Exercise (Count letters and digits)** Define a function `count_letters_and_digits` which
# - take a string as an argument,
# - returns a dictionary that stores the number of letters and digits in the string using the keys 'LETTERS' and 'DIGITS' respectively.
# In[16]:
def count_letters_and_digits(string):
    """Return {'LETTERS': ..., 'DIGITS': ...} counting letters and digits in string."""
    ### BEGIN SOLUTION
    counts = {'LETTERS': 0, 'DIGITS': 0}
    for char in string:
        if char.isalpha():
            counts['LETTERS'] += 1
        elif char.isdigit():  # a character is never both a letter and a digit
            counts['DIGITS'] += 1
    return counts
    ### END SOLUTION
# In[17]:
assert count_letters_and_digits('hello world! 2020') == {'DIGITS': 4, 'LETTERS': 10}
assert count_letters_and_digits('I love CS1302') == {'DIGITS': 4, 'LETTERS': 7}
### BEGIN HIDDEN TESTS
assert count_letters_and_digits('Hi CityU see you in 2021') == {'DIGITS': 4, 'LETTERS': 15}
assert count_letters_and_digits('When a dog runs at you, whistle for him. (Philosopher <NAME>, 1817-1862)') == {'DIGITS': 8, 'LETTERS': 58}
### END HIDDEN TESTS
# - Use the class method `fromkeys` of `dict` to initial the dictionary of counts.
# **Exercise (Dealers with lowest price)** Suppose `apple_price` is a list in which each element is a `dict` recording the dealer and the corresponding price, e.g.,
# ```Python
# apple_price = [{'dealer': 'dealer_A', 'price': 6799},
# {'dealer': 'dealer_B', 'price': 6749},
# {'dealer': 'dealer_C', 'price': 6798},
# {'dealer': 'dealer_D', 'price': 6749}]
# ```
# Define a function `dealers_with_lowest_price` that takes `apple_price` as an argument, and returns the `set` of dealers providing the lowest price.
# In[18]:
def dealers_with_lowest_price(apple_price):
    """Return the set of dealers in apple_price that offer the lowest price.

    apple_price is a list of dicts with 'dealer' and 'price' keys.
    """
    ### BEGIN SOLUTION
    price_to_dealers = {}
    cheapest = None
    for entry in apple_price:
        price = entry['price']
        if cheapest is None or price < cheapest:
            cheapest = price  # track the minimum price seen so far
        price_to_dealers.setdefault(price, set()).add(entry['dealer'])
    return price_to_dealers[cheapest]
    ### END SOLUTION
# In[19]:
# tests
apple_price = [{'dealer': 'dealer_A', 'price': 6799},
{'dealer': 'dealer_B', 'price': 6749},
{'dealer': 'dealer_C', 'price': 6798},
{'dealer': 'dealer_D', 'price': 6749}]
assert dealers_with_lowest_price(apple_price) == {'dealer_B', 'dealer_D'}
### BEGIN HIDDEN TESTS
apple_price = [{'dealer': 'dealer_A', 'price': 6799},
{'dealer': 'dealer_B', 'price': 6799},
{'dealer': 'dealer_C', 'price': 6799},
{'dealer': 'dealer_D', 'price': 6799}]
assert dealers_with_lowest_price(apple_price) == {'dealer_A', 'dealer_B', 'dealer_C', 'dealer_D'}
### END HIDDEN TESTS
# - Use the class method `setdefault` of `dict` to create a dictionary that maps different prices to different sets of dealers.
# - Compute the lowest price at the same time.
# - Alternatively, use comprehension to find lowest price and then create the desired set of dealers with the lowest price.
#
# ## Lists and Tuples
# **Exercise** (Binary addition) Define a function `add_binary` that
# - accepts two arguments of type `str` which represent two non-negative binary numbers, and
# - returns the binary number in `str` equal to the sum of the two given binary numbers.
# In[20]:
def add_binary(*binaries):
    """Return the binary string equal to the sum of the given non-negative binary strings."""
    ### BEGIN SOLUTION
    def to_int(bits):
        # Horner's rule: fold the bit string, most significant bit first.
        value = 0
        for bit in bits:
            value = value * 2 + int(bit)
        return value

    def to_bits(value):
        # Peel off least significant bits; a zero sum is the string '0'.
        if not value:
            return '0'
        digits = []
        while value:
            digits.append(str(value % 2))
            value //= 2
        return ''.join(reversed(digits))

    return to_bits(sum(to_int(binary) for binary in binaries))
    ### END SOLUTION
# In[21]:
# tests
assert add_binary('0', '0') == '0'
assert add_binary('11', '11') == '110'
assert add_binary('101', '101') == '1010'
### BEGIN HIDDEN TESTS
assert add_binary('1111', '10') == '10001'
assert add_binary('111110000011','110000111') == '1000100001010'
### END HIDDEN TESTS
# - Use comprehension to convert the binary numbers to decimal numbers.
# - Use comprehension to convert the sum of the decimal numbers to a binary number.
# - Alternatively, perform bitwise addition using a recursion or iteration.
# **Exercise (Even-digit numbers)** Define a function `even_digit_numbers`, which finds all numbers between `lower_bound` and `upper_bound` such that each digit of the number is an even number. Please return the numbers as a list.
# In[22]:
def even_digit_numbers(lower_bound, upper_bound):
    """Return the list of integers in [lower_bound, upper_bound) whose decimal digits are all even."""
    ### BEGIN SOLUTION
    result = []
    for number in range(lower_bound, upper_bound):
        if all(int(digit) % 2 == 0 for digit in str(number)):
            result.append(number)
    return result
    ### END SOLUTION
# In[23]:
# tests
assert even_digit_numbers(1999, 2001) == [2000]
assert even_digit_numbers(2805, 2821) == [2806,2808,2820]
### BEGIN HIDDEN TESTS
assert even_digit_numbers(1999, 2300) == [2000,2002,2004,2006,2008,2020,2022,2024,2026,2028,2040,2042,2044,2046,2048,2060,2062,2064,2066,2068,2080,2082,2084,2086,2088,2200,2202,2204,2206,2208,2220,2222,2224,2226,2228,2240,2242,2244,2246,2248,2260,2262,2264,2266,2268,2280,2282,2284,2286,2288]
assert even_digit_numbers(8801, 8833) == [8802,8804,8806,8808,8820,8822,8824,8826,8828]
assert even_digit_numbers(3662, 4001) == [4000]
### END HIDDEN TESTS
# - Use list comprehension to generate numbers between the bounds, and
# - use comprehension and the `any` function to filter out those numbers containing odd digits.
# **Exercise (Maximum subsequence sum)** Define a function `max_subsequence_sum` that
# - accepts as an argument a sequence of numbers, and
# - returns the maximum sum over nonempty contiguous subsequences.
#
# E.g., when `[-6, -4, 4, 1, -2, 2]` is given as the argument, the function returns `5` because the nonempty subsequence `[4, 1]` has the maximum sum `5`.
# In[24]:
def max_subsequence_sum(a):
    """Return the maximum sum over nonempty contiguous subsequences of a.

    Kadane's algorithm, O(len(a)) time: `tail` is the best sum of a
    subsequence ending at the current item, `best` the best seen overall.

    Fix: the previous version initialized the answer to 0 and so returned 0
    for an all-negative sequence, contradicting the "nonempty subsequence"
    specification; it now returns the largest element in that case.
    An empty sequence still returns 0, as before.
    """
    ### BEGIN SOLUTION
    it = iter(a)
    try:
        best = tail = next(it)  # seed with the first item, not 0
    except StopIteration:
        return 0  # preserve the original behavior on empty input
    for x in it:
        tail = max(tail + x, x)  # extend the tail or restart at x
        best = max(best, tail)
    return best
    ### END SOLUTION
# In[25]:
# tests
assert max_subsequence_sum([-6, -4, 4, 1, -2, 2]) == 5
assert max_subsequence_sum([2.5, 1.4, -2.5, 1.4, 1.5, 1.6]) == 5.9
### BEGIN HIDDEN TESTS
seq = [-24.81, 25.74, 37.29, -8.77, 0.78, -15.33, 30.21, 34.94, -40.64, -20.06]
assert round(max_subsequence_sum(seq),2) == 104.86
### END HIDDEN TESTS
# In[26]:
# test of efficiency
assert max_subsequence_sum([*range(1234567)]) == 762077221461
# - For a list $[a_0,a_1,\dots]$, let
#
# $$
# t_k:=\max_{j<k} \sum_{i=j}^{k-1} a_i = \max\{t_{k-1}+a_{k-1},0\},
# $$
# namely the maximum tail sum of $[a_0,\dots,a_{k-1}]$.
# - Then, the maximum subsequence sum of $[a_0,\dots,a_{k-1}]$ is
#
# $$
# s_k:=\max_{j\leq k} t_j.
# $$
# **Exercise (Mergesort)** *For this question, do not use the `sort` method or `sorted` function.*
#
# Define a function called `merge` that
#
# - takes two sequences sorted in ascending orders, and
# - returns a sorted list of items from the two sequences.
#
# Then, define a function called `mergesort` that
#
# - takes a sequence, and
# - return a list of items from the sequence sorted in ascending order.
#
# The list should be constructed by
#
# - recursive calls to `mergesort` the first and second halves of the sequence individually, and
# - merge the sorted halves.
# In[27]:
def merge(left, right):
    """Return a sorted list of all items from the two ascending sequences.

    Iterative two-pointer merge in O(len(left) + len(right)) time. The
    previous recursive version recursed once per item (RecursionError on
    inputs longer than the recursion limit) and rebuilt lists by slicing
    and concatenation (quadratic time). Ties keep items from `left` first.
    """
    ### BEGIN SOLUTION
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of the two extends below adds anything.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
    ### END SOLUTION
# In[28]:
def mergesort(seq):
    """Return a list of the items of seq sorted in ascending order (merge sort).

    Recursively sorts each half of the sequence, then merges the two
    sorted halves with `merge`.
    """
    ### BEGIN SOLUTION
    if len(seq) < 2:
        return list(seq)  # zero or one item: already sorted
    mid = len(seq) // 2
    left, right = mergesort(seq[:mid]), mergesort(seq[mid:])
    return merge(left, right)
    ### END SOLUTION
# In[29]:
# tests
assert merge([1,3],[2,4]) == [1,2,3,4]
assert mergesort([3,2,1]) == [1,2,3]
### BEGIN HIDDEN TESTS
assert mergesort([3,5,2,4,2,1]) == [1,2,2,3,4,5]
### END HIDDEN TESTS
# ## More Functions
# **Exercise (Arithmetic geometric mean)** Define a function `arithmetic_geometric_mean_sequence` which
#
# - takes two floating point numbers `x` and `y` and
# - returns a generator that generates the tuple \\((a_n, g_n)\\) where
#
# $$
# \begin{aligned}
# a_0 &= x, g_0 = y \\
# a_n &= \frac{a_{n-1} + g_{n-1}}2 \quad \text{for }n>0\\
# g_n &= \sqrt{a_{n-1} g_{n-1}}
# \end{aligned}
# $$
# In[30]:
def arithmetic_geometric_mean_sequence(x, y):
    """Yield the arithmetic-geometric mean tuples (a_n, g_n) forever.

    a_0 = x and g_0 = y; thereafter a_n is the arithmetic mean and g_n the
    geometric mean of the previous pair.
    """
    ### BEGIN SOLUTION
    arithmetic, geometric = x, y
    while True:
        yield arithmetic, geometric
        # Compute both means from the *current* pair simultaneously.
        arithmetic, geometric = (arithmetic + geometric) / 2, (arithmetic * geometric)**0.5
    ### END SOLUTION
# In[31]:
# tests
agm = arithmetic_geometric_mean_sequence(6,24)
assert [next(agm) for i in range(2)] == [(6, 24), (15.0, 12.0)]
### BEGIN HIDDEN TESTS
agm = arithmetic_geometric_mean_sequence(100,400)
for sol, ans in zip([next(agm) for i in range(5)], [(100, 400), (250.0, 200.0), (225.0, 223.60679774997897), (224.30339887498948, 224.30231718318308), (224.30285802908628, 224.30285802843423)]):
for a, b in zip(sol,ans):
assert round(a,5) == round(b,5)
### END HIDDEN TESTS
# - Use the `yield` expression to return each tuple of $(a_n,g_n)$ efficiently without redundant computations.
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab3a/Improved Quadratic Equation Solver.py
|
#!/usr/bin/env python
# coding: utf-8
# # Improved Quadratic Equation Solver
# **CS1302 Introduction to Computer Programming**
# ___
# In this notebook, we will improve the quadratic equation solver in the previous lab using conditional executions.
# First of all, run the following to setup the environment.
# In[ ]:
get_ipython().run_line_magic('reset', '-f')
from ipywidgets import interact
import math
# ## Zero Discriminant
# Recall that the quadratic equation is
#
# $$
# ax^2+bx+c=0
# $$
# where $a$, $b$, and $c$ are real-valued coefficients, and $x$ is the unknown variable. The roots are normally given by
#
# $$
# \frac{-b-\sqrt{b^2-4ac}}{2a}, \frac{-b+\sqrt{b^2-4ac}}{2a}.
# $$
# The roots are the same (repeated) when the discriminant $b^2-4ac$ is zero.
# **Exercise** Assign to `roots` only one root when the discriminant is zero. E.g., if $(a,b,c)=(1,-2,1)$, then `roots` should be assigned the value `1.0` instead of `1.0, 1.0`. If there are two roots, give them in the order of the above formula.
# *Hint*: Use the [`if` statement](https://docs.python.org/3/reference/compound_stmts.html#if).
# *Hint:* The following is a solution template with some missing code. You are NOT required to follow the template.
# ```Python
# def get_roots(a, b, c):
# d = b**2 - 4 * a * c # discriminant
# if math.isclose(d, 0):
# roots = __________ # repeated root
# else:
# d **= 0.5
# roots = __________________________________
# return roots
# ```
# In[ ]:
def get_roots(a, b, c):
    """Return the root(s) of a*x**2 + b*x + c = 0.

    Exercise stub: the student's code must assign `roots` a single float
    when the discriminant is (close to) zero, else a tuple of two roots.
    """
    d = b**2 - 4 * a * c # discriminant
    if math.isclose(d, 0):
        # YOUR CODE HERE
        raise NotImplementedError()
    return roots  # NOTE(review): `roots` is assigned by the student's solution code
# In[ ]:
# tests
def test_get_roots(roots, a, b, c):
    """Assert that get_roots(a, b, c) matches the expected `roots`.

    `roots` may be None (no root), a float (single/repeated root), or a
    tuple of two floats; float values are compared with math.isclose.
    """
    roots_ = get_roots(a, b, c)
    if roots is None:
        correct = roots_ is None
    elif isinstance(roots, float):
        correct = isinstance(roots_, float) and math.isclose(roots, roots_)
    else:
        # The comprehension targets deliberately reuse the names
        # `root`/`roots_` for the paired-up individual roots from zip.
        correct = isinstance(roots_, tuple) and len(roots_) == 2 and all([
            math.isclose(root, roots_) for root, roots_ in zip(roots, roots_)
        ])
    if not correct:
        print(f'With (a, b, c)={a,b,c}, roots should be {roots} not {roots_}.')
    assert correct
test_get_roots((-1.0, 0.0), 1, 1, 0)
test_get_roots(0.0, 1, 0, 0)
# **Exercise** Why use `math.isclose(d,0)` instead of `d == 0`?
# YOUR ANSWER HERE
# ## Linear Equation
# If $a=0$, the earlier formula for the roots are invalid due to division by zero. Nevertheless, the equation remains valid:
#
# $$
# bx + c=0.
# $$
# **Exercise** Improve the function `get_roots` to return the root $-\frac{c}{b}$ if $a=0$.
# *Hint:* Solution template:
# ```Python
# def get_roots(a, b, c):
# d = b**2 - 4 * a * c # discriminant
# if __________________:
# roots = ______
# elif math.isclose(d, 0):
# roots = __________ # repeated root
# else:
# d **= 0.5
# roots = __________________________________
# return roots
# ```
# In[ ]:
def get_roots(a, b, c):
    """Return the root(s) of a*x**2 + b*x + c = 0, handling the linear case a == 0.

    Exercise stub: the student's code must assign `roots` (-c/b when a == 0,
    one float for a repeated root, else a tuple of two roots).
    """
    d = b**2 - 4 * a * c
    # YOUR CODE HERE
    raise NotImplementedError()
    return roots  # NOTE(review): `roots` is assigned by the student's solution code
# In[ ]:
# tests
def test_get_roots(roots, a, b, c):
    """Assert that get_roots(a, b, c) matches the expected `roots`.

    `roots` may be None (no root), a float (single/repeated root), or a
    tuple of two floats; float values are compared with math.isclose.
    """
    roots_ = get_roots(a, b, c)
    if roots is None:
        correct = roots_ is None
    elif isinstance(roots, float):
        correct = isinstance(roots_, float) and math.isclose(roots, roots_)
    else:
        # The comprehension targets deliberately reuse the names
        # `root`/`roots_` for the paired-up individual roots from zip.
        correct = isinstance(roots_, tuple) and len(roots_) == 2 and all([
            math.isclose(root, roots_) for root, roots_ in zip(roots, roots_)
        ])
    if not correct:
        print(f'With (a, b, c)={a,b,c}, roots should be {roots} not {roots_}.')
    assert correct
test_get_roots((-1.0, -0.0), 1, 1, 0)
test_get_roots(0.0, 1, 0, 0)
test_get_roots(0.5, 0, -2, 1)
# ## Degenerate Cases
# What if $a=b=0$? In that case, the equation becomes
#
# $$
# c = 0
# $$
# which is always satisfied if $c=0$, but never satisfied if $c\neq 0$.
# **Exercise** Improve the function `get_roots` to return root(s) under all cases:
# - If $a=0$ and $b\neq 0$, assign `roots` to the single root $-\frac{c}{b}$.
# - If $a=b=0$ and $c\neq 0$, assign `roots` to `None`.
# Note that `None` is an object, not a string.
# - If $a=b=c=0$, there are infinitely many roots. Assign to `roots` the tuple `-float('inf'), float('inf')`.
# Note that `float('inf')` converts the string `'inf'` to a floating point value that represents $\infty$.
# *Hint:* Use nested `if` statements such as the followings (with the blanks filled in properly):
# ```Python
# def get_roots(a, b, c):
# d = b**2 - 4 * a * c
# if __________________:
# if __________________:
# if __________________:
# roots = -float('inf'), float('inf')
# else:
# roots = None
# else:
# ______________
# elif math.isclose(d, 0):
# roots = __________ # repeated root
# else:
# d **= 0.5
# roots = __________________________________
# return roots
# ```
# In[ ]:
def get_roots(a, b, c):
    """Return the root(s) of a*x**2 + b*x + c = 0 for all degenerate cases.

    Exercise stub: the student's code must assign `roots` as described in the
    notebook (None when a == b == 0 != c; (-inf, inf) when a == b == c == 0;
    -c/b when only a == 0; one float for a repeated root; else two roots).
    """
    d = b**2 - 4 * a * c
    # YOUR CODE HERE
    raise NotImplementedError()
    return roots  # NOTE(review): `roots` is assigned by the student's solution code
# In[ ]:
# tests
def test_get_roots(roots, a, b, c):
    """Assert that get_roots(a, b, c) matches the expected `roots`.

    `roots` may be None (no root), a float (single/repeated root), or a
    tuple of two floats; float values are compared with math.isclose.
    """
    roots_ = get_roots(a, b, c)
    if roots is None:
        correct = roots_ is None
    elif isinstance(roots, float):
        correct = isinstance(roots_, float) and math.isclose(roots, roots_)
    else:
        # The comprehension targets deliberately reuse the names
        # `root`/`roots_` for the paired-up individual roots from zip.
        correct = isinstance(roots_, tuple) and len(roots_) == 2 and all([
            math.isclose(root, roots_) for root, roots_ in zip(roots, roots_)
        ])
    if not correct:
        print(f'With (a, b, c)={a,b,c}, roots should be {roots} not {roots_}.')
    assert correct
test_get_roots((-1.0, 0.0), 1, 1, 0)
test_get_roots(0.0, 1, 0, 0)
test_get_roots((-float('inf'), float('inf')), 0, 0, 0)
test_get_roots(None, 0, 0, 1)
test_get_roots(0.5, 0, -2, 1)
test_get_roots(1.0, 1, -2, 1)
# ## Run the calculator
# After you have complete the exercises, you can run your robust solver below:
# In[ ]:
# quadratic equations solver
@interact(a=(-10,10,1),b=(-10,10,1),c=(-10,10,1))
def quadratic_equation_solver(a=1,b=2,c=1):
    """Interactively print the root(s) of a*x**2 + b*x + c = 0 via get_roots.

    The @interact decorator (ipywidgets) turns a, b, c into notebook sliders
    over [-10, 10] with step 1.
    """
    print('Root(s):',get_roots(a,b,c))
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab6/Combinatorics.py
|
#!/usr/bin/env python
# coding: utf-8
# # Combinatorics
# **CS1302 Introduction to Computer Programming**
# ___
# ## Permutation using Recursion
# A [$k$-permutation of $n$](https://en.wikipedia.org/wiki/Permutation#k-permutations_of_n) items $a_0,\dots,a_{n-1}$ is an ordered tuple
#
# $$
# (a_{i_0},\dots,a_{i_{k-1}})
# $$
# of $k$ out of the $n$ objects, where $0\leq i_0,\dots,i_{k-1}<n$ are distinct indices. An $n$-permutation of $n$ objects is simply called a permutation of $n$ objects.
# For examples:
# - The list of ($3$-)permutations of `1,2,3` is:
# In[ ]:
[(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]
# - The list of $2$-permutations of `1,2,3` is:
# In[ ]:
[(1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2)]
# In the above, we used
# - a `tuple` delimited by `()` such as `(1,2,3)` to store items of a permutation, and
# - a `list` delimited by `[]` such as `[(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)]` to store all the permutations.
# Generating permutations is a fundamental problem in computing and combinatorics.
# A simple way to generate permutations is by recursion. (There are also [other algorithms](https://www.topcoder.com/generating-permutations/).)
# **Recurrence relation (Line 10):**
# - Removing the first element of a $k$-permutation of $n$ objects gives a different $(k-1)$-permutation of the remaining $n-1$ objects.
#
# $$ (a_{i_0}, \underbrace{a_{i_1},\dots,a_{i_{k-1}}}_{\text{($k-1$)-permutation of $a_{i_1},\dots,a_{i_{k-1}}$.}\kern-5em} ) $$
# - Reversing the above removal process gives a way of constructing a $k$-permutation from a $(k-1)$-permutation.
# E.g., the permutations of $1,2,3$ can be constructed as follows:
#
# $$[\overbrace{({\color{red} 1}, {\color{blue}{2, 3}}), ({\color{red} 1}, {\color{blue}{3, 2}})}^{\text{prepend 1 to permutations of $2,3$.} }, \overbrace{({\color{red} 2}, {\color{blue}{1, 3}}), ({\color{red} 2}, {\color{blue}{3, 1}})}^{\text{prepend 2 to permutations of $1,3$.} }, \overbrace{({\color{red} 3}, {\color{blue}{1, 2}}), ({\color{red} 3}, {\color{blue}{2, 1}})}^{\text{prepend 3 to permutations of $1,2$.} }]$$
# The following is an implementation of the recursion `permutation(*a,k=None)` that
# - takes in a variable number $n$ of objects as positional arguments (in `a`),
# - takes in an integer $k$ using a keyword argument (`k`, with the default `k=None` for $k=n$), and
# - returns the list of all $k$-permutations represented as ordered tuples of the $n$ objects.
# In[ ]:
def permutation(*a, k=None):
    """Return the list of k-permutations (as tuples) of the positional
    arguments, in the order produced by picking each argument in turn as
    the head and recursing on the rest.

    k defaults to len(a) (full permutations). k == 0 gives [()]; any
    out-of-range k (negative or greater than len(a)) gives [].
    """
    n = len(a)
    if k is None:
        k = n
    if k == 0:
        return [()]
    if not 0 < k <= n:
        return []
    result = []
    for pos, head in enumerate(a):
        remaining = a[:pos] + a[pos + 1:]
        for tail in permutation(*remaining, k=k - 1):
            result.append((head,) + tail)
    return result
print(permutation(1, 2, 3))
print(permutation(1, 2, 3, k=2))
# The recurrence is implemented by the for loop:
# ```Python
# ...
# for i in range(n):
# output.extend([(a[i], ) + p
# for p in permutation(*a[:i], *a[i + 1:], k=k - 1)])
# ...
# ```
# In the above code, `(a[i], ) + p` creates a $k$-permutation of the items in `a` by concatenating for each index `i`
# - a singleton tuple `(a[i], )` and
# - a $k-1$ permutation `p` of all elements but `a[i]`.
#
# (See the example in the recurrence relation described earlier.)
# Note that:
# - The comma in `(a[i], )` is not a typo. Without commas, `(...)` does not create a tuple.
# - `a[:i]` returns a tuple of `a[0]` up to and excluding `a[i]`. `*a[:i]` unpacks the tuple such that its items are separate arguments to `permutation`.
# - Similarly, `*a[i + 1:]` provides items as separate arguments starting from `a[i + 1]` until the last item in `a`.
# - `[... for ...]` generates a list of $k$-permutations, and `output.extend([...])` added the list to the end of the `output` list.
# **Exercise** One of the base cases of the recursion happens when $k=0$, in which case there is only one $k$-permutation, namely the empty tuple $()$, and so the function returns `[()]`. There is another base case of the recursion. Explain the condition of that base case and its return value.
# YOUR ANSWER HERE
# ## Number of permutations
# Computing permutations using recursion is slow. Why?
# There are simply too many permutations even for a reasonably small $n$.
# In[ ]:
n = 9
output = permutation(*range(1,n+1))
print('# permutations:',len(output))
# Surprisingly, the number $P_n$ of ($n-$)permutations of $n$ items can be calculated much faster without enumerating all the permutations. It satisfies the following recurrence:
#
# $$
# P_n = \begin{cases}
# n P_{n-1} & n>0\\
# 1 & n=0\\
# 0 & \text{otherwise.}
# \end{cases}
# $$
#
# This quantity is fundamental in the field of [combinatorics](https://en.wikipedia.org/wiki/Combinatorics) with enormous applications.
# **Exercise** Implement the above recurrence equation as a recursion `num_permutation(n)` which
# - takes in an integer `n`, and
# - returns the number of permutations of `n` items.
#
# *Hint: Ensure all base cases are covered, and can run efficiently for large $n$.*
# In[ ]:
def num_permutation(n):
    """Return the number of permutations of n items.

    Implements P(n) = n * P(n-1), P(0) = 1, and P(n) = 0 for n < 0,
    iteratively so it is efficient even for large n (no recursion limit).
    """
    if n < 0:
        return 0
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
# In[ ]:
# tests
assert num_permutation(10) == 3628800
assert num_permutation(0) == 1
assert num_permutation(-1) == 0
# **Exercise** Extend the function to `num_permutation(n,k=None)` which
# - takes in an additional optional keyword argument `k`, and
# - returns the INTEGER number of `k`-permutations of `n` items.
#
# The number is given by the formula
#
# $$P_{n,k} = \begin{cases}
# \frac{P_n}{P_{n-k}} & 0\leq k \leq n\\
# 0 & \text{otherwise.}
# \end{cases}$$
# In[ ]:
def num_permutation(n, k=None):
    """Return the integer number of k-permutations of n items.

    P(n, k) = n! / (n - k)! for 0 <= k <= n, and 0 otherwise.
    k defaults to n, giving n!. Computed as the falling product
    (n-k+1) * ... * n so the result stays an exact int.
    """
    if k is None:
        k = n
    if not 0 <= k <= n:
        return 0
    result = 1
    for factor in range(n - k + 1, n + 1):
        result *= factor
    return result
# In[ ]:
# tests
assert isinstance(num_permutation(0), int)
assert num_permutation(3) == 6
assert num_permutation(3,0) == 1
assert num_permutation(3,2) == 6
assert num_permutation(10,5) == 30240
# ## Permutation using Iteration
# The following function `permutation_sequence(*a)` returns a generator that generates the list of $k$-permutations one-by-one for $k$ from $0$ to `len(a)`.
# In[ ]:
def permutation_sequence(*a):
    """Generate, for k = 0, 1, ..., len(a), the list of k-permutations
    (as tuples) of the positional arguments."""
    n = len(a)
    # Invariant at each yield: prefixes[m] is a k-permutation and
    # unused[m] holds the argument indices not yet used by prefixes[m].
    prefixes, unused = [()], [tuple(range(n))]
    for _ in range(n + 1):
        yield prefixes
        longer, longer_unused = [], []
        for prefix, choices in zip(prefixes, unused):
            for pos, idx in enumerate(choices):
                longer.append(prefix + (a[idx], ))
                longer_unused.append(choices[:pos] + choices[pos + 1:])
        prefixes, unused = longer, longer_unused
for permutation_list in permutation_sequence(1, 2, 3):
print(permutation_list)
# In[ ]:
a=tuple(range(3))
print(a)
# Unlike the recursion `permutation`, the above generates a $k$-permutation $(a_{i_0},\dots,a_{i_{k-1}})$ of $n$ objects iteratively by
# - choosing $i_j$ for $j$ from $0$ to $k-1$ such that
# - $i_j$ is not already chosen, i.e., $i_j\not\in \{i_0,\dots,i_{j-1}\}$.
# E.g., the permutations of $1,2,3$ is generated iteratively as follows:
# - 1
# - 1,2
# - **(1,2,3)**
# - 1,3
# - **(1,3,2)**
# - 2
# - 2,1
# - **(2,1,3)**
# - 2,3
# - **(2,3,1)**
# - 3
# - 3,1
# - **(3,1,2)**
# - 3,2
# - **(3,2,1)**
# **Invariance maintained at the beginning of iteration:**
# - `output` is the list of $k$-permutations, and
# - `idx_left[m]` is the list of indices of arguments not yet in `output[m]`.
#
# A $(k+1)$-permutation (in `next_output`) can then be generated by (Line 10) appending an argument (with an index from `idx_left`) to a $k$-permutation (in `output`).
# Is iteration significantly faster?
# In[ ]:
n = 9
for k, permutation_list in enumerate(permutation_sequence(*range(1,n+1))):
print('# {}-permutations of {} items: {}'.format(k, n, len(permutation_list)))
# Unfortunately, there is not much improvement. Nevertheless, we can efficiently compute the number of $k$-permutations based on the previously computed number of $k-1$-permutations:
#
# For $k$ from $0$ to $n$,
#
# $$
# P_{n,k} = \underbrace{\overbrace{n\times (n-1)\times \cdots }^{P_{n,k-1}\text{ if }k>0}\times(n-k+1)}_{\text{$k$ terms in the product.}}.$$
# **Exercise** Use the `yield` statement to write the function `num_permutation_sequence(n)` that returns a generator of $P_{n,k}$ with `k` from `0` to `n`.
# In[ ]:
def num_permutation_sequence(n):
    """Yield P(n, k) for k from 0 to n, computed incrementally.

    Uses P(n, 0) = 1 and P(n, k) = P(n, k-1) * (n - k + 1), so each
    value costs one multiplication.
    """
    output = 1
    for k in range(0, n + 1):
        yield output
        # next value: P(n, k+1) = P(n, k) * (n - k)
        output *= n - k
# In[ ]:
# tests
assert [m for m in num_permutation_sequence(3)] == [1, 3, 6, 6]
assert [m for m in num_permutation_sequence(10)] == [1, 10, 90, 720, 5040, 30240, 151200, 604800, 1814400, 3628800, 3628800]
# **Exercise (Challenge)** Extend the function `num_permutation_sequence(n)` so that calling `send(0)` method causes the generator to increment $n$ instead of $k$ for the next number to generate. i.e., for $0\leq k \leq n$,
#
# $$\dots P_{n,k-1}\to P_{n,k} \xrightarrow{\text{send(0)}} P_{n+1,k} \to P_{n+1,k+1}\dots$$
# where $\to$ without labels is the normal transition without calling the `send` method.
#
# *Hint:*
#
# $$P_{n+1,k}=P_{n,k} \times \frac{n+1}{n-k+1}.$$
# In[ ]:
def num_permutation_sequence(n):
    """Yield P(n, k) for k from 0 to n, computed incrementally.

    Calling next() advances k; calling send(0) advances n instead
    (keeping k), so the next value yielded is P(n+1, k):
        ... P(n,k-1) -> P(n,k) --send(0)--> P(n+1,k) -> P(n+1,k+1) ...
    """
    output = 1
    k = 0
    while k <= n:
        command = yield output
        if command == 0:
            # P(n+1, k) = P(n, k) * (n+1) / (n-k+1); division is exact
            output = output * (n + 1) // (n - k + 1)
            n += 1
        else:
            # P(n, k+1) = P(n, k) * (n - k)
            output *= n - k
            k += 1
# In[ ]:
# tests
g = num_permutation_sequence(3)
assert (next(g), next(g), g.send(0), next(g), next(g), next(g), g.send(0)) == (1, 3, 4, 12, 24, 24, 120)
# ## Deduplication using Decorator
# An issue with the function `permutation` is that it regards arguments at different positions as distinct even if they may have the same value. E.g.,
# `permutation(1,1,2)` returns `[(1, 1, 2), (1, 2, 1), (1, 1, 2), (1, 2, 1), (2, 1, 1), (2, 1, 1)]`
# where each distinct permutation appears twice.
# To remove duplicates from a list, we can
# - convert the list to a `set` (which automatically remove duplicates),
# - and then convert the set back to a list.
# In[ ]:
print('Deduplicated:',list(set(permutation(1,1,2))))
# Using a decorator, we can fix `permutation` without rewriting the function.
# In[ ]:
import functools
def deduplicate_output(f):
    """Decorator: wrap f (which returns a list, possibly with duplicates)
    so that the wrapped call returns the list with duplicates removed.

    Duplicates are dropped via a set, so element order is not preserved.
    """
    @functools.wraps(f)
    def deduped(*args, **kwargs):
        raw = f(*args, **kwargs)
        return list(set(raw))
    return deduped
permutation = deduplicate_output(permutation)
print('Deduplicated: ', permutation(1, 1, 2))
permutation = permutation.__wrapped__
print('Original: ', permutation(1, 1, 2))
# **Exercise:** Create a decorator to eliminate duplicate input positional arguments instead of the output, i.e.,
# `permutation(1,1,2)` will return the same result as `permutation(1,2)`.
# In[ ]:
def deduplicate_input(f):
    '''Decorator: wrap f (which takes a variable number of positional
    arguments, possibly with duplicates) so that duplicate positional
    arguments are removed before f is called.

    dict.fromkeys removes duplicates while preserving first-seen order
    (arguments must be hashable, as with the set-based output version).
    '''
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*dict.fromkeys(args), **kwargs)
    return wrapper
# In[ ]:
# tests
permutation = deduplicate_input(permutation)
assert set(permutation(1,1,2)) == set([(1, 2), (2, 1)])
permutation = permutation.__wrapped__
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab2/Calculators.py
|
#!/usr/bin/env python
# coding: utf-8
# # Calculators
# **CS1302 Introduction to Computer Programming**
# ___
# Run the following to load additional tools required for this lab.
# In particular, the `math` library provides many useful mathematical functions and constants.
# In[ ]:
get_ipython().run_line_magic('reset', '-f')
from ipywidgets import interact
import matplotlib.pyplot as plt
import numpy as np
import math
from math import log, exp, sin, cos, tan, pi
# The following code is a Python one-liner that creates a calculator.
# Evaluate the cell with `Ctrl+Enter`:
# In[ ]:
print(eval(input()))
# Try some calculations below using this calculator:
# 1. $2^3$ by entering `2**3`;
# 1. $\frac23$ by entering `2/3`;
# 1. $\left\lceil\frac32\right\rceil$ by entering `3//2`;
# 1. $3\mod 2$ by entering `3%2`;
# 1. $\sqrt{2}$ by entering `2**0.5`; and
# 1. $\sin(\pi/6)$ by entering `sin(pi/6)`;
# For this lab, you will create more powerful and dedicated calculators.
# We will first show you a demo. Then, it will be your turn to create the calculators.
# ## Hypotenuse Calculator (Demo)
# 
# Using the Pythagoras theorem below, we can define the following function `length_of_hypotenuse` to calculate the length `c` of the hypotenuse given the lengths `a` and `b` of the other sides of a right-angled triangle:
# $$c = \sqrt{a^2 + b^2}$$
# In[ ]:
def length_of_hypotenuse(a, b):
    """Return the hypotenuse length of a right triangle with legs a and b."""
    squared_sum = a * a + b * b  # Pythagoras: c**2 = a**2 + b**2
    return squared_sum ** 0.5
# - You need not understand how a function is defined, but
# - you should know how to *write the formula as a Python expression*, and
# - *assign to the variable* `c` the value of the expression (Line 2).
# For example, you may be asked to write Line 2, while Line 1 and 3 are given to you:
# **Exercise** Complete the function below to return the length `c` of the hypotenuse given the lengths `a` and `b`.
# In[ ]:
def length_of_hypotenuse(a, b):
    """Return the hypotenuse length c of a right triangle with legs a, b."""
    c = (a**2 + b**2)**0.5  # Pythagoras theorem
    return c
# Note that indentation affects the execution of Python code. The assignment statement must be indented to indicate that it is part of the *body* of the function.
# (Try removing the indentation and see what happens.)
# We will use widgets (`ipywidgets`) to let user interact with the calculator more easily:
# In[ ]:
# hypotenuse calculator
@interact(a=(0, 10, 1), b=(0, 10, 1))
def calculate_hypotenuse(a=3, b=4):
    '''Print the hypotenuse length for slider-chosen legs a and b,
    rounded to 2 decimal places for display only.'''
    print('c: {:.2f}'.format(length_of_hypotenuse(a, b)))
# After running the above cell, you can move the sliders to change the values of `a` and `b`. The value of `c` will be updated immediately.
# - For this lab, you need not know how write widgets, but
# - you should know how to *format a floating point number* (Line 3).
# You can check your code with a few cases listed in the test cell below.
# In[ ]:
# tests
def test_length_of_hypotenuse(a, b, c):
    """Assert that length_of_hypotenuse(a, b) is (approximately) c."""
    c_ = length_of_hypotenuse(a, b)
    # isclose tolerates floating-point rounding in the square root
    correct = math.isclose(c, c_)
    if not correct:
        print(f'For a={a} and b={b}, c should be {c}, not {c_}.')
    assert correct
# sample checks: 3-4-5 triangle, degenerate zero case, irrational result
test_length_of_hypotenuse(3, 4, 5)
test_length_of_hypotenuse(0, 0, 0)
test_length_of_hypotenuse(4, 7, 8.06225774829855)
# ## Quadratic Equation
# ### Graphical Calculator for Parabola
# 
# In mathematics, the collection of points $(x,y)$ satisfying the following equation forms a *parabola*:
#
# $$
# y=ax^2+bx+c
# $$
# where $a$, $b$, and $c$ are real numbers called the *coefficients*.
# **Exercise** Given the variables `x`, `a`, `b`, and `c` store the $x$-coordinate and the coefficients $a$, $b$, and $c$ respectively, assign to `y` the corresponding $y$-coordinate for the parabola.
# In[ ]:
def get_y(x, a, b, c):
    """Return the y-coordinate of the parabola y = a*x**2 + b*x + c at x.

    Works elementwise when x is a numpy array (as used by the plotting
    widget below), since only arithmetic operators are applied to x.
    """
    y = a * x**2 + b * x + c
    return y
# In[ ]:
# tests
def test_get_y(y,x,a,b,c):
    """Assert that get_y(x, a, b, c) is (approximately) the expected y."""
    y_ = get_y(x,a,b,c)
    correct = math.isclose(y,y_)
    if not correct:
        print(f'With (x, a, b, c)={x,a,b,c}, y should be {y} not {y_}.')
    assert correct
test_get_y(0,0,0,0,0)
test_get_y(1,0,1,2,1)
test_get_y(2,0,2,1,2)
# In[ ]:
# graphical calculator for parabola
@interact(a=(-10, 10, 1), b=(-10, 10, 1), c=(-10, 10, 1))
def plot_parabola(a, b, c):
    '''Plot y = a*x**2 + b*x + c over [-10, 10] for the slider-chosen
    integer coefficients, sampling the curve at 50 points.'''
    xmin, xmax, ymin, ymax, resolution = -10, 10, -10, 10, 50
    ax = plt.gca()
    ax.set_title(r'$y=ax^2+bx+c$')
    ax.set_xlabel(r'$x$')
    ax.set_ylabel(r'$y$')
    ax.set_xlim([xmin, xmax])
    ax.set_ylim([ymin, ymax])
    ax.grid()
    # get_y is applied elementwise to the sampled x values
    x = np.linspace(xmin, xmax, resolution)
    ax.plot(x, get_y(x, a, b, c))
# ### Quadratic Equation Solver
# 
# For the quadratic equation
#
# $$
# ax^2+bx+c=0,
# $$
# the *roots* (solutions for $x$) are give by
#
# $$
# \frac{-b-\sqrt{b^2-4ac}}{2a},\frac{-b+\sqrt{b^2-4ac}}{2a}.
# $$
# **Exercise** Assign to `root1` and `root2` the values of the first and second roots above respectively.
# In[ ]:
def get_roots(a, b, c):
    """Return the two roots of a*x**2 + b*x + c = 0 via the quadratic
    formula, smaller root first.

    Assumes a != 0 and a non-negative discriminant (the robust,
    all-cases version is developed in a later lab).
    """
    d = (b**2 - 4 * a * c)**0.5
    root1 = (-b - d) / (2 * a)
    root2 = (-b + d) / (2 * a)
    return root1, root2
# In[ ]:
# tests
def test_get_roots(roots, a, b, c):
    """Assert get_roots(a, b, c) equals the expected 2-tuple roots."""
    roots_ = get_roots(a, b, c)
    # compare both roots with floating-point tolerance
    correct = all([math.isclose(roots[i], roots_[i]) for i in range(2)])
    if not correct:
        print(f'With (a, b, c)={a,b,c}, roots should be {roots} not {roots_}.')
    assert correct
test_get_roots((-1.0, 0.0), 1, 1, 0)
test_get_roots((-1.0, -1.0), 1, 2, 1)
test_get_roots((-2.0, -1.0), 1, 3, 2)
# In[ ]:
# quadratic equations solver
@interact(a=(-10,10,1),b=(-10,10,1),c=(-10,10,1))
def quadratic_equation_solver(a=1,b=2,c=1):
    '''Print both roots of a*x**2 + b*x + c = 0 for the slider-chosen
    integer coefficients (assumes get_roots returns a 2-tuple).'''
    print('Roots: {}, {}'.format(*get_roots(a,b,c)))
# ## Number Conversion
# ### Byte-to-Decimal Calculator
# 
# Denote a binary number stored as a byte ($8$-bit) as
#
# $$
# b_7\circ b_6\circ b_5\circ b_4\circ b_3\circ b_2\circ b_1\circ b_0,
# $$
# where $\circ$ concatenates $b_i$'s together into a binary string.
# The binary string can be converted to a decimal number by the formula
#
# $$
# b_7\cdot 2^7 + b_6\cdot 2^6 + b_5\cdot 2^5 + b_4\cdot 2^4 + b_3\cdot 2^3 + b_2\cdot 2^2 + b_1\cdot 2^1 + b_0\cdot 2^0.
# $$
# E.g., the binary string `'11111111'` is the largest integer represented by a byte:
#
# $$
# 2^7+2^6+2^5+2^4+2^3+2^2+2^1+2^0=255=2^8-1.
# $$
# **Exercise** Assign to `decimal` the *integer* value represented by a byte.
# The byte is a sequence of bits assigned to the variables `b7,b6,b5,b4,b3,b2,b1,b0` as *characters*, either `'0'` or `'1'`.
# In[ ]:
def byte_to_decimal(b7, b6, b5, b4, b3, b2, b1, b0):
    """Return the integer value of the byte b7...b0.

    Each argument is the character '0' or '1'; b7 is the most
    significant bit. Uses Horner's rule: d = ((b7*2 + b6)*2 + ...).
    """
    decimal = 0
    for bit in (b7, b6, b5, b4, b3, b2, b1, b0):
        decimal = decimal * 2 + int(bit)
    return decimal
# In[ ]:
# tests
def test_byte_to_decimal(decimal, b7, b6, b5, b4, b3, b2, b1, b0):
    """Assert byte_to_decimal(b7..b0) equals decimal and returns an int."""
    decimal_ = byte_to_decimal(b7, b6, b5, b4, b3, b2, b1, b0)
    correct = decimal == decimal_ and isinstance(decimal_, int)
    if not correct:
        print(
            f'{b7}{b6}{b5}{b4}{b3}{b2}{b1}{b0} should give {decimal} not {decimal_}.'
        )
    assert correct
test_byte_to_decimal(38, '0', '0', '1', '0', '0', '1', '1', '0')
test_byte_to_decimal(20, '0', '0', '0', '1', '0', '1', '0', '0')
test_byte_to_decimal(22, '0', '0', '0', '1', '0', '1', '1', '0')
# In[ ]:
# byte-to-decimal calculator
bit = ['0', '1']  # dropdown options for each bit widget
@interact(b7=bit, b6=bit, b5=bit, b4=bit, b3=bit, b2=bit, b1=bit, b0=bit)
def convert_byte_to_decimal(b7, b6, b5, b4, b3, b2, b1, b0):
    '''Print the decimal value of the byte b7...b0 (most significant
    bit first) chosen via the dropdown widgets.'''
    print('decimal:', byte_to_decimal(b7, b6, b5, b4, b3, b2, b1, b0))
# ### Decimal-to-Byte Calculator
# 
# **Exercise** Assign to `byte` a *string of 8 bits* that represents the value of `decimal`, a non-negative decimal integer from $0$ to $2^8-1=255$.
# *Hint: Use `//` and `%`.*
# In[ ]:
def decimal_to_byte(decimal):
    """Return the 8-character '0'/'1' string representing decimal.

    decimal must be an integer in [0, 255]. Extracts bits least
    significant first with % and //, prepending each to the result.
    """
    byte = ''
    for _ in range(8):
        byte = str(decimal % 2) + byte
        decimal //= 2
    return byte
# In[ ]:
# tests
def test_decimal_to_byte(byte, decimal):
    """Assert decimal_to_byte(decimal) returns the 8-bit string byte."""
    byte_ = decimal_to_byte(decimal)
    # Validate the RETURNED value (byte_), not the expected argument, so
    # a wrong return type or length is actually caught. (The original
    # checked isinstance/len on `byte`, the expected value.)
    correct = byte == byte_ and isinstance(byte_, str) and len(byte_) == 8
    if not correct:
        print(
            f'{decimal} should be represented as the byte {byte}, not {byte_}.'
        )
    assert correct
test_decimal_to_byte('01100111', 103)
test_decimal_to_byte('00000011', 3)
test_decimal_to_byte('00011100', 28)
# In[ ]:
# decimal-to-byte calculator
@interact(decimal=(0,255,1))
def convert_decimal_to_byte(decimal=0):
    '''Print the 8-bit binary string for the slider-chosen decimal
    value in [0, 255].'''
    print('byte:', decimal_to_byte(decimal))
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture2/Values and Variables.py
|
<reponame>ccha23/CS1302ICP
#!/usr/bin/env python
# coding: utf-8
# # Values and Variables
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Integers
# **How to enter an [integer](https://docs.python.org/3/reference/lexical_analysis.html#integer-literals) in a program?**
# In[2]:
15 # an integer in decimal
# In[3]:
0b1111 # a binary number
# In[4]:
0xF # hexadecimal (base 16) with possible digits 0, 1,2,3,4,5,6,7,8,9,A,B,C,D,E,F
# **Why all outputs are the same?**
# - What you have entered are *integer literals*, which are integers written out literally.
# - All the literals have the same integer value in decimal.
# - By default, if the last line of a code cell has a value, the jupyter notebook (*IPython*) will store and display the value as an output.
# In[5]:
3 # not the output of this cell
4 + 5 + 6
# - The last line above also has the same value, `15`.
# - It is an *expression* (but not a literal) that *evaluates* to the integer value.
# **Exercise** Enter an expression that evaluates to an integer value, as big as possible.
# (You may need to interrupt the kernel if the expression takes too long to evaluate.)
# In[6]:
# There is no maximum for an integer for Python3.
# See https://docs.python.org/3.1/whatsnew/3.0.html#integers
11 ** 100000
# ## Strings
# **How to enter a [string](https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals) in a program?**
# In[7]:
'\U0001f600: I am a string.' # a sequence of characters delimited by single quotes.
# In[8]:
"\N{grinning face}: I am a string." # delimited by double quotes.
# In[9]:
"""\N{grinning face}: I am a string.""" # delimited by triple single/double quotes.
# - `\` is called the *escape symbol*.
# - `\U0001f600` and `\N{grinning face}` are *escape sequences*.
# - These sequences represent the same grinning face emoji by its Unicode in hexadecimal and its name.
# **Why use different quotes?**
# In[10]:
print('I\'m line #1.\nI\'m line #2.') # \n is a control code for line feed
print("I'm line #3.\nI'm line #4.") # no need to escape single quote.
print('''I'm line #5.
I'm line #6.''') # multi-line string
# Note that:
# - The escape sequence `\n` does not represent any symbol.
# - It is a *control code* that creates a new line when printing the string.
# - Another common control code is `\t` for tab.
# Using double quotes, we need not escape the single quote in `I'm`.
# Triple quotes delimit a multi-line string, so there is no need to use `\n`.
# (You can copy and paste a multi-line string from elsewhere.)
# In programming, there are often many ways to do the same thing.
# The following is a one-line code ([one-liner](https://en.wikipedia.org/wiki/One-liner_program)) that prints multiple lines of strings without using `\n`:
# In[11]:
print("I'm line #1", "I'm line #2", "I'm line #3", sep='\n') # one liner
# - `sep='\n'` is a *keyword argument* that specifies the separator of the list of strings.
# - By default, `sep=' '`, a single space character.
# In IPython, we can get the *docstring* (documentation) of a function conveniently using the symbol `?`.
# In[12]:
get_ipython().run_line_magic('pinfo', 'print')
# In[13]:
get_ipython().run_line_magic('pinfo', 'print')
# **Exercise** Print a cool multi-line string below.
# In[14]:
print('''
(ง •̀_•́)ง
╰(●’◡’●)╮
(..•˘_˘•..)
(づ ̄ 3 ̄)づ
''')
# See also https://github.com/glamp/bashplotlib
# Star Wars via Telnet http://asciimation.co.nz/
# ## Variables and Assignment
# It is useful to store a value and retrieve it later.
# To do so, we assign the value to a variable:
# In[15]:
x = 15
x # output the value of x
# **Is assignment the same as equality?**
# No because:
# - you cannot write `15 = x`, but
# - you can write `x = x + 1`, which increases the value of `x` by `1`.
# **Exercise** Try out the above code yourself.
# In[16]:
x = x + 1
x
# Let's see the effect of assignment step-by-step:
# 1. Run the following cell.
# 1. Click `Next >` to see the next step of the execution.
# In[17]:
get_ipython().run_cell_magic('mytutor', '-h 200', 'x = 15\nx = x + 1')
# The following *tuple assignment* syntax can assign multiple variables in one line.
# In[18]:
get_ipython().run_cell_magic('mytutor', '-h 200', "x, y, z = '15', '30', 15")
# One can also use *chained assignment* to set different variables to the same value.
# In[19]:
get_ipython().run_cell_magic('mytutor', '-h 250', 'x = y = z = 0')
# Variables can be deleted using `del`. Accessing a variable before assignment raises a Name error.
# In[20]:
del x, y
x, y
# ## Identifiers
# *Identifiers* such as variable names are case sensitive and follow certain rules.
# **What is the syntax for variable names?**
# 1. Must start with a letter or `_` (an underscore) followed by letters, digits, or `_`.
# 1. Must not be a [keyword](https://docs.python.org/3.7/reference/lexical_analysis.html#keywords) (identifier reserved by Python):
# <pre>False await else import pass
# None break except in raise
# True class finally is return
# and continue for lambda try
# as def from nonlocal while
# assert del global not with
# async elif if or yield</pre>
# **Exercise** Evaluate the following cell and check if any of the rules above is violated.
# In[21]:
from ipywidgets import interact
@interact
def identifier_syntax(assignment=['a-number = 15',
'a_number = 15',
'15 = 15',
'_15 = 15',
'del = 15',
'Del = 15',
'type = print',
'print = type',
'input = print']):
exec(assignment)
print('Ok.')
# 1. `a-number = 15` violates Rule 1 because `-` is not allowed. `-` is interpreted as an operator.
# 1. `15 = 15` violates Rule 1 because `15` starts with a digit instead of letter or _.
# 1. `del = 15` violates Rule 2 because `del` is a keyword.
# What can we learn from the above examples?
# - `del` is a keyword and `Del` is not because identifiers are case sensitive.
# - Function/method/type names `print`/`input`/`type` are not keywords and can be reassigned.
# This can useful if you want to modify the default implementations without changing their source code.
# To help make code more readable, additional style guides such as [PEP 8](https://www.python.org/dev/peps/pep-0008/#function-and-variable-names) are available:
# - Function names should be lowercase, with words separated by underscores as necessary to improve readability.
# - Variable names follow the same convention as function names.
# ## User Input
# **How to let the user input a value at *runtime*,
# i.e., as the program executes?**
# We can use the method `input`:
# - There is no need to delimit the input string by quotation marks.
# - Simply press `enter` after typing a string.
# In[22]:
print('Your name is', input('Please input your name: '))
# - The `input` method prints its argument, if any, as a [prompt](https://en.wikipedia.org/wiki/Command-line_interface#Command_prompt).
# - It takes user's input and *return* it as its value. `print` takes in that value and prints it.
# **Exercise** Explain whether the following code prints `'My name is Python'`. Does `print` return a value?
# In[23]:
print('My name is', print('Python'))
# - Unlike `input`, the function `print` does not return the string it is trying to print. Printing a string is, therefore, different from returning a string.
# - `print` actually returns a `None` object that gets printed as `None`.
# ## Type Conversion
# The following program tries to compute the sum of two numbers from user inputs:
# In[24]:
num1 = input('Please input an integer: ')
num2 = input('Please input another integer: ')
print(num1, '+', num2, 'is equal to', num1 + num2)
# **Exercise** There is a [bug](https://en.wikipedia.org/wiki/Software_bug) in the above code. Can you locate the error?
# The two numbers are concatenated instead of added together.
# `input` *returns* user input as a string.
# E.g., if the user enters `12`, the input is
# - not treated as the integer twelve, but rather
# - treated as a string containing two characters, one followed by two.
# To see this, we can use `type` to return the data type of an expression.
# In[25]:
num1 = input('Please input an integer: ')
print('Your input is', num1, 'with type', type(num1))
# **Exercise** `type` applies to any expressions. Try it out below on `15`, `print`, `print()`, `input`, and even `type` itself and `type(type)`.
# In[26]:
type(15), type(print), type(print()), type(input), type(type), type(type(type))
# **So what happens when we add strings together?**
# In[27]:
'4' + '5' + '6'
# **How to fix the bug then?**
# We can convert a string to an integer using `int`.
# In[28]:
int('4') + int('5') + int('6')
# We can also convert an integer to a string using `str`.
# In[29]:
str(4) + str(5) + str(6)
# **Exercise** Fix the bug in the following cell.
# In[30]:
num1 = input('Please input an integer: ')
num2 = input('Please input another integer: ')
# print(num1, '+', num2, 'is equal to', num1 + num2) # fix this line below
### BEGIN SOLUTION
print(num1, '+', num2, 'is equal to', int(num1) + int(num2))
### END SOLUTION
# ## Error
# In addition to writing code, a programmer spends significant time in *debugging* code that contains errors.
#
# **Can an error be automatically detected by the computer?**
# - You have just seen an example of *logical error*, which is due to an error in the logic.
# - The ability to debug or even detect such error is, unfortunately, beyond Python's intelligence.
# Other kinds of error may be detected automatically.
# As an example, note that we can omit `+` for string concatenation, but we cannot omit it for integer summation:
# In[31]:
print('Skipping + for string concatenation')
'4' '5' '6'
# In[32]:
print('Skipping + for integer summation')
4 5 6
# Python interpreter detects the bug and raises a *syntax* error.
# **Why Syntax error can be detected automatically?
# Why is the print statement before the error not executed?**
# - The Python interpreter can easily detect syntax error even before executing the code simply because
# - the interpreter fails to interpret the code, i.e., translates the code to lower-level executable code.
# The following code raises a different kind of error.
# In[33]:
print("Evaluating '4' + '5' + 6")
'4' + '5' + 6 # summing string with integer
# **Why Python throws a TypeError when evaluating `'4' + '5' + 6`?**
# There is no default implementation of `+` operation on a value of type `str` and a value of type `int`.
# - Unlike syntax error, the Python interpreter can only detect type error at runtime (when executing the code.)
# - Hence, such error is called a *runtime error*.
#
# **Why is TypeError a runtime error?**
# The short answer is that Python is a [strongly-and-dynamically-typed](https://en.wikipedia.org/wiki/Strong_and_weak_typing) language:
# - Strongly-typed: Python does not force a type conversion to avoid a type error.
# - Dynamically-typed: Python allow data type to change at runtime.
# The underlying details are more complicated than required for this course. It helps if you already know the following languages:
# - JavaScript, which is a *weakly-typed* language that forces a type conversion to avoid a type error.
# - C, which is a *statically-typed* language that does not allow data type to change at runtime.
# In[34]:
get_ipython().run_cell_magic('javascript', '', "alert('4' + '5' + 6) // no error because 6 is converted to a str automatically")
# A weakly-typed language may seem more robust, but it can lead to [more logical errors](https://www.oreilly.com/library/view/fluent-conference-javascript/9781449339203/oreillyvideos1220106.html).
# To improve readability, [typescript](https://www.typescriptlang.org/) is a strongly-typed replacement of javascript.
# **Exercise** Not all the strings can be converted into integers. Try breaking the following code by providing invalid inputs and record them in the subsequent cell. Explain whether the errors are runtime errors.
# In[35]:
num1 = input('Please input an integer: ')
num2 = input('Please input another integer: ')
print(num1, '+', num2, 'is equal to', int(num1) + int(num2))
# The possible invalid inputs are:
# > `4 + 5 + 6`, `15.0`, `fifteen`
#
# It raises a value error, which is a runtime error detected during execution.
#
# Note that the followings are okay
# > int('-1'), eval('4 + 5 + 6')
# ## Floating Point Numbers
# Not all numbers are integers. In Engineering, we often need to use fractions.
# **How to enter fractions in a program?**
# In[36]:
x = -0.1 # decimal number
y = -1.0e-1 # scientific notation
z = -1/10 # fraction
x, y, z, type(x), type(y), type(z)
# **What is the type `float`?**
# - `float` corresponds to the [*floating point* representation](https://en.wikipedia.org/wiki/Floating-point_arithmetic#Floating-point_numbers).
# - A `float` in stored exactly the way we write it in scientific notation:
#
# $$
# \overbrace{-}^{\text{sign}} \underbrace{1.0}_{\text{mantissa}\kern-1em}e\overbrace{-1}^{\text{exponent}\kern-1em}=-1\times 10^{-1}
# $$
# - The [truth](https://www.h-schmidt.net/FloatConverter/IEEE754.html) is more complicated than required for the course.
# Integers in mathematics may be regarded as a `float` instead of `int`:
# In[37]:
type(1.0), type(1e2)
# You can also convert an `int` or a `str` to a `float`.
# In[38]:
float(1), float('1')
# **Is it better to store an integer as `float`?**
# Python stores a [floating point](https://docs.python.org/3/library/sys.html#sys.float_info) with finite precision (usually as a 64bit binary fraction):
# In[39]:
import sys
sys.float_info
# It cannot represent a number larger than the `max`:
# In[40]:
sys.float_info.max * 2
# The precision also affects the check for equality.
# In[41]:
(1.0 == 1.0 + sys.float_info.epsilon * 0.5, # returns true if equal
1.0 == 1.0 + sys.float_info.epsilon * 0.6, sys.float_info.max + 1 == sys.float_info.max)
# Another issue with float is that it may keep more decimal places than desired.
# In[42]:
1/3
# **How to [round](https://docs.python.org/3/library/functions.html#round) a floating point number to the desired number of decimal places?**
# In[43]:
round(2.665,2), round(2.675,2)
# **Why 2.675 rounds to 2.67 instead of 2.68?**
# - A `float` is actually represented in binary.
# - A decimal fraction [may not be represented exactly in binary](https://docs.python.org/3/tutorial/floatingpoint.html#tut-fp-issues).
# The `round` function can also be applied to an integer.
# In[44]:
round(150,-2), round(250,-2)
# **Why 250 rounds to 200 instead of 300?**
# - Python 3 implements the default rounding method in [IEEE 754](https://en.wikipedia.org/w/index.php?title=IEEE_754#Rounding_rules).
# ## String Formatting
# **Can we round a `float` or `int` for printing but not calculation?**
# This is possible with [*format specifications*](https://docs.python.org/3/library/string.html#format-specification-mini-language).
# In[45]:
x = 10000/3
print('x ≈ {:.2f} (rounded to 2 decimal places)'.format(x))
x
# - `{:.2f}` is a *format specification*
# - that gets replaced by a string
# - that represents the argument `x` of `format`
# - as a decimal floating point number rounded to 2 decimal places.
# **Exercise** Play with the following widget to learn the effect of different format specifications. In particular, print `10000/3` as `3,333.33`.
# In[46]:
from ipywidgets import interact
@interact(x='10000/3',
          align={'None':'','<':'<','>':'>','=':'=','^':'^'},
          sign={'None':'','+':'+','-':'-','SPACE':' '},
          width=(0,20),
          grouping={'None':'','_':'_',',':','},
          precision=(0,20))
def print_float(x,sign,align,grouping,width=0,precision=2):
    """Build a float format spec from the widget choices and show its effect.

    x: expression text to evaluate (e.g. '10000/3') — trusted notebook input.
    sign, align, grouping: pieces of the format-spec mini-language.
    width: minimum field width (0 means unspecified).
    precision: number of decimal places.
    """
    # Nested f-string: the doubled {{ }} are the literal braces of the final spec.
    format_spec = f"{{:{align}{sign}{'' if width==0 else width}{grouping}.{precision}f}}"
    print("Format spec:",format_spec)
    # eval is acceptable only because the input comes from the notebook user.
    print("x ≈",format_spec.format(eval(x)))
# In[47]:
print('{:,.2f}'.format(10000/3))
# String formatting is useful for different data types other than `float`.
# E.g., consider the following program that prints a time specified by some variables.
# In[48]:
# Some specified time
hour = 12
minute = 34
second = 56
print("The time is " + str(hour) + ":" + str(minute) + ":" + str(second)+".")
# Imagine you have to show also the date in different formats.
# The code can become very hard to read/write because
# - the message is a concatenation of multiple strings and
# - the integer variables need to be converted to strings.
# Omitting `+` leads to syntax error. Removing `str` as follows also does not give the desired format.
# In[49]:
print("The time is ", hour, ":", minute, ":", second, ".") # note the extra spaces
# To make the code more readable, we can use the `format` function as follows.
# In[50]:
message = "The time is {}:{}:{}."
print(message.format(hour,minute,second))
# - We can have multiple *place-holders* `{}` inside a string.
# - We can then provide the contents (any type: numbers, strings..) using the `format` function, which
# - substitutes the place-holders by the function arguments from left to right.
# According to the [string formatting syntax](https://docs.python.org/3/library/string.html#format-string-syntax), we can change the order of substitution using
# - indices *(0 is the first item)* or
# - names inside the placeholder `{}`:
# In[51]:
print("You should {0} {1} what I say instead of what I {0}.".format("do", "only"))
print("The surname of {first} {last} is {last}.".format(first="John", last="Doe"))
# You can even put variables inside the format specification directly and have a nested string formatting.
# In[52]:
align, width = "^", 5
print(f"{{:*{align}{width}}}".format(x)) # note the syntax f"..."
# **Exercise** Play with the following widget to learn more about the formating specification.
# 1. What happens when `align` is none but `fill` is `*`?
# 1. What happens when the `expression` is a multi-line string?
# In[53]:
from ipywidgets import interact
@interact(expression=r"'ABC'",
          fill='*',
          align={'None':'','<':'<','>':'>','=':'=','^':'^'},
          width=(0,20))
def print_objectt(expression,fill,align='^',width=10):
    """Show how fill/align/width in a format spec affect an arbitrary object.

    NOTE(review): the name contains a typo ("objectt"); kept as-is since
    renaming would change the public interface of this notebook cell.
    """
    # The doubled {{ }} are literal braces; a fill character requires an align.
    format_spec = f"{{:{fill}{align}{'' if width==0 else width}}}"
    print("Format spec:",format_spec)
    # eval is acceptable only because the input comes from the notebook user.
    print("Print:",format_spec.format(eval(expression)))
# 1. It returns a ValueError because align must be specified when fill is.
# 1. The newline character is simply regarded a character. The formatting is not applied line-by-line. E.g., try 'ABC\nDEF'.
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab9/Monte Carlo and Root Finding.py
|
#!/usr/bin/env python
# coding: utf-8
# # Monte Carlo and Root Finding
# **CS1302 Introduction to Computer Programming**
# ___
# ## The Monty-Hall Game
# In[ ]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/rn1y-HrmA5c?end=23" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# **Is it better to change the initial pick? What is the chance of winning if we change?**
# *Hint:* There are two doors to choose from, and only one of the doors has treasure behind.
# Let's use the following program to play the game a couple of times.
# In[ ]:
import random
def play_monty_hall(num_of_doors=3):
    '''Run one interactive round of the Monty Hall game.

    num_of_doors: total number of doors to pick from (default 3).
    Reads the player's choices via input(), prints the outcome, and updates
    the global mh_stats counters used by monty_hall_statistics().
    '''
    doors = {str(i) for i in range(num_of_doors)}
    # Fix: random.sample on a set is deprecated since Python 3.9 and raises
    # TypeError from 3.11, so draw from an ordered sequence instead.
    door_with_treasure = random.choice(sorted(doors))
    # Input initial pick of the door.
    while True:
        initial_pick = input(f'Pick a door from {", ".join(sorted(doors))}: ')
        if initial_pick in doors: break
    # Open all but one other door. Opened door must have nothing.
    doors_to_open = doors - {initial_pick, door_with_treasure}
    other_door = door_with_treasure if initial_pick != door_with_treasure else doors_to_open.pop()
    print('Door(s) with nothing behind:', *doors_to_open)
    # Allow player to change the initial pick to the other (unopened) door.
    change_pick = input(
        f'Would you like to change your choice to {other_door}? [y/N] ').lower(
        ) == 'y'
    # Check and record winning.
    if not change_pick:
        mh_stats['# no change'] += 1
        if door_with_treasure == initial_pick:
            mh_stats['# win without changing'] += 1
            return print('You won!')
    else:
        mh_stats['# change'] += 1
        if door_with_treasure == other_door:
            mh_stats['# win by changing'] += 1
            return print('You won!')
    print(f'You lost. The door with treasure is {door_with_treasure}.')
mh_stats = dict.fromkeys(('# win by changing',
'# win without changing',
'# change',
'# no change'),0)
def monty_hall_statistics():
    '''Display the win-rate statistics accumulated in the global mh_stats.'''
    print('-' * 30, 'Statistics', '-' * 30)
    changed = mh_stats['# change']
    if changed:
        rate = mh_stats['# win by changing'] / changed
        print(f"% win by changing: {rate:.0%}")
    unchanged = mh_stats['# no change']
    if unchanged:
        rate = mh_stats['# win without changing'] / unchanged
        print(f"% win without changing: {rate:.0%}")
# In[ ]:
play_monty_hall()
monty_hall_statistics()
# You may also [play the game online](https://math.ucsd.edu/~crypto/Monty/monty.html).
# To get a good estimate of the chance of winning, we need to play the game many times.
# We can write a Monte Carlo simulation instead.
# In[ ]:
# Do not change any variables defined here, or some of the tests may fail.
import numpy as np
get_ipython().run_line_magic('pinfo', 'np.random.randint')
np.random.seed(0) # for reproducible result
num_of_games = int(10e7)
door_with_treasure = np.random.randint(1, 4, num_of_games, dtype=np.uint8)
initial_pick = np.random.randint(1, 4, num_of_games, dtype=np.uint8)
print(f"{'Door with treasure:':>19}", *door_with_treasure[:10],'...')
print(f"{'Initial pick:':>19}", *initial_pick[:10],'...')
# - `door_with_treasure` stores as 8-bit unsigned integers `uint8` the door numbers randomly chosen from $\{1, 2, 3\}$ as the doors with treasure behind for a number `num_of_games` of Monty-Hall games.
# - `initial_pick` stores the initial choices for the different games.
# If players do not change their initial pick, the chance of winning can be estimated as follows:
# In[ ]:
def estimate_chance_of_winning_without_change(door_with_treasure,
                                              initial_pick):
    '''Estimate the chance of winning the Monty Hall game without changing
    the initial pick using the Monte Carlo simulation of door_with_treasure
    and initial_pick.

    Bug fix: divide by the number of simulated games rather than the global
    variable n, which is defined only *after* this function in the notebook
    (NameError if called first) and is wrong for inputs of other lengths.
    '''
    count_of_win = 0
    # A no-switch player wins exactly when the initial pick is the treasure door.
    for treasure, pick in zip(door_with_treasure, initial_pick):
        if treasure == pick:
            count_of_win += 1
    return count_of_win / len(door_with_treasure)
n = num_of_games // 100
estimate_chance_of_winning_without_change(door_with_treasure[:n],
initial_pick[:n])
# However, the above code is inefficient and takes a long time to run. You may try running it on the entire sequences of `door_with_treasure` and `initial_pick` but **DO NOT** put the code in your notebook, as jupyterhub refuses to autograde notebook that take too much time or memory to run.
# A simpler and also more efficient solution with well over 100 times speed up is as follows:
# In[ ]:
def estimate_chance_of_winning_without_change(door_with_treasure,
                                              initial_pick):
    '''Estimate the chance of winning the Monty Hall game without changing
    the initial pick, from the Monte Carlo arrays door_with_treasure and
    initial_pick.'''
    # Element-wise equality gives a boolean array; its mean (True==1) is the
    # fraction of games where the initial pick already hits the treasure.
    wins = door_with_treasure == initial_pick
    return wins.mean()
estimate_chance_of_winning_without_change(door_with_treasure,
initial_pick)
# The code uses the method `mean` of `ndarray` that computes the mean of the `numpy` array.
# In computing the mean, `True` and `False` are regarded as `1` and `0` respectively, as illustrated below.
# In[ ]:
for i in True, False:
for j in True, False:
print(f'{i} + {j} == {i + j}')
# **Exercise** Define the function `estimate_chance_of_winning_by_change` same as `estimate_chance_of_winning_without_change` above but returns the estimate of the chance of winning by changing the initial choice instead. Again, *implement efficiently or jupyterhub may refuse to autograde your entire notebook*.
#
# *Hint:* Since there are only two unopened doors at the end of each game, a player will win by changing the initial pick if the initially picked door is not the door with treasure behind.
# In[ ]:
def estimate_chance_of_winning_by_change(door_with_treasure,
                                         initial_pick):
    '''Estimate the chance of winning the Monty Hall game by changing
    the initial pick using the Monte Carlo simulation of door_with_treasure
    and initial_pick.

    A switching player wins exactly when the initial pick is NOT the
    treasure door (the host removes every other losing door), so the
    estimate is the fraction of mismatches, computed vectorized.
    '''
    return (door_with_treasure != initial_pick).mean()
# In[ ]:
# tests
assert np.isclose(
estimate_chance_of_winning_by_change(door_with_treasure[:10],
initial_pick[:10]), 0.7)
# ## Solving a 3-by-3 system of linear equations
# `numpy` has a module `linalg` for linear algebra, and the module provides a function called `solve` that can solve a system of linear equations. For the example in the lecture
#
# $$
# \begin{aligned}
# 2 x_0 + 2 x_1 &= 1\\
# 2 x_1 &= 1,
# \end{aligned}
# $$
# we can obtain the solution as follows:
# In[ ]:
get_ipython().run_line_magic('pinfo', 'np.linalg.solve')
A = np.array([[2.,2],[0,2]])
b = np.array([1.,1])
x = np.linalg.solve(A,b)
# As explained in the lecture, the arguments `A` and `b` are obtained from the matrix form of the system of linear equations:
#
# $$
# \underbrace{
# \begin{bmatrix}
# 2 & 2\\
# 0 & 2
# \end{bmatrix}}_{\mathbf{A}}
# \underbrace{
# \begin{bmatrix}
# x_0\\ x_1
# \end{bmatrix}}_{\mathbf{x}}
# =
# \underbrace{
# \begin{bmatrix}
# 1 \\ 1
# \end{bmatrix}
# }_{\mathbf{b}}
# $$
# However, the function returns an error when there is no unique solutions.
# In[ ]:
# Case with infinitely many solution
A = np.array([[2.,2],[2,2]])
b = np.array([1.,1])
x = np.linalg.solve(A,b)
# In[ ]:
# Case without solution
A = np.array([[2.,2],[2,2]])
b = np.array([1.,0])
x = np.linalg.solve(A,b)
# A unique solution does not exist if and only if the [determinant](https://en.m.wikipedia.org/wiki/Determinant) of $\mathbf{A}$ is $0$, in which case $\mathbf{A}$ is called a singular matrix. For a $2$-by-$2$ matrix, the determinant is defined as follows:
#
# $$
# \begin{aligned}
# \operatorname{det}(A) &:= \left|
# \begin{matrix}
# a_{00} & a_{01}\\
# a_{10} & a_{11}
# \end{matrix}
# \right|\\
# &= a_{00}\times a_{11} - a_{01}\times a_{10}.
# \end{aligned}
# $$
# For example, the first system has a unique solution because
#
# $$
# \left|
# \begin{matrix}
# 2 & 2\\
# 0 & 2
# \end{matrix}
# \right|
# = 2\times 2 - 2\times 0 = 4>0.
# $$
# The last two systems do not have unique solutions because
#
# $$
# \left|
# \begin{matrix}
# 2 & 2\\
# 2 & 2
# \end{matrix}
# \right|
# = 2\times 2 - 2\times 2 = 0.
# $$
# We can use the function `det` from `np.linalg` to compute the determinant as follows:
# In[ ]:
get_ipython().run_line_magic('pinfo', 'np.linalg.det')
np.linalg.det(np.array([[2.,2],[0,2]])), np.linalg.det(np.array([[2.,2],[2,2]]))
# **Exercise** Use the `det` and `solve` functions to assign `x` to the `numpy` array storing the solution of the following linear system if the solution is unique else `None`.
#
# $$
# \begin{aligned}
# x_0 + 2 x_1 + 3x_2 &= 14\\
# 2x_0 + x_1 + 2x_2 &= 10\\
# 3 x_0 + 2x_1 + x_2 &= 10.
# \end{aligned}
# $$
# In[ ]:
# YOUR CODE HERE
raise NotImplementedError()
x
# In[ ]:
# tests
# As the main test must be hidden, you may want to verify your solution
# as explained in the lecture using matrix multiplication.
assert isinstance(x,np.ndarray) and x.shape == (3,)
# ## Solving non-linear equations
# Suppose we want to solve:
#
# $$
# f(x) = 0
# $$
# for some possibly non-linear real-valued function $f(x)$ in one real-valued variable $x$. Quadratic equation with an $x^2$ term is an example. The following is another example.
# In[ ]:
import matplotlib.pyplot as plt
import numpy as np
f = lambda x: x*(x - 1)*(x - 2)
x = np.linspace(-0.5,2.5)
plt.plot(x,f(x))
plt.axhline(color='gray',linestyle=':')
plt.xlabel(r'$x$')
plt.title(r'Plot of $f(x)=x(x-1)(x-2)$')
plt.show()
# While it is clear that the above function has three roots, namely, $x=0, 1, 2$, can we write a program to compute a root of any given continuous function $f$?
# In[ ]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/PXSLcEGkXkU" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# The following function `bisection`
# - takes as arguments
# - a continuous function `f`,
# - two real values `a` and `b`,
# - a positive integer `n` indicating the maximum depth of the recursion, and
# - returns a list `[xstart, xstop]` if the bisection succeeds in capturing a root in the interval `[xstart, xstop]` bounded by `a` and `b`, or else, returns a empty list `[]`.
# In[ ]:
def bisection(f, a, b, n=10):
    """Bracket a root of the continuous function f by repeated bisection.

    Returns [xstart, xstop] (with xstart <= xstop) after at most n levels
    of recursion, or [] when f has the same strict sign at both endpoints
    so no root is guaranteed between them.
    """
    if f(a) * f(b) > 0:
        return []  # same strict sign at both ends: no bracketed root
    if n <= 0:
        # Depth exhausted: report the current interval in ascending order.
        return [a, b] if a <= b else [b, a]
    midpoint = (a + b) / 2
    # Try the left half first; fall back to the right half if it fails.
    return bisection(f, a, midpoint, n - 1) or bisection(f, midpoint, b, n - 1)
# bisection solver
import ipywidgets as widgets
@widgets.interact(a=(-0.5,2.5,0.5),b=(-0.5,2.5,0.5),n=(0,10,1))
def bisection_solver(a=-0.5,b=0.5,n=0):
    """Plot f over [-0.5, 2.5] and overlay the interval found by bisection.

    a, b: endpoints of the search interval (widget sliders).
    n: maximum recursion depth passed to bisection.
    Relies on the module-level f (target function), np and plt.
    """
    x = np.linspace(-0.5,2.5)
    plt.plot(x,f(x))
    plt.axhline(color='gray',linestyle=':')
    plt.xlabel(r'$x$')
    plt.title(r'Bisection on $f(x)$')
    # NOTE(review): this unpacking raises ValueError when bisection returns []
    # (no sign change in [a, b]) — confirm whether that is intended for the demo.
    [xstart,xstop] = bisection(f,a,b,n)
    plt.plot([xstart,xstop],[0,0],'r|-')
    print('Interval: ',[xstart,xstop])
# Try setting the values of $a$ and $b$ as follows and change $n$ to see the change of the interval step-by-step.
# In[ ]:
bisection(f,-0.5,0.5), bisection(f,1.5,0.5), bisection(f,-0.1,2.5)
# **Exercise** Modify the function `bisection` to
# - take the floating point parameter `tol` instead of `n`, and
# - return the interval from the bisection method represented by a list `[xstart,xstop]` but as soon as the gap `xstop - xstart` is $\leq$ `tol`.
# In[ ]:
def bisection(f, a, b, tol=1e-9):
    """Bracket a root of f to within tol by the bisection method.

    Returns [xstart, xstop] (xstart <= xstop, gap <= tol) containing a
    sign change of f, or [] if f(a) and f(b) have the same strict sign
    so no root is guaranteed between a and b.
    """
    if f(a) * f(b) > 0:
        return []  # no sign change: a root is not guaranteed in [a, b]
    elif abs(b - a) <= tol:
        # Interval small enough: return it in ascending order.
        return [a, b] if a <= b else [b, a]
    else:
        c = (a + b) / 2  # bisect the interval between a and b
        # Keep whichever half still brackets a sign change.
        return bisection(f, a, c, tol) or bisection(f, c, b, tol)
# In[ ]:
# tests
import numpy as np
f = lambda x: x*(x - 1)*(x - 2)
bisection(f,1.5,0.5)
assert np.isclose(bisection(f,-0.5,0.5),[-9.313225746154785e-10, 0.0]).all()
assert np.isclose(bisection(f,1.5,0.5,1e-2), [1.0, 1.0078125]).all()
assert np.isclose(bisection(f,-0.1,2.5,1e-3), [1.9998046875000002, 2.0004394531250003]).all()
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture3/Conditional Execution.py
|
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# # Conditional Execution
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Motivation
# Conditional execution means running different pieces of code based on different conditions. Why?
# For instance, when trying to compute `a/b`, `b` may be `0` and division by `0` is invalid.
# In[2]:
def multiply_or_divide(a, b):
    # Demo version: a / b is evaluated unconditionally, so b == 0 raises
    # ZeroDivisionError before anything is printed (by design of the lecture).
    product = a * b
    quotient = a / b
    print('a:{}, b:{}, a*b:{}, a/b:{}'.format(a, b, product, quotient))
multiply_or_divide(1, 2)
multiply_or_divide(1, 0) # multiplication is valid but not shown
# Can we skip only the division but not multiplication when `b` is `0`?
# In[3]:
def multiply_or_divide(a, b):
    # Guard the division: report 'undefined' instead of raising when b == 0
    # (0 is falsy, so `not b` catches it without a comparison).
    quotient = 'undefined' if not b else a / b
    print('a:{}, b:{}, a*b:{}, a/b:{}'.format(a, b, a * b, quotient))
multiply_or_divide(1, 2)
multiply_or_divide(1, 0) # multiplication is valid but not shown
# The above solution involve:
# - a *boolean expression* `fix` that checks whether a condition holds, and
# - a *conditional construct* `... if ... else ...` that specify which code block should be executed under what condition.
# ## Boolean expressions
# ### Comparison Operators
# **How to compare different values?**
# Like the equality and inequality relationships in mathematics,
# Python also have binary [*comparison/relational operators*](https://docs.python.org/3/reference/expressions.html#comparisons):
# | Expression | True iff |
# | ---------: | :--------- |
# | `x == y` | $x=y$. |
# | `x < y` | $x<y$. |
# | `x <= y` | $x\leq y$. |
# | `x > y` | $x>y$. |
# | `x >= y` | $x\geq y$. |
# | `x != y` | $x\neq y$. |
# Explore these operators using the widgets below:
# In[4]:
# Comparisons
from ipywidgets import interact
comparison_operators = ['==','<','<=','>','>=','!=']
@interact(operand1='10',
          operator=comparison_operators,
          operand2='3')
def comparison(operand1,operator,operand2):
    """Evaluate 'operand1 operator operand2' and show the expression,
    its value, and the value's type (right-aligned labels).

    The operands are taken verbatim as text, so quotes make them strings.
    """
    expression = f"{operand1} {operator} {operand2}"
    # eval is acceptable only because the input comes from the notebook user.
    value = eval(expression)
    print(f"""{'Expression:':>11} {expression}\n{'Value:':>11} {value}\n{'Type:':>11} {type(value)}""")
# - These operators return either `True` or `False`, which are `keywords` of type *boolean*.
# - The expressions are called *boolean expressions* or *predicates*, named after [George Boole](https://en.wikipedia.org/wiki/George_Boole).
# - N.b., the equality operator `==` consists of *two equal signs*, different from the assignment operator `=`.
# **What is the precedence of comparison operators?**
# All the comparison operators have the [same precedence](https://docs.python.org/3/reference/expressions.html?highlight=precedence#operator-precedence) lower than that of `+` and `-`.
# In[5]:
1 + 2 >= 3 # (1 + 2) >= 3
# Python allows multiple comparison operations to be chained together:
# In[6]:
2.0 == 2>1 #equivalent to (2.0 ==2) and (2>1)
# **What is the associativity?**
# Comparison operations are [*non-associative*](https://en.wikipedia.org/wiki/Operator_associativity#Non-associative_operators):
# In[7]:
(2.0 == 2) > 1, 2.0 == (2 > 1) # not the same as 2.0 == 2 > 1
# **Errata** in [Halterman17] due to a misunderstanding of non-associativity vs left-to-right evaluation order:
#
# - [Halterman17, p.69](https://archive.org/stream/2018Fundamentals.ofPython?ref=ol#page/n79/mode/1up):
# > The relational operators are binary operators and are all ~left associative~ **non-associative**.
# - [Halterman17, p.50, Table 3.2](https://archive.org/stream/2018Fundamentals.ofPython?ref=ol#page/n60/mode/1up):
# - `=` should be non-associative instead of right-associative.
# - The corresponding table in `Lecture2/Expressions and Arithmetic.ipynb` should also be corrected accordingly.
# **Exercise** Explain why the following boolean expressions have different values.
# In[8]:
1 <= 2 < 3 != 4, (1 <= 2) < (3 != 4)
# The second expression is not a chained comparison:
# - The expressions in the parentheses are evaluated to boolean values first to `True`, and so
# - the overall expression `True < True` is evaluated to `False`.
# **Exercise** The comparison operators can be applied to different data types, as illustrated below.
# Explain the meaning of the operators in each of the following expressions.
# In[9]:
# Comparisons beyond numbers
@interact(expression=[
    '10 == 10.', '"A" == "A"', '"A" == "A "', '"A" != "a"',
    '"A" > "a"', '"aBcd" < "abd"', '"A" != 64', '"A" < 64'
])
def relational_expression(expression):
    """Evaluate the selected comparison expression and print its result."""
    # eval on a fixed dropdown of expressions — no untrusted input involved;
    # the last option deliberately raises TypeError (str < int).
    print(eval(expression))
# 1. Checks whether an integer is equal to a floating point number.
# 1. Checks whether two characters are the same.
# 1. Checks whether two strings are the same. Note the space character.
# 1. Checks whether a character is larger than the order character according to their unicodes.
# 1. Checks whether a string is lexicographically smaller than the other string.
# 1. Checks whether a character is not equal to an integer.
# 1. TypeError because there is no implementation that evaluates whether a string is smaller than an integer.
# **Is `!` the same as the `not` operator?**
# **Errata** There is an error in [Halterman17, p.69](https://archive.org/stream/2018Fundamentals.ofPython?ref=ol#page/n79/mode/1up) due to confusion with C language:
# > ... `!(x >= 10)` and `!(10 <= x)` are ~equivalent~ **invalid**.
# - We can write `1 != 2` as `not 1 == 2` but not `!(1 == 2)` because
# - `!` is not a logical operator. It is used to call a [system shell command](https://ipython.readthedocs.io/en/stable/interactive/tutorial.html?highlight=system%20call#system-shell-commands) in IPython.
# In[10]:
get_ipython().system('(1 == 2)')
# In[11]:
get_ipython().system('ls # a bash command that lists files in the current directory')
# **How to compare floating point numbers?**
# In[12]:
x = 10
y = (x**(1/3))**3
x == y
# Why False? Shouldn't $(x^{\frac13})^3=x$?
# - Floating point numbers have finite precisions and so
# - we should instead check whether the numbers are close enough.
# One method of comparing floating point numbers:
# In[13]:
abs(x - y) <= 1e-9
# `abs` is a function that returns the absolute value of its argument. Hence, the above translates to
#
# $$|x - y| \leq \delta_{\text{abs}}$$
# or equivalently
#
# $$y-\delta_{\text{abs}} \leq x \leq y+\delta_{\text{abs}} $$
# where $\delta_{\text{abs}}$ is called the *absolute tolerance*.
# **Is an absolute tolerance of `1e-9` good enough?**
# What if we want to compare `x = 1e10` instead of `10`?
# In[14]:
x = 1e10
y = (x**(1/3))**3
abs(x - y) <= 1e-9
# Floating point numbers "float" at different scales.
# A better way to use the [`isclose`](https://docs.python.org/3/library/math.html#math.isclose) function from `math` module.
# In[15]:
import math
math.isclose(x, y)
# **How does it work?**
# `math.isclose(x,y)` implements
#
# $$ |x - y| \leq \max\{\delta_{\text{rel}} \max\{|x|,|y|\},\delta_{\text{abs}}\}$$
# with the default
# - *relative tolerance* $\delta_{\text{rel}}$ equal to `1e-9`, and
# - absolute tolerance $\delta_{\text{abs}}$ equal to `0.0`.
# **Exercise** Write the boolean expression implemented by `isclose`. You can use the function `max(a,b)` to find the maximum of `a` and `b`.
# In[16]:
rel_tol, abs_tol = 1e-9, 0.0
x, y = 1e-100, 2e-100
### BEGIN SOLUTION
abs(x-y) <= max(rel_tol * max(abs(x), abs(y)), abs_tol)
### END SOLUTION
# ### Boolean Operations
# Since chained comparisons are non-associative. It follows a different evaluation rule than arithmetical operators.
# E.g., `1 <= 2 < 3 != 4` is evaluated as follows:
# In[17]:
1 <= 2 and 2 < 3 and 3 != 4
# The above is called a *compound boolean expression*, which is formed using the *boolean/logical operator* `and`.
# **Why use boolean operators?**
# What if we want to check whether a number is either $< 0$ or $\geq 100$?
# Can we achieve this only by chaining the comparison operators or applying the logical `and`?
# In[18]:
# Check if a number is outside a range.
@interact(x='15')
def check_out_of_range(x):
    """Report whether the numeric value of the text x lies outside [0, 100)."""
    x_ = float(x)  # may raise ValueError for non-numeric text
    # `or` is needed: a chained comparison cannot express this disjunction.
    is_out_of_range = x_<0 or x_>=100
    print('Out of range [0,100):', is_out_of_range)
# - `and` alone is not [functionally complete](https://en.wikipedia.org/wiki/Functional_completeness), i.e., not enough to give all possible boolean functions.
# - In addition to `and`, we can also use `or` and `not`.
# | `x` | `y` | `x and y` | `x or y` | `not x` |
# | :-----: | :-----: | :-------: | :------: | :-----: |
# | `True` | `True` | `True` | `True` | `False` |
# | `True` | `False` | `False` | `True` | `False` |
# | `False` | `True` | `False` | `True` | `True` |
# | `False` | `False` | `False` | `False` | `True` |
# The above table is called a *truth table*. It enumerates all possible input and output combinations for each boolean operator.
# **How are chained logical operators evaluated?
# What are the precedence and associativity for the logical operators?**
# - All binary boolean operators are left associative.
# - [Precedence](https://docs.python.org/3/reference/expressions.html?highlight=precedence#operator-precedence): `comparison operators` > `not` > `and` > `or`
#
#
#
#
# **Exercise** Explain what the values of the following two compound boolean expressions are:
# - Expression A: `True or False and True`
# - Expression B: `True and False and True`
# - Expression C: `True or True and False`
# - Expression A evaluates to `True` because `and` has higher precedence and so the expression has the same value as `True or (False and True)`.
# - Expression B evaluates to `False` because `and` is left associative and so the expression has the same value as `(True and False) and True`.
# - Expression C evaluates to `True` because `and` has a higher precedence and so the expression has the same value as `True or (True and False)`. Note that `(True or True) and False` evaluates to something `False` instead, so precedence matters.
# Instead of following the precedence and associativity, however, a compound boolean expression uses a [short-circuit evaluation](https://docs.python.org/3/reference/expressions.html?highlight=precedence#boolean-operations).
# To understand this, we will use the following function to evaluate a boolean expression verbosely.
# In[19]:
def verbose(id, boolean):
    """Announce that this (sub)expression was evaluated, then return it as-is.

    Used to trace Python's short-circuit evaluation of boolean operators.
    """
    trace = (id, 'evaluated:', boolean)
    print(*trace)
    return boolean
# In[20]:
verbose('A',verbose(1,True) or verbose(2,False) and verbose(3,True)) # True or (False and True)
# **Why expression 2 and 3 are not evaluated?**
# Because True or ... must be True (Why?) so Python does not look further. From the [documentation](https://docs.python.org/3/reference/expressions.html?highlight=precedence#boolean-operations):
# > The expression `x or y` first evaluates `x`; if `x` is true, its value is returned; otherwise, `y` is evaluated and the resulting value is returned.
# Note that:
# - Even though `or` has lower precedence than `and`, it is still evaluated first.
# - The evaluation order for logical operators is left-to-right.
# In[21]:
verbose('B',verbose(4,True) and verbose(5,False) and verbose(6,True)) # (True and False) and True
# **Why expression 6 is not evaluated?**
# `True and False and ...` must be `False` so Python does not look further.
# > The expression `x and y` first evaluates `x`; if `x` is false, its value is returned; otherwise, `y` is evaluated and the resulting value is returned.
# Indeed, logical operators can even be applied to non-boolean operands. From the [documentation](https://docs.python.org/3/reference/expressions.html?highlight=precedence#boolean-operations):
# > In the context of Boolean operations, and also when expressions are used by control flow statements, the following values are interpreted as false: `False`, None, numeric zero of all types, and empty strings and containers (including strings, tuples, lists, dictionaries, sets and frozensets). All other values are interpreted as true.
# **Exercise** How does the following code work?
# In[22]:
print('You have entered', input() or 'nothing')
# - The code replaces empty user input by the default string `nothing` because empty string is regarded as False in a boolean operation.
# - If user input is non-empty, it is regarded as True in the boolean expression and returned immediately as the value of the boolean operation.
# **Is empty string equal to False?**
# In[23]:
print('Is empty string equal False?',''==False)
# - An empty string is regarded as False in a boolean operation but
# - a *comparison operation is not a boolean operation*, even though it forms a boolean expression.
# ## Conditional Constructs
# Consider writing a program that sorts values in *ascending* order.
# A *sorting algorithm* refers to the procedure of sorting values in order.
# ### If-Then Construct
# **How to sort two values?**
# Given two values are stored as `x` and `y`, we want to
# - `print(x,y)` if `x <= y`, and
# - `print(y,x)` if `y < x`.
# Such a program flow is often represented by a flowchart like the following:
# <img src="https://www.cs.cityu.edu.hk/~ccha23/cs1302/Lecture3/sort_two_values1.svg" style="max-width:300px;" alt="sort_two_values(x,y);
# if(x<=y) {
# print(x, y)
# }
# if (y<x) {
# print(y, x)
# }">
# Python provides the [`if` statement](https://docs.python.org/3/reference/compound_stmts.html#the-if-statement) to implement the above [*control flow*](https://en.wikipedia.org/wiki/Control_flow) specified by the diamonds.
# In[24]:
# Sort two values using if statement
def sort_two_values(x, y):
    """Print the two values in ascending order (smaller one first)."""
    # Two independent if statements: both comparisons are always evaluated,
    # which the surrounding discussion later improves with if/else.
    if x <= y: print(x, y)
    if y < x:
        print(y, x)
@interact(x='1', y='0')
def sort_two_values_app(x, y):
    """Widget wrapper: evaluate the two text inputs and sort the values."""
    # eval turns the typed text into Python values (trusted notebook input).
    sort_two_values(eval(x), eval(y))
# We can visualize the execution as follows:
# In[25]:
get_ipython().run_cell_magic('mytutor', '-h 350', 'def sort_two_values(x, y):\n if x <= y:\n print(x, y)\n if y < x: print(y, x)\n \nsort_two_values(1,0)\nsort_two_values(1,2)')
# Python use indentation to indicate code blocks or *suite*:
# - `print(x, y)` (Line 5) is indented to the right of `if x <= y:` (Line 4) to indicate it is the body of the if statement.
# - For convenience, `if y < x: print(y, x)` (Line 6) is a one-liner for an `if` statement that only has one line in its block.
# - Both `if` statements (Line 4-6) are indented to the right of `def sort_two_values(x,y):` (Line 3) to indicate that they are part of the body of the function `sort_two_values`.
# **How to indent?**
# - The [style guide](https://www.python.org/dev/peps/pep-0008/#indentation) recommends using 4 spaces for each indentation.
# - In IPython, you can simply type the `tab` key and IPython will likely enter the correct number of spaces for you.
# **What if you want to leave a block empty?**
# In programming, it is often useful to delay detailed implementations until we have written an overall skeleton.
# To leave a block empty, Python uses the keyword [`pass`](https://docs.python.org/3/tutorial/controlflow.html#pass-statements).
# In[26]:
# write a code skeleton
def sort_two_values(x, y):
    """Skeleton for sorting two values; body intentionally left as pass."""
    pass
    # print the smaller value first followed by the larger one
sort_two_values(1,0)
sort_two_values(1,2)
# Without `pass`, the code will fail to run, preventing you from checking other parts of the code.
# In[27]:
# You can add more details to the skeleton step-by-step
def sort_two_values(x, y):
    # Both branches are sketched but still unimplemented: each `pass`
    # marks where a print statement will go once the logic is filled in.
    if x <= y:
        pass
    # print x before y
    if y < x: pass # print y before x
sort_two_values(1,0)
sort_two_values(1,2)
# ### If-Then-Else Construct
# The sorting algorithm is not efficient enough. Why not?
# Hint: `(x <= y) and not (y < x)` is a *tautology*, i.e., always true.
# To improve the efficiency, we should implement the following program flow.
# <img src="https://www.cs.cityu.edu.hk/~ccha23/cs1302/Lecture3/sort_two_values2.svg" style="max-width:300px;" alt="sort_two_values(x,y);
# if(x<=y) {
# print(x, y)
# }
# else {
# print(y, x)
# }">
# This can be done by the `else` clause of the [`if` statement](https://docs.python.org/3/tutorial/controlflow.html#if-statements).
# In[28]:
get_ipython().run_cell_magic('mytutor', '-h 350', 'def sort_two_values(x, y):\n if x <= y:\n print(x, y)\n else:\n print(y,x)\n \nsort_two_values(1,0)\nsort_two_values(1,2)')
# We can also use a [*conditional expression*](https://docs.python.org/3/reference/expressions.html#conditional-expressions) to shorten the code.
# In[29]:
def sort_two_values(x, y):
    """Print x and y in non-decreasing order via a conditional expression."""
    template = '{0} {1}' if x <= y else '{1} {0}'
    print(template.format(x, y))
@interact(x='1', y='0')
def sort_two_values_app(x, y):
    # Widget wrapper: eval turns the text inputs into Python values.
    # Acceptable only for this trusted teaching demo, not untrusted input.
    sort_two_values(eval(x), eval(y))
# **Exercise** Explain why the followings have syntax errors.
# In[30]:
1 if True
# In[31]:
x = 1 if True else x = 0
# A conditional expression must be an expression:
# 1. It must give a value under all cases. To enforce that, `else` keyword must be provided.
# 1. An assignment statement does not return any value and therefore cannot be used for the conditional expression.
# `x = 1 if True else 0` is valid because `x =` is not part of the conditional expression.
# ### Nested Conditionals
# Consider sorting three values instead of two. A feasible algorithm is as follows:
# <img src="https://www.cs.cityu.edu.hk/~ccha23/cs1302/Lecture3/sort_three_values1.svg" style="max-width:800px;" alt="sort_three_values(x,y,z);
# if(x<=y<=z) {
# print(x, y, z)
# } else
# if (x<=z<=y) {
# print(x, z, y)
# } else
# if (y<=x<=z) {
# print(y, x, z)
# } else
# if (y<=z<=x) {
# print(y, z, x)
# } else
# if (z<=x<=y) {
# print(z, x, y)
# } else {
# print(z, y, x)
# }">
# We can implement the flow using *nested conditional constructs*:
# In[32]:
def sort_three_values(x, y, z):
    """Print x, y, z on one line in non-decreasing order.

    Guard-clause version: each ordering is tested in turn and the function
    returns as soon as one matches, avoiding the deeply nested else blocks.
    """
    if x <= y <= z:
        print(x, y, z)
        return
    if x <= z <= y:
        print(x, z, y)
        return
    if y <= x <= z:
        print(y, x, z)
        return
    if y <= z <= x:
        print(y, z, x)
        return
    if z <= x <= y:
        print(z, x, y)
        return
    print(z, y, x)
def test_sort_three_values():
    """Exercise sort_three_values on every ordering of 0, 1, 2."""
    for a, b, c in [(0, 1, 2), (0, 2, 1), (1, 0, 2),
                    (1, 2, 0), (2, 0, 1), (2, 1, 0)]:
        sort_three_values(a, b, c)
test_sort_three_values()
# Imagine what would happen if we have to sort many values.
# To avoid an excessively long line due to the indentation, Python provides the `elif` keyword that combines `else` and `if`.
# In[33]:
def sort_three_values(x, y, z):
    """Print the three arguments on one line in non-decreasing order.

    The candidate orderings are tried in the same sequence as the original
    elif chain; the first sorted one wins, with (z, y, x) as the fallback.
    """
    candidate_orders = ((x, y, z), (x, z, y), (y, x, z), (y, z, x), (z, x, y))
    for a, b, c in candidate_orders:
        if a <= b <= c:
            print(a, b, c)
            return
    print(z, y, x)
test_sort_three_values()
# **Exercise** The above sorting algorithm is inefficient because some conditions may be checked more than once.
# Improve the program to eliminate duplicate checks.
# *Hint:* Do not use chained comparison operators or compound boolean expressions.
# In[34]:
def sort_three_values(x, y, z):
    """Print x, y, z in non-decreasing order using at most 3 comparisons.

    Each branch compares only pairs whose order is still unknown, so no
    pair is ever tested twice.
    """
    if x <= y:
        if y <= z:
            print(x, y, z)  # x <= y <= z
        elif x <= z:
            print(x, z, y)  # x <= z < y
        else:
            print(z, x, y)  # z < x <= y
    ### BEGIN SOLUTION
    elif z <= y:
        print(z, y, x)  # z <= y < x
    elif z <= x:
        print(y, z, x)  # y < z <= x
    else:
        print(y, x, z)  # y < x < z
    ### END SOLUTION
sort_three_values(10,17,14)
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture4/Writing Functions.py
|
#!/usr/bin/env python
# coding: utf-8
# # Writing Functions
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Function Definition
# **How to write a function?**
# A function is defined using the [`def` keyword](https://docs.python.org/3/reference/compound_stmts.html#def):
# The following is a simple function that prints "Hello, World!".
# In[2]:
# Function definition
def say_hello():
    """Print a fixed greeting to standard output."""
    greeting = 'Hello, World!'
    print(greeting)
# In[3]:
# Function invocation
say_hello()
# To make a function more powerful and solve different problems,
# we can
# - use a [return statement](https://docs.python.org/3/reference/simple_stmts.html#the-return-statement) to return a value that
# - depends on some input arguments.
# In[4]:
def increment(x):
    """Return x + 1, the successor of the input value."""
    successor = x + 1
    return successor
increment(3)
# We can also have multiple input arguments.
# In[5]:
def length_of_hypotenuse(a, b):
    """Return sqrt(a^2 + b^2), the hypotenuse of a right triangle.

    Both legs must be non-negative; otherwise a warning is printed and
    None is returned implicitly.
    """
    if a >= 0 and b >= 0:
        return (a * a + b * b) ** 0.5
    print('Input arguments must be non-negative.')
# In[6]:
length_of_hypotenuse(3, 4)
# In[7]:
length_of_hypotenuse(-3, 4)
# ## Documentation
# **How to document a function?**
# In[8]:
# Author: <NAME>
# Last modified: 2020-09-14
def increment(x):
    """Return x + 1, the increment of the input value.

    A minimal example demonstrating parameter passing, the return
    statement, and function documentation via a docstring.
    """
    return x + 1  # + may raise TypeError for non-numeric x such as 'str'
# The `help` command shows the docstring we write
# - at beginning of the function body
# - delimited using triple single/double quotes.
# In[9]:
help(increment)
# The docstring should contain the *usage guide*, i.e., information for new users to call the function properly.
# There is a Python style guide (PEP 257) for
# - [one-line docstrings](https://www.python.org/dev/peps/pep-0257/#one-line-docstrings) and
# - [multi-line docstrings](https://www.python.org/dev/peps/pep-0257/#multi-line-docstrings).
# **Why doesn't `help` show the comments that start with `#`?**
# ```Python
# # Author: <NAME>
# # Last modified: 2020-09-14
# def increment(x):
# ...
# return x + 1 # + operation is used and may fail for 'str'
# ```
# Those comments are not usage guide. They are intended for programmers who need to maintain/extend the function definition.
# - Information about the author and modification date facilitate communications among programmers.
# - Comments within the code help explain important and not-so-obvious implementation details.
# **How to let user know the data types of input arguments and return value?**
# We can [annotate](https://docs.python.org/3/library/typing.html) the function with *hints* of the types of the arguments and return value.
# In[10]:
# Author: <NAME>
# Last modified: 2020-09-14
def increment(x: float) -> float:
    """Return the increment x + 1 of the input value x.

    A minimal example of parameter passing, returning a value, and
    annotating the parameter and return types.
    """
    return x + 1  # annotations are hints only; a str argument still fails here
help(increment)
# The above annotations is not enforced by the Python interpreter.
# Nevertheless, such annotations make the code easier to understand and can be used by editor with type-checking tools.
# In[11]:
def increment_user_input():
    # NOTE: input() returns a str, so increment receives the wrong type.
    # No error is raised at definition or call time — the bug only surfaces
    # when `+` runs, because type annotations are hints, not checks.
    return increment(input()) # does not raise error even though input returns str
# In[12]:
increment_user_input() # still leads to a runtime error: str from input() cannot be added to int
# ## Parameter Passing
# **Can we increment a variable instead of returning its increment?**
# In[13]:
def increment(x):
    # Rebinds only the LOCAL name x inside this call's frame; the caller's
    # variable is untouched, so the function has no observable effect
    # (and returns None).
    x += 1
# In[14]:
x = 3
increment(x)
print(x) # 4?
# Does the above code increment `x`?
# In[15]:
get_ipython().run_cell_magic('mytutor', '-h 350', 'def increment(x):\n x += 1\n\n\nx = 3\nincrement(x)\nprint(x)')
# - Step 3: The function `increment` is invoked with the argument evaluated to the value of `x`.
# - Step 3-4: A local frame is created for variables local to `increment` during its execution.
# - The *formal parameter* `x` in `def increment(x):` becomes a local variable and
# - it is assigned the value `3` of the *actual parameter* given by the global variable `x`.
# - Step 5-6: The local (but not the global) variable `x` is incremented.
# - Step 6-7: The function call completes and the local frame is removed.
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture4/Using Functions.py
|
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# # Using Functions
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Motivation
# **How to reuse code so we can write less?**
# When we write a loop, the code is executed multiple times, once for each iteration.
# This is a simple form of *code reuse* that
# - gives your code an elegant *structure* that
# - can be executed efficiently by a computer, and
# - *interpreted* easily by a programmer.
# **How to repeat execution at different times, in different programs, and in slightly different ways?**
# ## Functions
# **How to calculate the logarithm?**
# There is no arithmetic operator for logarithm.
# Do we have to implement it ourselves?
# We can use the function `log` from the [`math` *module*](https://docs.python.org/3/library/math.html):
# In[2]:
from math import log
log(256, 2) # log base 2 of 256
# The above computes the base-$2$ logarithm, $\log_2(256)$. Like functions in mathematics, a computer function `log`
# - is *called/invoked* with some input *arguments* `(256, 2)` following the function, and
# - *returns* an output value computed from the input arguments.
# In[3]:
# A function is callable while an integer is not
callable(log), callable(1)
# Unlike mathematical functions:
# - A computer function may require no arguments, but we still need to call it with `()`.
# In[4]:
input()
# - A computer function may have side effects and return `None`.
# In[5]:
x = print()
print(x, 'of type', type(x))
# An argument of a function call can be any expression.
# In[6]:
print('1st input:', input(), '2nd input', input())
# Note also that
# - the argument can also be a function call like function composition in mathematics.
# - Before a function call is executed, its arguments are evaluated first from left to right.
# **Why not implement logarithm yourself?**
# - The function from standard library is efficiently implemented and thoroughly tested/documented.
# - Knowing what a function does is often insufficient for an efficient implementation.
# (See [how to calculate logarithm](https://en.wikipedia.org/wiki/Logarithm#Calculation) as an example.)
# Indeed, the `math` library does not implement `log` itself:
# > **CPython implementation detail:** The `math` module consists mostly of thin *wrappers* around the platform C math library functions. - [pydoc last paragraph](https://docs.python.org/3/library/math.html)
#
# (See the [source code wrapper for `log`](https://github.com/python/cpython/blob/457d4e97de0369bc786e363cb53c7ef3276fdfcd/Modules/mathmodule.c#L731).)
# **Exercise** What is a function in programming?
# - A function is a structure that allows a piece of code to be reused in a program.
# - A function can adapt its computations to different situations using input arguments.
# ## Import Functions from Modules
# **How to import functions?**
# We can use the [`import` statement](https://docs.python.org/3/reference/simple_stmts.html#import) to import multiple functions into the program *global frame*.
# In[7]:
get_ipython().run_cell_magic('mytutor', '-h 300', "from math import log10, ceil\nx = 1234\nprint('Number of digits of x:', ceil(log10(x)))")
# The above imports both the functions `log10` and `ceil` from `math` to compute the number $\lceil \log_{10}(x)\rceil$ of digits of a *strictly positive* integer $x$.
# **How to import all functions from a library?**
# In[8]:
get_ipython().run_cell_magic('mytutor', '-h 300', "from math import * # import all except names starting with an underscore\nprint('{:.2f}, {:.2f}, {:.2f}'.format(sin(pi / 6), cos(pi / 3), tan(pi / 4)))")
# The above uses the wildcard `*` to import ([nearly](https://docs.python.org/3/tutorial/modules.html#more-on-modules)) all the functions/variables provided in `math`.
# **What if different packages define the same function?**
# In[9]:
get_ipython().run_cell_magic('mytutor', '-h 300', "print('{}'.format(pow(-1, 2)))\nprint('{:.2f}'.format(pow(-1, 1 / 2)))\nfrom math import *\nprint('{}'.format(pow(-1, 2)))\nprint('{:.2f}'.format(pow(-1, 1 / 2)))")
# - The function `pow` imported from `math` overwrites the built-in function `pow`.
# - Unlike the built-in function, `pow` from `math` returns only floats but not integers nor complex numbers.
# - We say that the import statement *polluted the namespace of the global frame* and caused a *name collision*.
# **How to avoid name collisions?**
# In[10]:
get_ipython().run_cell_magic('mytutor', '-h 250', "import math\nprint('{:.2f}, {:.2f}'.format(math.pow(-1, 2), pow(-1, 1 / 2)))")
# We can use the full name (*fully-qualified name*) `math.pow` prefixed with the module name (and possibly package names containing the module).
# **Can we shorten a name?**
# The name of a library can be very long and there can be a hierarchical structure as well.
# E.g., to plot a sequence using `pyplot` module from `matplotlib` package:
# In[11]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot
matplotlib.pyplot.stem([4, 3, 2, 1])
matplotlib.pyplot.ylabel(r'$x_n$')
matplotlib.pyplot.xlabel(r'$n$')
matplotlib.pyplot.title('A sequence of numbers')
matplotlib.pyplot.show()
# It is common to rename `matplotlib.pyplot` as `plt`:
# In[12]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
plt.stem([4, 3, 2, 1])
plt.ylabel(r'$x_n$')
plt.xlabel(r'$n$')
plt.title('A sequence of numbers')
plt.show()
# We can also rename a function as we import it to avoid name collision:
# In[13]:
from math import pow as fpow
fpow(2, 2), pow(2, 2)
# **Exercise** What is wrong with the following code?
# In[14]:
import math as m
for m in range(5): m.pow(m, 2)
# There is a name collision: `m` is assigned to an integer in the for loop and so it is no longer the module `math` when calling `m.pow`.
# **Exercise** Use the `randint` function from `random` to simulate the rolling of a die, by printing a random integer from 1 to 6.
# In[15]:
import random
print(random.randint(1, 6))
# ## Built-in Functions
# **How to learn more about a function such as `randint`?**
# There is a built-in function `help` for showing the *docstring* (documentation string).
# In[16]:
import random
help(random.randint) # random must be imported before
# In[17]:
help(random) # can also show the docstring of a module
# In[18]:
help(help)
# **Does built-in functions belong to a module?**
# Indeed, every function must come from a module.
# In[19]:
__builtin__.print('I am from the __builtin__ module.')
# `__builtin__` module is automatically loaded because it provides functions that are commonly used in all programs.
# **How to list everything in a module?**
# We can use the built-in function `dir` (*directory*).
# In[20]:
dir(__builtin__)
# We can also call `dir` without arguments.
# What does it print?
# In[21]:
dir()
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab3b/Big Number Conversion.py
|
<filename>_build/jupyter_execute/Lab3b/Big Number Conversion.py
#!/usr/bin/env python
# coding: utf-8
# # Big Number Conversion
# **CS1302 Introduction to Computer Programming**
# ___
# ## Conversion to Decimal
# In this notebook, we will use iterations to convert numbers with arbitrary size.
# ### Binary-to-Decimal
# In a previous lab, we considered converting a byte string to decimal.
# What about converting a binary string of arbitrary length to decimal?
# Given a binary string of an arbitrarily length $k$,
#
# $$
# b_{k-1}\circ \dots \circ b_1\circ b_0,
# $$
# the decimal number can be computed by the formula
#
# $$
# 2^0 \cdot b_0 + 2^1 \cdot b_1 + \dots + 2^{k-1} \cdot b_{k-1}.
# $$
# In mathematics, we use the summation notation to write the above formula:
#
# $$
# \sum_{i=0}^{k-1} 2^i \cdot b_{i}.
# $$
# In a program, the formula can be implemented as a for loop:
# ```Python
# def binary_to_decimal(binary_str):
# k = len(binary_str)
# decimal = 0 # initialization
# for i in range(k):
# decimal += 2**i * int(binary_str[(k-1)-i]) # iteration
# return decimal
# ```
# Note that $b_i$ is given by `binary_str[(k-1)-i]`:
#
# $$
# \begin{array}{c|c:c:c:c|}\texttt{binary_str} & b_{k-1} & b_{k-2} & \dots & b_0\\ \text{indexing} & [0] & [1] & \dots & [k-1] \end{array}
# $$
# The following is another way to write the for loop.
# ```Python
# def binary_to_decimal(binary_str):
# decimal = 0 # initialization
# for bit in binary_str:
# decimal = decimal * 2 + int(bit) # iteration
# return decimal
# ```
# The algorithm implements the same formula factorized as follows:
#
# $$
# \begin{aligned} \sum_{i=0}^{k-1} 2^i \cdot b_{i}
# &= \left(\sum_{i=1}^{k-1} 2^i \cdot b_{i}\right) + b_0\\
# &= \left(\sum_{i=1}^{k-1} 2^{i-1} \cdot b_{i}\right)\times 2 + b_0 \\
# &= \left(\sum_{j=0}^{k-2} 2^{j} \cdot b_{j+1}\right)\times 2 + b_0 && \text{with $j=i-1$} \\
# &= \underbrace{(\dots (\underbrace{(\underbrace{\overbrace{0}^{\text{initialization}\kern-2em}\times 2 + b_{k-1}}_{\text{first iteration} }) \times 2 + b_{k-2}}_{\text{second iteration} }) \dots )\times 2 + b_0}_{\text{last iteration} }.\end{aligned}
# $$
# **Exercise** Complete the code for `binary_to_decimal` with the most efficient implementation you can think of.
# (You can choose one of the two implementations above but take the time to type in the code instead of copy-and-paste.)
# In[ ]:
def binary_to_decimal(binary_str):
    """Return the non-negative int represented by a binary string.

    Uses Horner's rule: each bit doubles the running total and adds
    itself, so the string is scanned exactly once, left to right.
    """
    decimal = 0
    for bit in binary_str:
        decimal = decimal * 2 + int(bit)  # shift left one bit, append new bit
    return decimal
# In[ ]:
# tests
import numpy as np
def test_binary_to_decimal(decimal, binary_str):
    """Assert that binary_to_decimal maps binary_str to the int decimal."""
    result = binary_to_decimal(binary_str)
    is_valid = isinstance(result, int) and result == decimal
    if not is_valid:
        print(f'{binary_str} should give {decimal} not {result}.')
    assert is_valid
test_binary_to_decimal(0, '0')
test_binary_to_decimal(255, '11111111')
test_binary_to_decimal(52154, '1100101110111010')
test_binary_to_decimal(3430, '110101100110')
# In[ ]:
# binary-to-decimal converter
from ipywidgets import interact
bits = ['0', '1']  # the only characters allowed in a binary string
@interact(binary_str='1011')
def convert_byte_to_decimal(binary_str):
    # Validate every character first; the for-else `else` suite runs only
    # when the loop finishes without `break`, i.e. the whole string is binary.
    for bit in binary_str:
        if bit not in bits:
            print('Not a binary string.')
            break
    else:
        print('decimal:', binary_to_decimal(binary_str))
# ### Undecimal-to-Decimal
# A base-11 number system is called an [undecimal system](https://en.wikipedia.org/wiki/Undecimal). The digits range from 0 to 10 with 10 denoted as X:
#
# $$
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, X.
# $$
#
# The [International Standard Book Number (ISBN)](https://en.wikipedia.org/wiki/International_Standard_Book_Number) uses an undecimal digit.
# **Exercise** In the following code, assign to `decimal` the integer represented by an undecimal string of arbitrary length.
# *Hint:* Write a conditional to
# 1. check if a digit is (capital) `'X'`, and if so,
# 2. convert the digit to the integer value 10.
# In[ ]:
def undecimal_to_decimal(undecimal_str):
    """Return the int represented by a base-11 string (digit 'X' means ten).

    Horner's rule in base 11: each digit multiplies the running total by
    11 and adds the digit's value.
    """
    decimal = 0
    for digit in undecimal_str:
        value = 10 if digit == 'X' else int(digit)  # 'X' is the undecimal ten
        decimal = decimal * 11 + value
    return decimal
# In[ ]:
# tests
def test_undecimal_to_decimal(decimal, undecimal_str):
    """Assert that undecimal_to_decimal maps undecimal_str to the int decimal."""
    result = undecimal_to_decimal(undecimal_str)
    is_valid = isinstance(result, int) and result == decimal
    if not is_valid:
        print(f'{undecimal_str} should give {decimal} not {result}.')
    assert is_valid
test_undecimal_to_decimal(27558279079916281, '6662X0X584839464')
test_undecimal_to_decimal(23022771839270, '73769X2556695')
test_undecimal_to_decimal(161804347284488, '476129248X2067')
# In[ ]:
# undecimal-to-decimal calculator
from ipywidgets import interact
undecimal_digits = [str(i) for i in range(10)] + ['X']  # valid base-11 digits
@interact(undecimal_str='X')
def convert_undecimal_to_decimal(undecimal_str):
    # for-else: the `else` suite runs only if no invalid digit triggered
    # `break`, so conversion happens only for well-formed input.
    for digit in undecimal_str:
        if digit not in undecimal_digits:
            print('Not an undecimal string.')
            break
    else:
        print('decimal:', undecimal_to_decimal(undecimal_str))
# ## Conversion from Decimal
# Consider the reverse process that converts a non-negative decimal number of arbitrary size to a string representation in another number system.
# ### Decimal-to-Binary
# The following code converts a decimal number to a binary string.
# ```Python
# def decimal_to_binary(decimal):
# binary_str = str(decimal % 2)
# while decimal // 2:
# decimal //= 2
# binary_str = str(decimal % 2) + binary_str
# return binary_str
# ```
# To understand the while loop, consider the same formula before, where the braces indicate the value of `decimal` at different times:
#
# $$
# \begin{aligned} \sum_{i=0}^{k-1} 2^i \cdot b_{i} &= \left(\sum_{i=0}^{k-2} 2^{i-2} \cdot b_{i-1}\right)\times 2 + b_0 \\
# &= \underbrace{(\underbrace{ \dots (\underbrace{(0\times 2 + b_{k-1}) \times 2 + b_{k-2}}_{\text{right before the last iteration} } )\times 2 \dots + b_1}_{\text{right before the second iteration} })\times 2 + b_0}_{\text{right before the first iteration} }.\end{aligned}
# $$
# - $b_0$ is the remainder `decimal % 2` right before the first iteration,
# - $b_1$ is the remainder `decimal // 2 % 2` right before the second iteration, and
# - $b_{k-1}$ is the remainder `decimal // 2 % 2` right before the last iteration.
# We can also write a for loop instead of a while loop:
# In[ ]:
from math import floor, log2
def decimal_to_binary(decimal):
    """Return the binary-string representation of a non-negative int."""
    # A positive number needs floor(log2(n)) + 1 bits; zero still needs one.
    num_bits = 1 if decimal == 0 else floor(log2(decimal)) + 1
    bits = []
    for _ in range(num_bits):
        bits.append(str(decimal % 2))  # collect least-significant bit first
        decimal //= 2
    return ''.join(reversed(bits))
# In[ ]:
# decimal-to-binary calculator
@interact(decimal='11')
def convert_decimal_to_binary(decimal):
    # str.isdigit() accepts only non-negative integer literals, so a sign,
    # blank, or decimal point is rejected before int() is attempted.
    if not decimal.isdigit():
        print('Not a non-negative integer.')
    else:
        print('binary:', decimal_to_binary(int(decimal)))
# **Exercise** Explain what the expression `1 + (decimal and floor(log2(decimal)))` calculates. In particular, explain the purpose of the logical `and` operation in the expression?
# YOUR ANSWER HERE
# ### Decimal-to-Undecimal
# **Exercise** Assign to `undecimal_str` the undecimal string that represents a non-negative integer `decimal` of any size.
# *Hint:* For loop or while loop?
# In[ ]:
def decimal_to_undecimal(decimal):
    """Return the base-11 string for a non-negative int (ten is written 'X').

    Repeatedly divides by 11, prepending each remainder's digit; the loop
    condition mirrors the decimal-to-binary version above so that 0 still
    yields the single digit '0'.
    """
    digits = '0123456789X'  # digit characters indexed by their value
    undecimal_str = digits[decimal % 11]  # least significant digit
    while decimal // 11:
        decimal //= 11
        undecimal_str = digits[decimal % 11] + undecimal_str
    return undecimal_str
# In[ ]:
# tests
def test_decimal_to_undecimal(undecimal, decimal):
    """Check that decimal_to_undecimal(decimal) returns the string undecimal.

    The type check is applied to the *computed* value undecimal_ — checking
    the expected argument (which is trivially a str) would never catch a
    wrong return type — matching the sibling testers in this notebook.
    """
    undecimal_ = decimal_to_undecimal(decimal)
    correct = isinstance(undecimal_, str) and undecimal == undecimal_
    if not correct:
        print(
            f'{decimal} should be represented as the undecimal string {undecimal}, not {undecimal_}.'
        )
    assert correct
test_decimal_to_undecimal('X', 10)
test_decimal_to_undecimal('0', 0)
test_decimal_to_undecimal('1752572309X478', 57983478668530)
# In[ ]:
# undecimal-to-decimal calculator
from ipywidgets import interact
@interact(decimal='10')
def convert_decimal_to_undecimal(decimal):
    # isdigit() screens out signs and non-numeric text before int().
    if not decimal.isdigit():
        print('Not a non-negative integer.')
    else:
        print('undecimal:', decimal_to_undecimal(int(decimal)))
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture2/Expressions and Arithmetic.py
|
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# # Expressions and Arithmetic
# **CS1302 Introduction to Computer Programming**
# ___
# ## Operators
# The followings are common operators you can use to form an expression in Python:
# | Operator | Operation | Example |
# | --------: | :------------- | :-----: |
# | unary `-` | Negation | `-y` |
# | `+` | Addition | `x + y` |
# | `-` | Subtraction | `x - y` |
# | `*` | Multiplication | `x*y` |
# | `/` | Division | `x/y` |
# - `x` and `y` in the examples are called the *left and right operands* respectively.
# - The first operator is a *unary operator*, which operates on just one operand.
# (`+` can also be used as a unary operator, but that is not useful.)
# - All other operators are *binary operators*, which operate on two operands.
# Python also supports some more operators such as the followings:
# | Operator | Operation | Example |
# | -------: | :--------------- | :-----: |
# | `//` | Integer division | `x//y` |
# | `%` | Modulo | `x%y` |
# | `**` | Exponentiation | `x**y` |
# In[1]:
# ipywidgets to demonstrate the operations of binary operators
from ipywidgets import interact
# Maps each operator symbol to its display text (spaces around + and -).
binary_operators = {'+':' + ','-':' - ','*':'*','/':'/','//':'//','%':'%','**':'**'}
@interact(operand1=r'10',
          operator=binary_operators,
          operand2=r'3')
def binary_operation(operand1,operator,operand2):
    # Assemble the expression text, evaluate it, and show the expression,
    # its value, and the value's type. eval is safe here only because the
    # input comes from this trusted teaching widget.
    expression = f"{operand1}{operator}{operand2}"
    value = eval(expression)
    print(f"""{'Expression:':>11} {expression}\n{'Value:':>11} {value}\n{'Type:':>11} {type(value)}""")
# **Exercise** What is the difference between `/` and `//`?
# - `/` is the usual division, and so `10/3` returns the floating-point number $3.\dot{3}$.
# - `//` is integer division, and so `10//3` gives the integer quotient 3.
# **What does the modulo operator `%` do?**
# You can think of it as computing the remainder, but the [truth](https://docs.python.org/3/reference/expressions.html#binary-arithmetic-operations) is more complicated than required for the course.
# **Exercise** What does `'abc' * 3` mean? What about `10 * 'a'`?
# - The first expression means concatenating `'abc'` three times.
# - The second means concatenating `'a'` ten times.
# **Exercise** How can you change the default operands (`10` and `3`) for different operators so that the overall expression has type `float`.
# Do you need to change all the operands to `float`?
# - `/` already returns a `float`.
# - For all other operators, changing at least one of the operands to `float` will return a `float`.
# ## Operator Precedence and Associativity
# An expression can consist of a sequence of operations performed in a row such as `x + y*z`.
# **How to determine which operation should be performed first?**
# Like arithmetics, the order of operations is decided based on the following rules applied sequentially:
# 1. *grouping* by parentheses: inner grouping first
# 1. operator *precedence/priority*: higher precedence first
# 1. operator *associativity*:
# - left associativity: left operand first
# - right associativity: right operand first
# **What are the operator precedence and associativity?**
# The following table gives a concise summary:
# | Operators | Associativity |
# | :--------------- | :-----------: |
# | `**` | right |
# | `-` (unary) | right |
# | `*`,`/`,`//`,`%` | left |
# | `+`,`-` | left |
# **Exercise** Play with the following widget to understand the precedence and associativity of different operators.
# In particular, explain whether the expression `-10 ** 2*3` gives $(-10)^{2\times 3}= 10^6 = 1000000$.
# In[2]:
from ipywidgets import fixed
@interact(operator1={'None':'','unary -':'-'},
operand1=fixed(r'10'),
operator2=binary_operators,
operand2=fixed(r'2'),
operator3=binary_operators,
operand3=fixed(r'3')
)
def three_operators(operator1,operand1,operator2,operand2,operator3,operand3):
expression = f"{operator1}{operand1}{operator2}{operand2}{operator3}{operand3}"
value = eval(expression)
print(f"""{'Expression:':>11} {expression}\n{'Value:':>11} {value}\n{'Type:':>11} {type(value)}""")
# The expression evaluates to $(-(10^2))\times 3=-300$ instead because the exponentiation operator `**` has higher precedence than both the multiplication `*` and the negation operators `-`.
# **Exercise** To avoid confusion in the order of operations, we should follow the [style guide](https://www.python.org/dev/peps/pep-0008/#other-recommendations) when writing expression.
# What is the proper way to write `-10 ** 2*3`?
# In[3]:
print(-10**2 * 3) # can use use code-prettify extension to fix incorrect styles
print((-10)**2 * 3)
# ## Augmented Assignment Operators
# - For convenience, Python defines the [augmented assignment operators](https://docs.python.org/3/reference/simple_stmts.html#grammar-token-augmented-assignment-stmt) such as `+=`, where
# - `x += 1` means `x = x + 1`.
# The following widgets demonstrate other augmented assignment operators.
# In[4]:
from ipywidgets import interact, fixed
@interact(initial_value=fixed(r'10'),
          operator=['+=','-=','*=','/=','//=','%=','**='],
          operand=fixed(r'2'))
def binary_operation(initial_value,operator,operand):
    # Build a two-line snippet (plain assignment, then augmented assignment)
    # and exec it with a fresh locals dict so the demo cannot touch real
    # globals. exec on widget text is acceptable only in this trusted demo.
    assignment = f"x = {initial_value}\nx {operator} {operand}"
    _locals = {}
    exec(assignment,None,_locals)
    print(f"""Assignments:\n{assignment:>10}\nx: {_locals['x']} ({type(_locals['x'])})""")
# **Exercise** Can we create an expression using (augmented) assignment operators? Try running the code to see the effect.
# In[62]:
3*(x = 15)
# Assignment operators are used in assignment statements, which are not expressions because they cannot be evaluated.
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture5/Objects.py
|
#!/usr/bin/env python
# coding: utf-8
# # Objects
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Object-Oriented Programming
# **Why object-oriented programming?**
# In[2]:
import jupyter_manim
from manimlib.imports import *
# In[3]:
get_ipython().run_cell_magic('manim', 'HelloWorld -l', "class HelloWorld(Scene):\n def construct(self):\n self.play(Write(TextMobject('Hello, World!')))")
# - `HelloWorld` is a specific `Scene` that is
# - `construct`ed by `play`ing an animation that `Write`
# - the `TextMobject` of the message `'Hello, World!'`.
# **Exercise** Try changing
# - Mobjects: `TextMobject('Hello, World!')` to `TexMobject(r'E=mc^2')` or `Circle()` or `Square()`.
# - Animation objects: `Write` to `FadeIn` or `GrowFromCenter`.
#
# See the [documentation](https://eulertour.com/docs/) for other choices.
# More complicated behavior can be achieved by using different objects.
# In[4]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/ENMyFGmq5OA" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# **What is an object?**
# Almost everything is an [`object`](https://docs.python.org/3/library/functions.html?highlight=object#object) in Python.
# In[5]:
get_ipython().run_line_magic('pinfo', 'isinstance')
isinstance(1, object), isinstance(1.0, object), isinstance('1', object)
# A function is also a [first-class](https://en.wikipedia.org/wiki/First-class_function) object.
# In[6]:
isinstance(print, object), isinstance(''.isdigit, object)
# A data type is also an object.
# In[7]:
# chicken and egg relationship
isinstance(type, object), isinstance(object, type), isinstance(object, object)
# Python is a [*class-based* object-oriented programming](https://en.wikipedia.org/wiki/Object-oriented_programming#Class-based_vs_prototype-based) language:
# - Each object is an instance of a *class* (also called type in Python).
# - An object is a collection of *members/attributes*, each of which is an object.
# In[8]:
get_ipython().run_line_magic('pinfo', 'hasattr')
hasattr(str, 'isdigit')
# Different objects of a class
# - have the same set of attributes as that of the class, but
# - the attribute values can be different.
# In[9]:
get_ipython().run_line_magic('pinfo', 'dir')
dir(1)==dir(int), complex(1, 2).imag != complex(1, 1).imag
# **How to operate on an object?**
# - A class can define a function as an attribute for all its instances.
# - Such a function is called a *method* or *member function*.
# In[10]:
complex.conjugate(complex(1, 2)), type(complex.conjugate)
# A [method](https://docs.python.org/3/tutorial/classes.html#method-objects) can be accessed by objects of the class:
# In[11]:
complex(1, 2).conjugate(), type(complex(1, 2).conjugate)
# `complex(1,2).conjugate` is a *callable* object:
# - Its attribute `__self__` is assigned to `complex(1,2)`.
# - When called, it passes `__self__` as the first argument to `complex.conjugate`.
# In[12]:
callable(complex(1,2).conjugate), complex(1,2).conjugate.__self__
# ## File Objects
# **How to read a text file?**
# Consider reading a csv (comma separated value) file:
# In[13]:
get_ipython().system("more 'contact.csv'")
# To read the file by a Python program:
# In[14]:
f = open('contact.csv') # create a file object for reading
print(f.read()) # return the entire content
f.close() # close the file
# 1. [`open`](https://docs.python.org/3/library/functions.html?highlight=open#open) is a function that creates a file object and assigns it to `f`.
# 1. Associated with the file object,
# - [`read`](https://docs.python.org/3/library/io.html#io.TextIOBase.read) returns the entire content of the file as a string.
# - [`close`](https://docs.python.org/3/library/io.html#io.IOBase.close) flushes and closes the file.
# **Why close a file?**
# If not, depending on the operating system,
# - other programs may not be able to access the file, and
# - changes may not be written to the file.
# To ensure a file is closed properly, we can use the [`with` statement](https://docs.python.org/3/reference/compound_stmts.html#with):
# In[15]:
with open('contact.csv') as f:
print(f.read())
# The `with` statement applies to any [context manager](https://docs.python.org/3/reference/datamodel.html#context-managers) that provides the methods
# - `__enter__` for initialization, and
# - `__exit__` for finalization.
# In[16]:
with open('contact.csv') as f:
print(f, hasattr(f, '__enter__'), hasattr(f, '__exit__'), sep='\n')
# - `f.__enter__` is called after the file object is successfully created and assigned to `f`, and
# - `f.__exit__` is called at the end, which closes the file.
# - `f.closed` indicates whether the file is closed.
# In[17]:
f.closed
# We can iterate a file object in a for loop,
# which implicitly call the method `__iter__` to read a file line by line.
# In[18]:
with open('contact.csv') as f:
for line in f:
print(line, end='')
hasattr(f, '__iter__')
# **Exercise** Print only the first 5 lines of the file `contact.csv`.
# In[19]:
with open('contact.csv') as f:
    ### BEGIN SOLUTION
    # Test the guard BEFORE printing: breaking after the print (as in
    # `print(...); if i >= 5: break`) would emit lines i = 0..5, i.e. six
    # lines, one more than the exercise asks for.
    for i, line in enumerate(f):
        if i >= 5:
            break
        print(line, end='')
    ### END SOLUTION
# **How to write to a text file?**
# Consider backing up `contact.csv` to a new file:
# In[20]:
destination = 'private/new_contact.csv'
# The directory has to be created first if it does not exist:
# In[21]:
import os
os.makedirs(os.path.dirname(destination), exist_ok=True)
# In[22]:
get_ipython().run_line_magic('pinfo', 'os.makedirs')
get_ipython().system('ls')
# To write to the destination file:
# In[23]:
with open('contact.csv') as source_file:
with open(destination, 'w') as destination_file:
destination_file.write(source_file.read())
# In[24]:
get_ipython().run_line_magic('pinfo', 'destination_file.write')
get_ipython().system('more {destination}')
# - The argument `'w'` to `open` sets the file object to write mode.
# - The method `write` writes the input strings to the file.
# **Exercise** We can also use `a` mode to *append* new content to a file.
# Complete the following code to append `new_data` to the file `destination`.
# In[25]:
new_data = '<NAME>,<EMAIL>, (888) 311-9512'
with open(destination, 'a') as f:
### BEGIN SOLUTION
f.write('\n')
f.write(new_data)
### END SOLUTION
get_ipython().system('more {destination}')
# **How to delete a file?**
# Note that the file object does not provide any method to delete the file.
# Instead, we should use the function `remove` of the `os` module.
# In[26]:
if os.path.exists(destination):
os.remove(destination)
# ## String Objects
# **How to search for a substring in a string?**
# A string object has the method `find` to search for a substring.
# E.g., to find the contact information of Tai Ming:
# In[27]:
get_ipython().run_line_magic('pinfo', 'str.find')
with open('contact.csv') as f:
for line in f:
if line.find('Tai Ming') != -1:
record = line
print(record)
break
# **How to split and join strings?**
# A string can be split according to a delimiter using the `split` method.
# In[28]:
record.split(',')
# The list of substrings can be joined back together using the `join` methods.
# In[29]:
print('\n'.join(record.split(',')))
# **Exercise** Print only the phone number (last item) in `record`. Use the method `rstrip` or `strip` to remove unnecessary white spaces at the end.
# In[30]:
get_ipython().run_line_magic('pinfo', 'str.rstrip')
### BEGIN SOLUTION
print(record.split(',')[-1].rstrip())
### END SOLUTION
# **Exercise** Print only the name (first item) in `record` but with
# - surname printed first with all letters in upper case
# - followed by a comma, a space, and
# - the first name as it is in `record`.
#
# E.g., `Tai Ming Chan` should be printed as `CHAN, Tai Ming`.
#
# *Hint*: Use the methods `upper` and `rsplit` (with the parameter `maxsplit=1`).
# In[31]:
get_ipython().run_line_magic('pinfo', 'str.rsplit')
### BEGIN SOLUTION
first, last = record.split(',')[0].rsplit(' ', maxsplit=1)
print('{}, {}'.format(last.upper(),first))
### END SOLUTION
# ## Operator Overloading
# ### What is overloading?
# Recall that the addition operation `+` behaves differently for different types.
# In[32]:
for x, y in (1, 1), ('1', '1'), (1, '1'):
print(f'{x!r:^5} + {y!r:^5} = {x+y!r}')
# - Having an operator perform differently based on its argument types is called [operator *overloading*](https://en.wikipedia.org/wiki/Operator_overloading).
# - `+` is called a *generic* operator.
# - We can also have function overloading to create generic functions.
# ### How to dispatch on type?
# The strategy of checking the type for the appropriate implementation is called *dispatching on type*.
# A naive idea is to put all different implementations together with case-by-case checks of operand types.
# In[33]:
def add_case_by_case(x, y):
    """Naively dispatch on operand types before delegating to +.

    Illustrates why case-by-case type checking does not scale: every new
    type or combination would require editing this function.
    """
    if isinstance(x, int) and isinstance(y, int):
        message = 'Do integer summation...'
    elif isinstance(x, str) and isinstance(y, str):
        message = 'Do string concatenation...'
    else:
        message = 'Return a TypeError...'
    print(message)
    return x + y # replaced by internal implementations
for x, y in (1, 1), ('1', '1'), (1, '1'):
print(f'{x!r:^10} + {y!r:^10} = {add_case_by_case(x,y)!r}')
# It can get quite messy with all possible types and combinations.
# In[34]:
for x, y in ((1, 1.1), (1, complex(1, 2)), ((1, 2), (1, 2))):
print(f'{x!r:^10} + {y!r:^10} = {x+y!r}')
# **What about new data types?**
# In[35]:
from fractions import Fraction # non-built-in type for fractions
for x, y in ((Fraction(1, 2), 1), (1, Fraction(1, 2))):
print(f'{x} + {y} = {x+y}')
# Weaknesses of the naive approach:
# 1. New data types require rewriting the addition operation.
# 1. A programmer may not know all other types and combinations to rewrite the code properly.
# ### How to have data-directed programming?
# The idea is to treat an implementation as a datum that can be returned by the operand types.
# - `x + y` is a [*syntactic sugar*](https://en.wikipedia.org/wiki/Syntactic_sugar) that
# - invokes the method `type(x).__add__(x,y)` of `type(x)` to do the addition.
# In[36]:
for x, y in (Fraction(1, 2), 1), (1, Fraction(1, 2)):
print(f'{x} + {y} = {type(x).__add__(x,y)}') # instead of x + y
# - The first case calls `Fraction.__add__`, which provides a way to add `int` to `Fraction`.
# - The second case calls `int.__add__`, which cannot provide any way of adding `Fraction` to `int`. (Why not?)
# **Why return a [`NotImplemented` object](https://docs.python.org/3.6/library/constants.html#NotImplemented) instead of raising an error/exception?**
# - This allows `+` to continue to handle the addition by
# - dispatching on `Fraction` to call its reverse addition method [`__radd__`](https://docs.python.org/3.6/library/numbers.html#implementing-the-arithmetic-operations).
# In[37]:
get_ipython().run_cell_magic('mytutor', '-h 500', "from fractions import Fraction\ndef add(x, y):\n '''Simulate the + operator.'''\n sum = x.__add__(y)\n if sum is NotImplemented:\n sum = y.__radd__(x)\n return sum\n\n\nfor x, y in (Fraction(1, 2), 1), (1, Fraction(1, 2)):\n print(f'{x} + {y} = {add(x,y)}')")
# The object-oriented programming techniques involved are formally called:
# - [*Polymorphism*](https://en.wikipedia.org/wiki/Polymorphism_(computer_science)): Different types can have different implementations of the `__add__` method.
# - [*Single dispatch*](https://en.wikipedia.org/wiki/Dynamic_dispatch): The implementation is chosen based on one single type at a time.
# Remarks:
# - A method with starting and trailing double underscores in its name is called a [*dunder method*](https://dbader.org/blog/meaning-of-underscores-in-python).
# - Dunder methods are not intended to be called directly. E.g., we normally use `+` instead of `__add__`.
# - [Other operators](https://docs.python.org/3/library/operator.html?highlight=operator) have their corresponding dunder methods that overloads the operator.
#
# ## Object Aliasing
# **When are two objects identical?**
# - Two objects are the same if they occupy the same memory.
# - The keyword `is` checks whether two objects are the same object.
# - The function `id` returns a unique id number for each object.
# In[38]:
get_ipython().run_cell_magic('mytutor', '-h 400', "x, y = complex(1,2), complex(1,2)\nz = x\n\nfor expr in 'id(x)', 'id(y)', 'id(z)', 'x == y == z', 'x is y', 'x is z':\n print(expr,eval(expr))")
# As the box-pointer diagram shows:
# - `x` is not `y` because they point to objects at different memory locations,
# even though the objects have the same type and value.
# - `x` is `z` because the assignment `z = x` binds `z` to the same memory location `x` points to.
# `z` is said to be an *alias* (another name) of `x`.
# **Should we use `is` or `==`?**
# `is` is faster but:
# In[39]:
1 is 1, 1 is 1., 1 == 1.
# - `1 is 1.` returns false because `1` is `int` but `1.` is `float`.
# - `==` calls the method `__eq__` of `float` which returns mathematical equivalence.
# *Can we use `is` for integer comparison?*
# In[40]:
x, y = 1234, 1234
1234 is 1234, x is y
# No. The behavior of `is` is not entirely predictable.
# **When should we use `is`?**
# `is` can be used for [built-in constants](https://docs.python.org/3/library/constants.html#built-in-constants) such as `None` and `NotImplemented`
# because there can only be one instance of each of them.
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab5/Pandas.py
|
<reponame>ccha23/CS1302ICP<filename>_build/jupyter_execute/Lab5/Pandas.py
#!/usr/bin/env python
# coding: utf-8
# # Pandas
# **CS1302 Introduction to Computer Programming**
# ___
# In this lab, we will analyze COVID19 data using a powerful package called [`pandas`](https://pandas.pydata.org/docs/user_guide/index.html).
# The package name comes from *panel data* and *Python for data analysis*.
# ## Loading CSV Files with Pandas
# [DATA.GOV.HK](https://data.gov.hk/en-data/dataset/hk-dh-chpsebcddr-novel-infectious-agent) provides an [API](https://data.gov.hk/en/help/api-spec#historicalAPI) to retrieve historical data on COVID-19 cases in Hong Kong.
# The following uses the `urlencode` function to create the url that links to a csv file containing probable and confirmed cases of COVID-19 by Aug 1st, 2020.
# In[ ]:
from urllib.parse import urlencode
url_data_gov_hk_get = 'https://api.data.gov.hk/v1/historical-archive/get-file'
url_covid_csv = 'http://www.chp.gov.hk/files/misc/enhanced_sur_covid_19_eng.csv'
time = '20200801-1204'
url_covid = url_data_gov_hk_get + '?' + urlencode({
'url': url_covid_csv,
'time': time
})
print(url_covid)
# `urlencode` creates a string `'url=<...>&time=<...>'` with some [special symbols encoded](https://www.w3schools.com/tags/ref_urlencode.ASP), e.g.:
# - `:` is replaced by `%3A`, and
# - `/` is replaced by `%2F`.
# **Exercise** Write a function `simple_encode` that takes in a string and return a string with `:` and `/` encoded as described above.
# *Hint:* Use the `replace` method of `str`.
# In[ ]:
def simple_encode(string):
    '''Returns the string with : and / encoded to %3A and %2F respectively.'''
    # Exercise stub: implement with str.replace (':' -> '%3A', '/' -> '%2F');
    # intentionally raises until a solution is filled in. The autograded test
    # below checks the encoding against a sample URL.
    # YOUR CODE HERE
    raise NotImplementedError()
# In[ ]:
# tests
assert simple_encode(
'http://www.chp.gov.hk/files/misc/enhanced_sur_covid_19_eng.csv'
) == 'http%3A%2F%2Fwww.chp.gov.hk%2Ffiles%2Fmisc%2Fenhanced_sur_covid_19_eng.csv'
# Like the function `open` that loads a file into memory, `pandas` has a function `read_csv` that loads a csv file.
# The csv file can even reside on the web.
# In[ ]:
import pandas as pd
df_covid = pd.read_csv(url_covid)
print(type(df_covid))
df_covid
# The above creates a [`DataFrame` object](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html?highlight=dataframe#pandas.DataFrame). The content of the csv file is displayed as an HTML table conveniently.
# (We can control how much information to show by setting the [display options](https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html).)
# **Exercise** Using the function `pd.read_csv`, load `building_list_eng.csv` as `df_building` from the url `url_building`.
# In[ ]:
url_building_csv = 'http://www.chp.gov.hk/files/misc/building_list_eng.csv'
time = '20200801-1203'
url_building = url_data_gov_hk_get + '?' + urlencode({
'url': url_building_csv,
'time': time
})
# YOUR CODE HERE
raise NotImplementedError()
df_building
# In[ ]:
# tests
assert all(df_building.columns == ['District', 'Building name', 'Last date of residence of the case(s)',
'Related probable/confirmed cases']) # check column names
# ## Selecting and Removing columns
# We can obtain the column labels of a `Dataframe` using its `columns` attribute.
# In[ ]:
df_covid.columns
# Using the indexing operator `[]`, a column of a `DataFrame` can be returned as a [`Series` object](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html), which is essentially a named array.
# We can further use the method `value_counts` to return the counts of different values in another `Series` object.
# In[ ]:
series_gender_counts = df_covid['Gender'].value_counts() # return the number of male and female cases
print(type(series_gender_counts))
series_gender_counts
# **Exercise** For `df_building`, use the operator `[]` and method `value_counts` to assign `series_district_counts` to a `Series` object that stores the counts of buildings in different district.
# In[ ]:
# YOUR CODE HERE
raise NotImplementedError()
series_district_counts
# In[ ]:
# tests
assert all(series_district_counts[['Wong Tai Sin', 'Kwun Tong']] == [313, 212])
# In `df_covid`, it appears that the column `Name of hospital admitted` contains no information. We can confirm this by:
# 1. Returning the column as a `Series` with `df_covid['Name of hospital admitted']`, and
# 1. printing an array of unique column values using the method `unique`.
# In[ ]:
df_covid['Name of hospital admitted'].unique()
# **Exercise** Drop the column `Name of hospital admitted` using the `drop` method of the DataFrame.
#
# Use the keyword argument `inplace=True`, so that the method will
# - mutate the original DataFrame in place instead of
# - creating a copy of the DataFrame with the column dropped.
# In[ ]:
# YOUR CODE HERE
raise NotImplementedError()
df_covid
# In[ ]:
# tests
assert all(df_covid.columns == ['Case no.', 'Report date', 'Date of onset', 'Gender', 'Age',
'Hospitalised/Discharged/Deceased', 'HK/Non-HK resident',
'Case classification*', 'Confirmed/probable'])
# ## Selecting Rows of DataFrame
# We can select the confirmed male cases using the attribute [`.loc`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html) and the indexing operator `[]`.
# `.loc` implements an advanced indexing method `__getitem__` that can take a boolean vector.
# In[ ]:
df_confirmed_male = df_covid.loc[(df_covid['Confirmed/probable']=='Confirmed') & (df_covid['Gender']=='M')]
df_confirmed_male
# **Exercise** Assign `df_confirmed_local` to a `DataFrame` of confirmed cases that are local or epidemiologically linked with a local case.
# In[ ]:
# YOUR CODE HERE
raise NotImplementedError()
df_confirmed_local
# In[ ]:
# tests
assert set(df_confirmed_local['Case classification*'].unique()) == {
'Epidemiologically linked with local case', 'Local case'
}
# ## Challenge
# **Exercise** Write a function `case_counts` that
# - takes an argument `district`, and
# - returns the number of cases in `district`.
#
# *Hint:* Be careful that there can be more than one case for each building and there may be multiple buildings associated with one case.
# You may want to use the `split` and `strip` methods of `str` to obtain a list of cases from the `Dataframe`.
# In[ ]:
def case_counts(district):
    '''Return the number of distinct cases associated with buildings in
    `district` (looked up in df_building). NOTE: one case may appear under
    several buildings and one building may list several comma-separated
    cases, so split the case lists and deduplicate before counting.'''
    # Exercise stub: intentionally raises until a solution is filled in.
    # YOUR CODE HERE
    raise NotImplementedError()
# In[ ]:
# tests
assert case_counts('Kwai Tsing') == 109
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lecture9/Monte Carlo Simulation and Linear Algebra.py
|
#!/usr/bin/env python
# coding: utf-8
# # Monte Carlo Simulation and Linear Algebra
# **CS1302 Introduction to Computer Programming**
# ___
# In[1]:
get_ipython().run_line_magic('reload_ext', 'mytutor')
# ## Monte Carlo simulation
# **What is Monte Carlo simulation?**
# > The name Monte Carlo refers to the [Monte Carlo Casino in Monaco](https://en.wikipedia.org/wiki/Monte_Carlo_Casino) where Ulam's uncle would borrow money from relatives to gamble.
# It would be nice to simulate the casino, so Ulam's uncle did not need to borrow money to go.
# Actually...,
# - Monte Carlo is the code name of the secret project for creating the [hydrogen bomb](https://en.wikipedia.org/wiki/Monte_Carlo_method).
# - [Ulam](https://en.wikipedia.org/wiki/Stanislaw_Ulam) worked with [<NAME>](https://en.wikipedia.org/wiki/John_von_Neumann) to program the first electronic computer ENIAC to simulate a computational model of a thermonuclear reaction.
#
# (See also [The Beginning of the Monte Carlo Method](https://permalink.lanl.gov/object/tr?what=info:lanl-repo/lareport/LA-UR-88-9067) for a more detailed account.)
# **How to compute the value of $\pi$**?
# An application of Monte Carlo simulation is in approximating $\pi$ using
# the [Buffon's needle](https://en.wikipedia.org/wiki/Buffon%27s_needle_problem).
# There is [a program](https://www.khanacademy.org/computer-programming/pi-by-buffons-needle/6695500989890560) written in javascript to do this.
# The javascript program is a bit long to digest, so we will use an alternative simulation that is easier to understand/program.
# If we uniformly randomly pick a point in a square, what is the chance that it lies in the inscribed circle, i.e., the biggest circle inside the square?
# The chance is the area of the circle divided by the area of the square. Suppose the square has length $\ell$, then the chance is
#
# $$ \frac{\pi (\ell /2)^2}{ (\ell)^2 } = \frac{\pi}4 $$
# independent of the length $\ell$.
# **Exercise** Complete the following function to return an approximation of $\pi$ as follows:
# 1. Simulate the random process of picking a point from a square repeatedly `n` times by
# generating the $x$ and $y$ coordinates uniformly randomly from a unit interval $[0,1)$.
# 2. Compute the fraction of times the point is in the first quadrant of the inscribed circle as shown in the figure below.
# 3. Return $4$ times the fraction as the approximation.
# <p><a href="https://commons.wikimedia.org/wiki/File:Pi_30K.gif#/media/File:Pi_30K.gif"><img src="https://upload.wikimedia.org/wikipedia/commons/8/84/Pi_30K.gif" alt="Pi 30K.gif"></a></p>
# In[2]:
import random, math
def approximate_pi(n):
    """Monte-Carlo estimate of pi from n uniform points in the unit square.

    Draws n points (x, y) uniformly from [0, 1)^2 and returns 4 times the
    fraction that fall inside the quarter of the unit circle
    (x**2 + y**2 < 1).
    """
    ### BEGIN SOLUTION
    # Count with a generator expression instead of materializing a throwaway
    # list of n booleans: same count, O(1) extra memory.
    return 4 * sum(1 for _ in range(n)
                   if random.random()**2 + random.random()**2 < 1) / n
    ### END SOLUTION
print(f'Approximate: {approximate_pi(int(1e7))}\nGround truth: {math.pi}')
# **How accurate is the approximation?**
# The following uses a powerful library `numpy` for computing to return a [$95\%$-confidence interval](http://onlinestatbook.com/2/estimation/mean.html#:~:text=To%20compute%20the%2095%25%20confidence,be%20between%20the%20cutoff%20points.).
# In[3]:
import numpy as np
def np_approximate_pi(n):
    """Vectorized Monte-Carlo estimate of pi with a 95%-confidence interval.

    Samples n points in the unit square in one shot and returns
    np.array([lower, upper]) covering the estimate +/- two standard errors.
    """
    hits = (np.random.random((n, 2)) ** 2).sum(axis=-1) < 1
    estimate = 4 * hits.mean()
    stderr = 4 * hits.std() / n ** 0.5
    half_width = 2 * stderr
    return np.array([estimate - half_width, estimate + half_width])
interval = np_approximate_pi(int(1e7))
print(f'''95%-confidence interval: {interval}
Estimate: {interval.mean():.4f} ± {(interval[1]-interval[0])/2:.4f}
Ground truth: {math.pi}''')
# Note that the computation done using `numpy` is over $5$ times faster despite the additional computation of the standard deviation.
# There are faster methods to approximate $\pi$ such as the [Chudnovsky_algorithm](https://en.wikipedia.org/wiki/Chudnovsky_algorithm), but Monte-Carlo method is still useful in more complicated situations.
# E.g., see the Monte Carlo simulation of a [real-life situation](https://www.youtube.com/watch?v=-fCVxTTAtFQ) in playing basketball:
# > "When down by three and left with only 30 seconds is it better to attempt a hard 3-point shot or an easy 2-point shot and get another possession?" --<NAME>
# ## Linear Algebra
# **How to solve a linear equation?**
# Given the following linear equation in variable $x$ with real-valued coefficient $a$ and $b$,
#
# $$ a x = b,$$
# what is the value of $x$ that satisfies the equation?
# **Exercise** Complete the following function to return either the unique solution of $x$ or `None` if a unique solution does not exist.
# In[4]:
def solve_linear_equation(a, b):
    """Return the unique solution x of a*x == b, or None when a == 0
    (the equation then has no unique solution)."""
    ### BEGIN SOLUTION
    if a == 0:
        return None
    return b / a
    ### END SOLUTION
import ipywidgets as widgets
@widgets.interact(a=(0,5,1),b=(0,5,1))
def linear_equation_solver(a=2, b=3):
    # Interactive demo: integer sliders for a and b feed solve_linear_equation
    # and display the equation together with its solution (or None).
    print(f'''linear equation: {a}x = {b}
solution: x = {solve_linear_equation(a,b)}''')
# **How to solve multiple linear equations?**
# In the general case, we have a system of $m$ linear equations and $n$ variables:
#
# $$ \begin{aligned}
# a_{00} x_0 + a_{01} x_1 + \dots + a_{0(n-1)} x_{n-1} &= b_0\\
# a_{10} x_0 + a_{11} x_1 + \dots + a_{1(n-1)} x_{n-1} &= b_1\\
# \vdots\kern2em &= \vdots\\
# a_{(m-1)0} x_0 + a_{(m-1)1} x_1 + \dots + a_{(m-1)(n-1)} x_{n-1} &= b_{m-1}\\
# \end{aligned}
# $$
# where
# - $x_j$ for $j\in \{0,\dots,n-1\}$ are the variables, and
# - $a_{ij}$ and $b_j$ for $i\in \{0,\dots,m-1\}$ and $j\in \{0,\dots,n-1\}$ are the coefficients.
#
# A fundamental problem in linear algebra is to compute the unique solution to the system if it exists.
# We will consider the simpler 2-by-2 system with 2 variables and 2 equations:
#
# $$ \begin{aligned}
# a_{00} x_0 + a_{01} x_1 &= b_0\\
# a_{10} x_0 + a_{11} x_1 &= b_1.
# \end{aligned}
# $$
# To get an idea of the solution, suppose
#
# $$a_{00}=a_{11}=1, a_{01} = a_{10} = 0.$$
# The system of equations become
#
# $$ \begin{aligned}
# x_0 \hphantom{+ x_1} &= b_0\\
# \hphantom{x_0 +} x_1 &= b_1,
# \end{aligned}
# $$
# which gives the solution directly.
# What about $a_{00}=a_{11}=2$ instead?
#
# $$ \begin{aligned}
# 2x_0 \hphantom{+ x_1} &= b_0\\
# \hphantom{2x_0 +} 2x_1 &= b_1,
# \end{aligned}$$
# To obtain the solution, we simply divide both equations by 2:
#
# $$ \begin{aligned}
# x_0 \hphantom{+ x_1} &= \frac{b_0}2\\
# \hphantom{x_0 +} x_1 &= \frac{b_1}2.
# \end{aligned}
# $$
# What if $a_{01}=2$ instead?
#
# $$ \begin{aligned}
# 2x_0 + 2x_1 &= b_0\\
# \hphantom{2x_0 +} 2x_1 &= b_1\\
# \end{aligned}
# $$
# The second equation gives the solution of $x_1$, and we can use the solution in the first equation to solve for $x_0$. More precisely:
# - Subtract the second equation from the first one:
#
# $$ \begin{aligned}
# 2x_0 \hphantom{+2x_1} &= b_0 - b_1\\
# \hphantom{2x_0 +} 2x_1 &= b_1\\
# \end{aligned}
# $$
# - Divide both equation by 2:
#
# $$ \begin{aligned}
# x_0 \hphantom{+ x_1} &= \frac{b_0 - b_1}2\\
# \hphantom{x_0 +} x_1 &= \frac{b_1}2\\
# \end{aligned}
# $$
# The above operations are called *row operations* in linear algebra: each row is an equation.
# A system of linear equations can be solved by the linear operations of
# 1. multiplying an equation by a constant, and
# 2. subtracting one equation from another.
# How to write a program to solve a general 2-by-2 system? We will use the `numpy` library.
# ### Creating `numpy` arrays
# **How to store the coefficients?**
# In linear algebra, a system of equations such as
#
# $$ \begin{aligned}
# a_{00} x_0 + a_{01} x_1 &= b_0\\
# a_{10} x_0 + a_{11} x_1 &= b_1
# \end{aligned}
# $$
# is written concisely in *matrix* form as $ \mathbf{A} \mathbf{x} = \mathbf{b} $:
#
# $$\overbrace{\begin{bmatrix}
# a_{00} & a_{01}\\
# a_{10} & a_{11}
# \end{bmatrix}}^{\mathbf{A}}
# \overbrace{
# \begin{bmatrix}
# x_0\\
# x_1
# \end{bmatrix}}
# ^{\mathbf{x}}
# = \overbrace{\begin{bmatrix}
# b_0\\
# b_1
# \end{bmatrix}}^{\mathbf{b}},
# $$
# where
# $ \mathbf{A} \mathbf{x}$ is the *matrix multiplication*
#
# $$ \mathbf{A} \mathbf{x} = \begin{bmatrix}
# a_{00} x_0 + a_{01} x_1\\
# a_{10} x_0 + a_{11} x_1
# \end{bmatrix}.
# $$
# We say that $\mathbf{A}$ is a [*matrix*](https://en.wikipedia.org/wiki/Matrix_(mathematics)) and its dimension/shape is $2$-by-$2$:
# - The first dimension/axis has size $2$. We also say that the matrix has $2$ rows.
# - The second dimension/axis has size $2$. We also say that the matrix has $2$ columns.
# $\mathbf{x}$ and $\mathbf{b}$ are called column vectors, which are matrices with one column.
# Consider the example
# $$ \begin{aligned}
# 2x_0 + 2x_1 &= 1\\
# \hphantom{2x_0 +} 2x_1 &= 1,
# \end{aligned}$$
# or in matrix form with
# $$ \begin{aligned}
# \mathbf{A}&=\begin{bmatrix}
# a_{00} & a_{01} \\
# a_{10} & a_{11}
# \end{bmatrix}
# = \begin{bmatrix}
# 2 & 2 \\
# 0 & 2
# \end{bmatrix}\\
# \mathbf{b}&=\begin{bmatrix}
# b_0\\
# b_1
# \end{bmatrix} = \begin{bmatrix}
# 1\\
# 1
# \end{bmatrix}\end{aligned}$$
# Instead of using `list` to store the matrix, we will use a `numpy` array.
# In[5]:
A = np.array([[2.,2],[0,2]])
b = np.array([1.,1])
A, b
# Compared to `list`, `numpy` array is often more efficient and has more useful attributes.
# In[6]:
array_attributes = set(attr for attr in dir(np.array([])) if attr[0]!='_')
list_attributes = set(attr for attr in dir(list) if attr[0]!='_')
print('\nCommon attributes:\n',*(array_attributes & list_attributes))
print('\nArray-specific attributes:\n', *(array_attributes - list_attributes))
print('\nList-specific attributes:\n',*(list_attributes - array_attributes))
# The following attributes give the dimension/shape, number of dimensions, size, and datatype.
# In[7]:
for array in A, b:
print(f'''{array}
shape: {array.shape}
ndim: {array.ndim}
size: {array.size}
dtype: {array.dtype}
''')
# Note that the function `len` only returns the size of the first dimension:
# In[8]:
assert A.shape[0] == len(A)
len(A)
# Unlike `list`, every `numpy` array has a data type. For efficient computation/storage, numpy implements different data types with different storage sizes:
# * integer: `int8`, `int16`, `int32`, `int64`, `uint8`, ...
# * float: `float16`, `float32`, `float64`, ...
# * complex: `complex64`, `complex128`, ...
# * boolean: `bool8`
# * Unicode: `string`
# * Object: `object`
# E.g., `int64` is the 64-bit integer. Unlike `int`, `int64` has a range.
# In[9]:
get_ipython().run_line_magic('pinfo', 'np.int64')
print(f'range: {np.int64(-2**63)} to {np.int64(2**63-1)}')
np.int64(2**63) # overflow error
# We can use the `astype` method to convert the data type:
# In[10]:
A_int64 = A.astype(int) # converts to int64 by default
A_float32 = A.astype(np.float32) # converts to float32
for array in A_int64, A_float32:
print(array, array.dtype)
# We have to be careful about assigning items of different types to an array.
# In[11]:
A_int64[0,0] = 1
print(A_int64)
A_int64[0,0] = 0.5
print(A_int64) # intended assignment fails
np.array([int(1), float(1)]) # will be all floating point numbers
# **Exercise** Create a heterogeneous numpy array to store both integer and strings:
# ```Python
# [0, 1, 2, 'a', 'b', 'c']
# ```
# *Hint:* There is an numpy data type called `object`.
# In[12]:
get_ipython().run_line_magic('pinfo', 'np.object')
### BEGIN SOLUTION
heterogeneous_np_array = np.array([*range(3),*'abc'],dtype=object)
### END SOLUTION
heterogeneous_np_array
# Be careful when creating arrays of `tuple`/`list`:
# In[13]:
for array in (np.array([(1,2),[3,4,5]],dtype=object),
np.array([(1,2),[3,4]],dtype=object)):
print(array, '\nshape:', array.shape, 'length:', len(array), 'size:', array.size)
# `numpy` provides many functions to create an array:
# In[14]:
get_ipython().run_line_magic('pinfo', 'np.zeros')
np.zeros(0), np.zeros(1), np.zeros((2,3,4)) # Dimension can be higher than 2
# In[15]:
get_ipython().run_line_magic('pinfo', 'np.ones')
np.ones(0, dtype=int), np.ones((2,3,4), dtype=int) # initialize values to int 1
# In[16]:
get_ipython().run_line_magic('pinfo', 'np.eye')
np.eye(0), np.eye(1), np.eye(2), np.eye(3) # identity matrices
# In[17]:
get_ipython().run_line_magic('pinfo', 'np.diag')
np.diag(range(1)), np.diag(range(2)), np.diag(np.ones(3),k=1) # diagonal matrices
# In[18]:
get_ipython().run_line_magic('pinfo', 'np.empty')
np.empty(0), np.empty((2,3,4), dtype=int) # create array faster without initialization
# `numpy` also provides functions to build an array using rules.
# In[19]:
get_ipython().run_line_magic('pinfo', 'np.arange')
np.arange(5), np.arange(4,5), np.arange(4.5,5.5,0.5) # like range but allow non-integer parameters
# In[20]:
get_ipython().run_line_magic('pinfo', 'np.linspace')
np.linspace(4,5), np.linspace(4,5,11), np.linspace(4,5,11) # can specify number of points instead of step
# In[21]:
get_ipython().run_line_magic('pinfo', 'np.fromfunction')
np.fromfunction(lambda i, j: i * j, (3,4)) # can initialize using a function
# We can also reshape an array using the `reshape` method/function:
# In[22]:
array = np.arange(2*3*4)
get_ipython().run_line_magic('pinfo', 'array.reshape')
(array,
array.reshape(2,3,4), # last axis index changes fastest
array.reshape(2,3,-1), # size of last axis calculated automatically
array.reshape((2,3,4), order='F')) # first axis index changes fastest
# `flatten` is a special case of reshaping an array to one dimension.
# (Indeed, `flatten` returns a copy of the array but `reshape` returns a dynamic view whenever possible.)
# In[23]:
array = np.arange(2*3*4).reshape(2,3,4)
array, array.flatten(), array.reshape(-1), array.flatten(order='F')
# **Exercise** Correct the following function to print every element of an array line-by-line.
# ```Python
# def print_array_entries_line_by_line(array):
# for i in array:
# print(i)
# ```
# In[24]:
def print_array_entries_line_by_line(array):
    """Print every entry of `array` on its own line, regardless of the
    array's number of dimensions (row-major order)."""
    ### BEGIN SOLUTION
    for entry in array.flat:  # flat iterator: same order as flatten(), no copy
        print(entry)
    ### END SOLUTION
print_array_entries_line_by_line(np.arange(2*3*4).reshape(2,3,4))
# ### Operating on `numpy` arrays
# **How to verify the solution of a system of linear equations?**
# Before solving the system of linear equations, let us try to verify a solution to the equations:
#
# $$ \begin{aligned}
# 2x_0 + 2x_1 &= 1\\
# \hphantom{2x_0 +} 2x_1 &= 1
# \end{aligned}
# $$
# `numpy` provides the function `matmul` and the operator `@` for matrix multiplication.
# In[25]:
print(np.matmul(A,np.array([0,0])) == b)
print(A @ np.array([0,0.5]) == b)
# Note that the comparison on `numpy` arrays returns a boolean array instead of a boolean value, unlike the comparison operations on lists.
# To check whether all items are true, we use the `all` method.
# In[26]:
print((np.matmul(A,np.array([0,0])) == b).all())
print((A @ np.array([0,0.5]) == b).all())
# **How to concatenate arrays?**
# We will operate on an augmented matrix of the coefficients:
#
# $$ \begin{aligned} \mathbf{C} &= \begin{bmatrix}
# \mathbf{A} & \mathbf{b}
# \end{bmatrix}\\
# &= \begin{bmatrix}
# a_{00} & a_{01} & b_0 \\
# a_{10} & a_{11} & b_1
# \end{bmatrix}
# \end{aligned}
# $$
#
# `numpy` provides functions to create block matrices:
# In[27]:
get_ipython().run_line_magic('pinfo', 'np.block')
C = np.block([A,b.reshape(-1,1)]) # reshape to ensure same ndim
C
# To stack an array along different axes:
# In[28]:
array = np.arange(1*2*3).reshape(1,2,3)
for concat_array in [array,
np.hstack((array,array)), # stack along the first axis
np.vstack((array,array)), # second axis
np.concatenate((array,array), axis=-1), # last axis
np.stack((array,array), axis=0)]: # new axis
print(concat_array, '\nshape:', concat_array.shape)
# **How to perform arithmetic operations on a `numpy` array?**
# To divide all the coefficients by $2$, we can simply write:
# In[29]:
D = C / 2
D
# Note that the above does not work for `list`.
# In[30]:
C.tolist() / 2 # deep convert to list
# Arithmetic operations on `numpy` arrays apply if the arrays have compatible dimensions. Two dimensions are compatible when
# - they are equal, except for
# - components equal to 1.
# `numpy` uses [broadcasting rules](https://numpy.org/doc/stable/user/basics.broadcasting.html#general-broadcasting-rules) to stretch the axis of size 1 up to match the corresponding axis in other arrays.
# `C / 2` is a example where the second operand $2$ is broadcasted to a $2$-by-$2$ matrix before the elementwise division. Another example is as follows.
# In[31]:
three_by_one = np.arange(3).reshape(3,1)
one_by_four = np.arange(4).reshape(1,4)
print(f'''
{three_by_one}
*
{one_by_four}
==
{three_by_one * one_by_four}
''')
# Next, to subtract the second row of the coefficients from the first row:
# In[32]:
D[0,:] = D[0,:] - D[1,:]
D
# Notice the use of commas to index different dimensions instead of using multiple brackets:
# In[33]:
assert (D[0][:] == D[0,:]).all()
# Using this indexing technique, it is easy to extract the last column as the solution to the system of linear equations:
# In[34]:
x = D[:,-1]
x
# This gives the desired solution $x_0=0$ and $x_1=0.5$ for
#
# $$ \begin{aligned}
# 2x_0 + 2x_1 &= 1\\
# \hphantom{2x_0 +} 2x_1 &= 1\\
# \end{aligned}$$
# `numpy` provides many [convenient ways](https://numpy.org/doc/stable/reference/arrays.indexing.html#advanced-indexing) to index an array.
# In[35]:
B = np.arange(2*3).reshape(2,3)
B, B[(0,1),(2,0)] # selecting the corners using integer array
# In[36]:
B = np.arange(2*3*4).reshape(2,3,4)
B, B[0], B[0,(1,2)], B[0,(1,2),(2,3)], B[:,(1,2),(2,3)] # pay attention to the last two cases
# In[37]:
assert (B[...,-1] == B[:,:,-1]).all()
B[...,-1] # ... expands to selecting all elements of all previous dimensions
# In[38]:
B[B>5] # indexing using boolean array
# Finally, the following function solves a system of 2 linear equations with 2 variables.
# In[39]:
def solve_2_by_2_system(A,b):
    '''Returns the unique solution of the 2-by-2 linear system A x = b,
    if it exists, else returns None.

    A: 2-by-2 coefficient array; b: length-2 right-hand-side array.
    '''
    # Work in floating point: with integer inputs, np.hstack yields an int
    # array and the in-place row operations below would silently truncate
    # (e.g. 1/2 -> 0), giving a wrong "solution".
    C = np.hstack((A, b.reshape(-1,1))).astype(float)
    if C[0,0] == 0: C = C[(1,0),:]      # swap rows so the first pivot is nonzero
    if C[0,0] == 0: return None         # first column entirely zero: singular
    C[0,:] = C[0,:] / C[0,0]            # normalize the first pivot to 1
    C[1,:] = C[1,:] - C[0,:] * C[1,0]   # eliminate below the first pivot
    if C[1,1] == 0: return None         # second pivot zero: no unique solution
    C[1,:] = C[1,:] / C[1,1]            # normalize the second pivot to 1
    C[0,:] = C[0,:] - C[1,:] * C[0,1]   # back-substitute above the second pivot
    return C[:,-1]                      # last column now holds the solution
# In[40]:
# tests
for A in (np.eye(2),
np.ones((2,2)),
np.stack((np.ones(2),np.zeros(2))),
np.stack((np.ones(2),np.zeros(2)),axis=1)):
print(f'A={A}\nb={b}\nx={solve_2_by_2_system(A,b)}\n')
# ### Universal functions
# Why does the first line of code below return two arrays but the second code return only one array? Shouldn't the first line of code return the following?
# ```Python
# array([[(0,1), (0,2), (0,3)],
# [(1,1), (1,2), (1,3)]])
# ```
# In[41]:
print(np.fromfunction(lambda i,j:(i,j), (2,3), dtype=int))
print(np.fromfunction(lambda i,j:(i*j), (2,3), dtype=int))
# From the documentation, `fromfunction` applies the given function to the two arrays as arguments.
# - The first line of code returns a tuple of the arrays.
# - The second line of code multiplies the two arrays to give one array, according to how multiplication works for numpy arrays.
# Indeed, `numpy` implements [universal/vectorized functions/operators](https://numpy.org/doc/stable/reference/ufuncs.html) that take arrays as arguments and perform operations with appropriate broadcasting rules. The following is an example that uses the universal function `np.sin`:
# In[42]:
import matplotlib.pyplot as plt
# The decorator wires interactive sliders for a (int steps) and b (0.1 steps)
# to the plotting function below.
@widgets.interact(a=(0,5,1),b=(-1,1,0.1))
def plot_sin(a=1,b=0):
    """Plot sin(a*x + b*pi) for x in [0, 2*pi] using vectorized numpy ops."""
    x = np.linspace(0,2*math.pi)
    plt.plot(x,np.sin(a*x+b*math.pi)) # np.sin, *, + are universal functions
    plt.title(r'$\sin(ax+b\pi)$')
    plt.xlabel(r'$x$ (radian)')
# In addition to making the code shorter, universal functions are both efficient and flexible. (Recall the Monte Carlo simulation to approximate $\pi$.)
# **Exercise** Explain how the Monte Carlo simulation work using universal functions:
# ```Python
# def np_approximate_pi(n):
# in_circle = (np.random.random((n,2))**2).sum(axis=-1) < 1
# mean = 4 * in_circle.mean()
# std = 4 * in_circle.std() / n**0.5
# return np.array([mean - 2*std, mean + 2*std])
# ```
# - `random.random` generates a numpy array for $n$ points in the unit square randomly.
# - `sum` sums up the element along the last axis to give the squared distance.
# - `<` returns the boolean array indicating whether each point is in the first quadrant of the inscribed circle.
# - `mean` and `std` returns the mean and standard deviation of the boolean array with True and False interpreted as 1 and 0 respectively.
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab1/Setup.py
|
#!/usr/bin/env python
# coding: utf-8
# # Setup
# CS1302 Introduction to Computer Programming
# ___
# ## JupyterHub
# ### How to access the JupyterHub Server?
# 1. Enter the url of the Jupyterhub server [ltjh.cs.cityu.edu.hk](https://ltjh.cs.cityu.edu.hk) in a web browser.
# 1. Enter your [EID](https://www.cityu.edu.hk/esu/eid.htm) and Password in the fields `Username` and `Password` respectively.
# 1. Click the `Sign In` button.
# <img src="https://www.cs.cityu.edu.hk/~ccha23/cs1302/Login.gif" />
# **Tips**
# - If the browser is stuck at the following page loading the server, `refresh` your browser.
# 
# - If you see the following page with ``My Server`` button, click on that button.
# 
# - If you see the ``Start My Server`` button instead, click on that button to start your server.
# 
# - For other issues, try logging out using the `Logout` button at the top right-hand corner, and then logging in again. You may also click the `Control Panel` button and restart your server.
# ### How to access course materials?
# 1. Click on the `Assignments` tab, and ensure `cs1302` is chosen in the drop down list.
# 1. In the `Released assignments panel`, click the button `Fetch` to download `Lab1`.
# 1. `Lab1` should appear in the `Downloaded assignments panel`.
# 1. Click on the little arrow next to `Lab1` to show its content.
# 1. Ctrl-Click on `Lab1` to open the assignment folder on a new browser tab.
# 1. On the new browser, click the folder `cs1302` to navigate to the notebook `Setup.ipynb`.
# 1. Click on `Setup.ipynb` to open the notebook.
# <img src="https://www.cs.cityu.edu.hk/~ccha23/cs1302/Fetch.gif" />
# **Tips**
# 1. Note that all the downloaded course materials will be placed under the `cs1302` folder of your home directory by default, so you need not go to the `Assignments` tab again to open the downloaded materials.
# E.g., you can access the `Setup.ipynb` notebook as follows:
# 1. Going to the `File` tab, which is the default JupyterHub homepage after login or when you click the logo on the top left-hand corner.
# 1. Enter the notebook URL [ltjh.cs.cityu.edu.hk/user-redirect/tree/cs1302/Lab1/Setup.ipynb](https://ltjh.cs.cityu.edu.hk/user-redirect/tree/cs1302/Lab1/Setup.ipynb). (See the [documentation](https://jupyterhub.readthedocs.io/en/stable/reference/urls.html) for details.)
# 1. If for any reason you want to Fetch `Lab1` again, you have to first rename your `Lab1` folder to a different name such as `Lab1_orig`. You can do so by selecting the folder and click rename. You can also remove the folder by evaluating `!rm -rf ~/cs1302/Lab1` in a code cell. (Be very cautious as removed folders cannot be recovered.)
# ## Jupyter Notebook
# ### How to complete a lab assignment?
# After opening the `Lab1` notebook `Setup.ipynb`:
# 1. Click `Help->User Interface Tour` to learn the jupyter notebook interface.
# 1. Click `Help->Notebook Help` and skim through the tutorials on `Running Code` and `Working with Markdown Cells`.
# **Exercise** In learning a new computer language, the first program to write is often the ["Hello, World!"](https://en.wikipedia.org/wiki/%22Hello,_World!%22_program) program, which says Hello to the world. Type the program `print('Hello, World!')` below and run it with `Shift+Enter`.
# In[1]:
### BEGIN SOLUTION
print('Hello, World!')
### END SOLUTION
# We often ask you to write a code in a particular cell. Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE".
# In order to check your work thoroughly, there will visible and hidden test cases. The following is a visible test you can run to check your answer: The test returns an assertion error only if your program does not print the correct message.
# In[2]:
# Run this test cell right after running your "Hello, World!" program.
import sys, io
old_stdout, sys.stdout = sys.stdout, io.StringIO()
exec(In[-2])
printed = sys.stdout.getvalue()
sys.stdout = old_stdout
assert printed == 'Hello, World!\n'
# **Tips**
# 1. You can repeatedly modify your solution and run the test cell until your solution passes the test. You are not required to know how the test cell is written.
# 1. To assess your solution thoroughly, we often run new tests hidden from you after you have submitted your notebook. There is no partial credit for a partially correct solution that works for the visible test but fails for the hidden test. Therefore, *you should ensure your solution works in general rather than just the visible tests*.
# 1. You can click the `Validate` button to run all the visible tests.
# 1. If you open the same notebook multiple times in different browser windows, be careful in making changes in different windows. Inconsistent changes may lead to conflicts or loss of your data.
# 1. If your notebook fails to run any code, the Kernel might have died. You can restart the kernel with `Kernel->Restart`. If restarting fails, check your code cells to see if there is any code that breaks the kernel.
# ### How to submit a notebook
# - Although Lab1 does not count towards your final grade, you are required to submit it, to get familiar with the procedure.
# - Before you submit, make sure everything runs as expected:
# 1. **Restart the kernel**: `Kernel->Restart`
# 1. **run all cells**: `Cell->Run All`
# To submit your notebook:
# 1. Go to `Assignment` tab of JupyterHub where you fetched the Lab assignment.
# 1. Expand the Lab1 folder and click the `validate` button next to the notebook(s) to check if all visible tests pass.
# 1. Click `Submit` to submit your notebook.
# 1. You may submit as many times as you wish before the due date as we collect your latest submission for grading.
# - *No late submission* will be collected without valid justifications.
# - *Double check* that you have submitted the correct Lab assignment.
# - You are responsible for *recording your submission attempt* with a valid timestamp in case of technical issues.
# **Tips**
# 1. You normally have at least 5 days to work on the lab after your lab session.
# 1. You can check the due dates of all the labs from the course homepage.
# 1. You may seek help from us or your classmates. However, you must write your own solution and indicate who your collaborators are using the code:
# In[7]:
COLLABORATORS = ['<NAME>', '<NAME>']
# ## Advanced Usage
# ### How to print or backup a notebook?
# To convert a notebook to pdf, we can print it to pdf instead:
# - `File->Print Preview`
#
# However, animation and video cannot be properly printed. You are highly recommended to takes notes on the dynamic notebook instead on the hard copy.
# To download a copy of your notebook:
# - `File->Download as->Notebook (.ipynb)`
#
# You can run the notebook
# - locally using [Anaconda](https://www.anaconda.com/products/individual), or
# - remotely on other JupyHub services such as [Google Colab](https://colab.research.google.com/).
#
# However, you would need to learn how to manage and install the additional packages required by the course.
# ### Jupyter Lab and extensions
# Instead of the classic notebook interface, you may also play with the new interface called JupyterLab by visiting [ltjh.cs.cityu.edu.hk/user-redirect/lab/](https://ltjh.cs.cityu.edu.hk/user-redirect/lab/).
#
#
# Note that the new interface does not support the validation and submission of lab assignment. It is currently under [active development on GitHub](https://github.com/jupyterlab/jupyterlab), so be prepared to see [bugs](https://en.wikipedia.org/wiki/Software_bug).
# You may use the [visual debugger](https://github.com/jupyterlab/debugger) in JupyterLab to debug a jupyter notebook. To do you, you should open the notebook and choose XPython as the kernel.
# Both the notebook/lab interface is extensible. For the notebook interface, you can enable extensions from the [nbextensions page](https://ltjh.cs.cityu.edu.hk/user-redirect/nbextensions).
# ### Visual Studio Code
# For a complete IDE experience, you can open VS Code as follows:
# - [ltjh.cs.cityu.edu.hk/user-redirect/vscode/](https://ltjh.cs.cityu.edu.hk/user-redirect/vscode/)
# - In classic notebook interface: `File` tab -> `New` menu -> `VS Code` menu item.
# - In JupyterLab interface: `File` menu -> `New Launcher` menu item -> `VS Code` icon
|
ccha23/CS1302ICP
|
_build/jupyter_execute/Lab8/Information Theory.py
|
<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
# # Information Theory
# **CS1302 Introduction to Computer Programming**
# ___
# As mentioned in previous lectures, the following two lists `coin_flips` and `dice_rolls` simulate the random coin flips and rollings of a dice:
# In[ ]:
# Do NOT modify any variables defined here because some tests rely on them
import random
random.seed(0) # for reproducible results.
num_trials = 200
coin_flips = ['H' if random.random() <= 1/2 else 'T' for i in range(num_trials)]
dice_rolls = [random.randint(1,6) for i in range(num_trials)]
print('coin flips: ',*coin_flips)
print('dice rolls: ',*dice_rolls)
# **What is the difference of the two random processes?
# Can we say one process has more information content than the other?**
# In this Lab, we will use dictionaries to store their distributions and then compute their information content using information theory, which was introduced by *Claude Shannon*. It has [numerous applications](https://www.khanacademy.org/computing/computer-science/informationtheory):
# - *compression* (to keep files small)
# - *communications* (to send data mobile phones), and
# - *machine learning* (to identify relevant features to learn from).
# ## Entropy
# Mathematically, we denote a distribution as $\mathbf{p}=[p_i]_{i\in \mathcal{S}}$, where
# - $\mathcal{S}$ is the set of distinct outcomes, and
# - $p_i$ denotes the probability (chance) of seeing outcome $i$.
# The following code shown in the lecture uses a dictionary to store the distribution for a sequence efficiently without storing outcomes with zero counts:
# In[ ]:
# Do NOT modify any variables defined here because some tests rely on them
def distribute(seq):
    '''Return a dict mapping each distinct outcome in seq to the fraction of
    positions in seq at which it occurs (its empirical probability).
    Outcomes with zero count are simply absent from the dict.
    '''
    freq = {}
    weight = 1/len(seq)  # contribution of a single occurrence
    for outcome in seq:
        if outcome in freq:
            freq[outcome] += weight
        else:
            freq[outcome] = weight
    return freq
# tests
coin_flips_dist = distribute(coin_flips)
dice_rolls_dist = distribute(dice_rolls)
print('Distribution of coin flips:', coin_flips_dist)
print('Distribution of dice rolls:', dice_rolls_dist)
# For $\mathbf{p}$ to be a valid distribution, the probabilities $p_i$'s have to sum to $1$, i.e.,
#
# $$\sum_{i\in \mathcal{S}} p_i = 1, $$
# which can be verified as follows:
# In[ ]:
import math
assert math.isclose(sum(coin_flips_dist.values()),1) and math.isclose(sum(dice_rolls_dist.values()),1)
# **How to measure the information content?**
# In[ ]:
get_ipython().run_cell_magic('html', '', '<iframe width="912" height="513" src="https://www.youtube.com/embed/2s3aJfRr9gE" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
# In information theory, the information content of a distribution is measured by its [*entropy*](https://en.wikipedia.org/wiki/Entropy_(information_theory)) defined as:
#
# $$ \begin{aligned} H(\mathbf{p}) &:= \sum_{i\in \mathcal{S}} p_i \overbrace{\log_2 \tfrac{1}{p_i}}^{\text{called surprise} } \\ &= - \sum_{i\in \mathcal{S}} p_i \log_2 p_i \kern1em \text{(bits)} \end{aligned} $$
# with $p_i \log_2 \frac{1}{p_i} = 0$ if $p_i = 0$ because $\lim_{x\downarrow 0} x \log_2 \frac1x = 0$.
# For instance, if $\mathbf{p}=(p_{H},p_{T})=(0.5,0.5)$, then
#
# $$\begin{aligned} H(\mathbf{p}) &= 0.5 \log_2 \frac{1}{0.5} + 0.5 \log_2 \frac{1}{0.5} \\ &= 0.5 + 0.5 = 1 \text{ bit,}\end{aligned} $$
# i.e., an outcome of a fair coin flip has one bit of information content, as expected.
# On the other hand, if $\mathbf{p}=(p_{H},p_{T})=(1,0)$, then
# $$\begin{aligned} H(\mathbf{p}) &= 1 \log_2 \frac{1}{1} + 0 \log_2 \frac{1}{0} \\ &= 0 + 0 = 0 \text{ bits,}\end{aligned} $$
# i.e., an outcome of a biased coin flip that always comes up head has no information content, again as expected.
# **Exercise** Define a function `entropy` that
# - takes a distribution $\mathbf{p}$ as its argument, and
# - returns the entropy $H(\mathbf{p})$.
#
# Handle the case when $p_i=0$, e.g., using the short-circuit evaluation of logical `and`.
# In[ ]:
def entropy(dist):
    """Return the entropy H(dist) in bits of a distribution dict.

    Lab exercise stub: intentionally unimplemented. The student is to
    compute sum(p * log2(1/p)) over dist.values(), treating p == 0 terms
    as 0 (see the formula in the surrounding notebook text).
    """
    # YOUR CODE HERE
    raise NotImplementedError()
# In[ ]:
# tests
assert math.isclose(entropy({'H': 0.5, 'T': 0.5}), 1)
assert math.isclose(entropy({'H': 1, 'T': 0}), 0)
assert math.isclose(entropy(dict.fromkeys(range(1, 7), 1 / 6)), math.log2(6))
# ## Uniform distribution maximizes entropy
# Intuitively,
# - for large enough numbers of fair coin flips, we should have $\mathcal{S}=\{H,T\}$ and $p_H=p_T=0.5$, i.e., equal chance for head and tail.
# - for large enough numbers of fair dice rolls, we should have $p_i=\frac16$ for all $i\in \mathcal{S}=\{1,2,3,4,5,6\}$.
# In[ ]:
import matplotlib.pyplot as plt
def plot_distribution(seq):
    """Stem-plot the empirical distribution of the outcomes in seq."""
    dist = distribute(seq)  # dict of outcome -> empirical probability
    plt.stem(dist.keys(), # set-like view of the keys
             dist.values(), # view of the values
             use_line_collection=True)
    plt.xlabel('Outcomes')
    plt.title('Distribution')
    plt.ylim(0, 1)  # probabilities lie in [0, 1]
import ipywidgets as widgets
n_widget = widgets.IntSlider(
value=1,
min=1,
max=num_trials,
step=1,
description='n:',
continuous_update=False,
)
widgets.interactive(lambda n: plot_distribution(coin_flips[:n]),n=n_widget)
# In[ ]:
widgets.interactive(lambda n: plot_distribution(dice_rolls[:n]),n=n_widget)
# A distribution is called a *uniform distribution* if all its distinct outcomes have the same probability of occuring, i.e.,
#
# $$ p_i = \frac{1}{|\mathcal{S}|}\kern1em \text{for all }i\in \mathcal{S}, $$
# where $|\mathcal{S}|$ is the mathematical notation to denote the size of the set $\mathcal{S}$.
# **Exercise** Define a function `uniform` that
# - takes a sequence of possibly duplicate outcomes, and
# - returns a uniform distribution on the distinct outcomes.
# In[ ]:
def uniform(outcomes):
    '''Returns the uniform distribution (dict) over distinct items in outcomes.'''
    # Lab exercise stub: intentionally unimplemented. Each distinct item
    # should map to 1/len(set(outcomes)).
    # YOUR CODE HERE
    raise NotImplementedError()
# In[ ]:
# tests
assert uniform('HT') == {'H': 0.5, 'T': 0.5}
assert uniform('HTH') == {'H': 0.5, 'T': 0.5}
fair_dice_dist = uniform(range(1, 7))
assert all(
math.isclose(fair_dice_dist[k], v) for k, v in {
1: 0.16666666666666666,
2: 0.16666666666666666,
3: 0.16666666666666666,
4: 0.16666666666666666,
5: 0.16666666666666666,
6: 0.16666666666666666
}.items())
# **What is the entropy for uniform distributions?**
# By definition,
#
# $$ \begin{aligned} H(\mathbf{p}) &:= \sum_{i\in \mathcal{S}} p_i \log_2 \tfrac{1}{p_i} \\ &= \sum_{i\in \mathcal{S}} \frac{1}{|\mathcal{S}|} \log_2 |\mathcal{S}| = \log_2 |\mathcal{S}| \kern1em \text{(bits)} \end{aligned} $$
#
# This reduces to the formula you learned in Lecture 1 and Lab 1 regarding the number of bits required to represent a set. This is the maximum possible entropy for a given finite set of possible outcomes.
# You can use this result to test whether you have implemented both `entropy` and `uniform` correctly:
# In[ ]:
assert all(math.isclose(entropy(uniform(range(n))), math.log2(n)) for n in range(1,100))
# ## Joint distribution and its entropy
# If we duplicate a sequence of outcomes multiple times, the total information content should remain unchanged, NOT doubled, because the duplicate contains the same information as the original. We will verify this fact by creating a [joint distribution](https://en.wikipedia.org/wiki/Joint_probability_distribution)
#
# $$\mathbf{p}=[p_{ij}]_{i\in \mathcal{S},j\in \mathcal{T}}$$
# - where $\mathcal{S}$ and $\mathcal{T}$ are sets of outcomes; and
# - $p_{ij}$ is the chance we see outcomes $i$ and $j$ simultaneously.
#
# The subscript $ij$ in $p_{ij}$ denotes a tuple $(i,j)$, NOT the multiplication $i\times j$. We also have
#
# $$\sum_{i\in \mathcal{S}} \sum_{j\in \mathcal{T}} p_{ij} = 1.$$
#
#
#
#
# **Exercise** Define a function `jointly_distribute` that
# - takes two sequences `seq1` and `seq2` of outcomes with the same length, and
# - returns the joint distribution represented as a dictionary where each key-value pair has the key being a tuple `(i,j)` associated with the probability $p_{ij}$ of seeing `(i,j)` in `zip(seq1,seq2)`.
# In[ ]:
def jointly_distribute(seq1, seq2):
    '''Returns the joint distribution of the tuple (i,j) of outcomes from zip(seq1,seq2).'''
    # Lab exercise stub: intentionally unimplemented. Keys are (i, j)
    # tuples; values are their empirical probabilities in zip(seq1, seq2).
    # YOUR CODE HERE
    raise NotImplementedError()
# In[ ]:
# tests
assert jointly_distribute('HT', 'HT') == {('H', 'H'): 0.5, ('T', 'T'): 0.5}
assert jointly_distribute('HHTT', 'HTHT') == {
('H', 'H'): 0.25,
('H', 'T'): 0.25,
('T', 'H'): 0.25,
('T', 'T'): 0.25
}
coin_flips_duplicate_dist = {
('T', 'T'): 0.5350000000000004,
('H', 'H'): 0.4650000000000003
}
coin_flips_duplicate_ans = jointly_distribute(coin_flips, coin_flips)
assert all(
math.isclose(coin_flips_duplicate_ans[i], pi)
for i, pi in coin_flips_duplicate_dist.items())
# If you have implemented `entropy` and `jointly_distribute` correctly, you can verify that duplicating a sequence will give the same entropy.
# In[ ]:
assert math.isclose(entropy(jointly_distribute(coin_flips,coin_flips)), entropy(distribute(coin_flips)))
assert math.isclose(entropy(jointly_distribute(dice_rolls,dice_rolls)), entropy(distribute(dice_rolls)))
# However, for two sequences generated independently, the joint entropy is roughly the sum of the individual entropies.
# In[ ]:
coin_flips_entropy = entropy(distribute(coin_flips))
dice_rolls_entropy = entropy(distribute(dice_rolls))
cf_dr_entropy = entropy(jointly_distribute(coin_flips, dice_rolls))
print(f'''Entropy of coin flip: {coin_flips_entropy}
Entropy of dice roll: {dice_rolls_entropy}
Sum of the above entropies: {coin_flips_entropy + dice_rolls_entropy}
Joint entropy: {cf_dr_entropy}''')
# ## Conditional distribution and entropy
# Mathematically, we denote a [conditional distribution](https://en.wikipedia.org/wiki/Conditional_probability_distribution) as $\mathbf{q}:=[q_{j|i}]_{i\in \mathcal{S}, j\in \mathcal{T}}$, where
# - $\mathcal{S}$ and $\mathcal{T}$ are two sets of distinct outcomes, and
# - $q_{j|i}$ denotes the probability (chance) of seeing outcome $j$ given the condition that outcome $i$ is observed.
#
# For $\mathbf{q}$ to be a valid distribution, the probabilities $q_{j|i}$'s have to sum to $1$ for every $i$, i.e.,
#
# $$\sum_{j\in \mathcal{T}} q_{j|i} = 1 \kern1em \text{for all }i\in \mathcal{S} $$
# For example, suppose we want to compute the distribution of coin flips given dice rolls, then the following assign `q_H_1` and `q_T_1` to the values $q_{H|1}$ and $q_{T|1}$ respectively:
# In[ ]:
coin_flips_1 = [j for i, j in zip(dice_rolls, coin_flips) if i == 1]
q_H_1 = coin_flips_1.count('H') / len(coin_flips_1)
q_T_1 = coin_flips_1.count('T') / len(coin_flips_1)
print('Coin flips given dice roll is 1:', coin_flips_1)
print('Distribution of coin flip given dice roll is 1: {{ "H": {}, "T": {}}}'.format(q_H_1, q_T_1))
assert math.isclose(q_H_1 + q_T_1, 1)
# Note that `q_H_1 + q_T_1` is 1 as expected. Similarly, we can assign `q_H_2` and `q_T_2` to the values $q_{H|2}$ and $q_{T|2}$ respectively.
# In[ ]:
coin_flips_2 = [j for i, j in zip(dice_rolls, coin_flips) if i == 2]
q_H_2 = coin_flips_2.count('H') / len(coin_flips_2)
q_T_2 = coin_flips_2.count('T') / len(coin_flips_2)
print('Coin flips given dice roll is 2:', coin_flips_2)
print('Distribution of coin flip given dice roll is 2: {{ "H": {}, "T": {}}}'.format(q_H_2, q_T_2))
assert math.isclose(q_H_2 + q_T_2, 1)
# Finally, we want to store the conditional distribution as a nested dictionary so that `q[i]` points to the distribution
#
# $$[q_{j|i}]_{j\in \mathcal{T}} \kern1em \text{for }i\in \mathcal{S}.$$
# In[ ]:
q = {}
q[1] = dict(zip('HT',(q_H_1, q_T_1)))
q[2] = dict(zip('HT',(q_H_2, q_T_2)))
q
# Of course, the above dictionary is missing the entries for other possible outcomes of the dice rolls.
# **Exercise** Define a function `conditionally_distribute` that
# - takes two sequences `seq1` and `seq2` of outcomes of the same length, and
# - returns the conditional distribution of `seq2` given `seq1` in the form of a nested dictionary efficiently without storing the unobserved outcomes.
#
# In the above example, `seq1` is `dice_rolls` while `seq2` is `coin_flips`.
#
# *Hint:* For an efficient implementation without traversing the input sequences too many times, consider using the following solution template and the `setdefault` method.
# ```Python
# def conditionally_distribute(seq1, seq2):
# q, count = {}, {} # NOT q = count = {}
# for i in seq1:
# count[i] = count.get(i, 0) + 1
# for i, j in zip(seq1, seq2):
# q[i][j] = ____________________________________________________
# return q
# ```
# In[ ]:
def conditionally_distribute(seq1, seq2):
    '''Returns the conditional distribution q of seq2 given seq1 such that
    q[i] is a dictionary for observed outcome i in seq1 and
    q[i][j] is the probability of observing j in seq2 given the
    corresponding outcome in seq1 is i.'''
    # Lab exercise stub: intentionally unimplemented. See the solution
    # template in the notebook text above (count pass, then setdefault).
    # YOUR CODE HERE
    raise NotImplementedError()
# In[ ]:
# tests
cf_given_dr_dist = {
4: {
'T': 0.5588235294117647,
'H': 0.4411764705882353
},
1: {
'T': 0.46511627906976744,
'H': 0.5348837209302325
},
3: {
'H': 0.5135135135135135,
'T': 0.4864864864864865
},
6: {
'H': 0.5454545454545454,
'T': 0.45454545454545453
},
2: {
'T': 0.7586206896551724,
'H': 0.2413793103448276
},
5: {
'T': 0.5416666666666666,
'H': 0.4583333333333333
}
}
cf_given_dr_ans = conditionally_distribute(dice_rolls, coin_flips)
assert all(
math.isclose(cf_given_dr_ans[i][j], v)
for i, d in cf_given_dr_dist.items() for j, v in d.items())
# The [*conditional entropy*](https://en.wikipedia.org/wiki/Conditional_entropy) is defined for a conditional distribution $\mathbf{q}=[q_{j|i}]_{i\in \mathcal{S},j\in\mathcal{T}}$ and a distribution $\mathbf{p}=[p_i]_{i\in \mathcal{S}}$ as follows:
#
# $$ H(\mathbf{q}|\mathbf{p}) = \sum_{i\in \mathcal{S}} p_i \sum_{j\in \mathcal{T}} q_{j|i} \log_2 \frac{1}{q_{j|i}}, $$
# where, by convention,
# - the summand of the outer sum is 0 if $p_i=0$ (regardless of the values of $q_{j|i}$), and
# - the summand of the inner sum is 0 if $q_{j|i}=0$.
# **Exercise** Define a function `conditional_entropy` that
# - takes
# - a distribution p as its first argument,
# - a conditional distribution q as its second argument, and
# - returns the conditional entropy $H(\mathbf{q}|\mathbf{p})$.
#
# Handle the cases when $p_i=0$ and $q_{j|i}=0$ as well.
# In[ ]:
# Lab exercise stub: the student is to define conditional_entropy(p, q)
# here, returning H(q|p) per the formula above (with the 0-probability
# conventions). Intentionally unimplemented.
# YOUR CODE HERE
raise NotImplementedError()
# In[ ]:
# tests
cf_given_dr_dist = {
4: {
'T': 0.5588235294117647,
'H': 0.4411764705882353
},
1: {
'T': 0.46511627906976744,
'H': 0.5348837209302325
},
3: {
'H': 0.5135135135135135,
'T': 0.4864864864864865
},
6: {
'H': 0.5454545454545454,
'T': 0.45454545454545453
},
2: {
'T': 0.7586206896551724,
'H': 0.2413793103448276
},
5: {
'T': 0.5416666666666666,
'H': 0.4583333333333333
}
}
assert conditional_entropy({'H': 0.5, 'T': 0.5},{'H': {'H': 0.5, 'T': 0.5}, 'T': {'H': 0.5, 'T': 0.5}}) == 1
assert conditional_entropy({'H': 0, 'T': 1},{'H': {'H': 0.5, 'T': 0.5}, 'T': {'H': 0.5, 'T': 0.5}}) == 1
assert conditional_entropy({'H': 0.5, 'T': 0.5},{'H': {'H': 1, 'T': 0}, 'T': {'H': 0, 'T': 1}}) == 0
assert conditional_entropy({'H': 0.5, 'T': 0.5},{'H': {'H': 1, 'T': 0}, 'T': {'H': 0.5, 'T': 0.5}}) == 0.5
assert math.isclose(conditional_entropy(dice_rolls_dist, cf_given_dr_dist), 0.9664712793722372)
# The joint probability $p_{ij}$ over $i\in \mathcal{S}$ and $j\in \mathcal{T}$ can be calculated as follows
#
# $$p_{ij} = p_{i} q_{j|i}$$
# where $p_i$ is the probability of $i$ and $q_{j|i}$ is the conditional probability of $j$ given $i$.
# **Exercise** Define a function `joint_distribution` that
# - takes the distribution $p$ and conditional distribution $q$ as arguments, and
# - returns their joint distribution.
# In[ ]:
def joint_distribution(p,q):
    """Return the joint distribution dict {(i, j): p[i] * q[i][j]}.

    Lab exercise stub: intentionally unimplemented.
    """
    # YOUR CODE HERE
    raise NotImplementedError()
# In[ ]:
# tests
assert joint_distribution({'H': 0.5, 'T': 0.5},{'H': {'H': 0.5, 'T': 0.5}, 'T': {'H': 0.5, 'T': 0.5}}) == {('H', 'H'): 0.25, ('H', 'T'): 0.25, ('T', 'H'): 0.25, ('T', 'T'): 0.25}
assert joint_distribution({'H': 0, 'T': 1},{'H': {'H': 0.5, 'T': 0.5}, 'T': {'H': 0.5, 'T': 0.5}}) == {('H', 'H'): 0.0, ('H', 'T'): 0.0, ('T', 'H'): 0.5, ('T', 'T'): 0.5}
assert joint_distribution({'H': 0.5, 'T': 0.5},{'H': {'H': 1, 'T': 0}, 'T': {'H': 0, 'T': 1}}) == {('H', 'H'): 0.5, ('H', 'T'): 0.0, ('T', 'H'): 0.0, ('T', 'T'): 0.5}, {'H': 0.5, 'T': 0.5}
# Finally, a fundamental information identity, called the [*chain rule*](https://en.wikipedia.org/wiki/Conditional_entropy#Chain_rule), is that the joint entropy is equal to
#
# $$ H(\mathbf{p}) + H(\mathbf{q}|\mathbf{p})$$
# for any distribution $\mathbf{p}$ over outcome $i\in \mathcal{S}$ and conditional distribution $\mathbf{q}$ over outcome $j\in \mathcal{T}$ given outcome $i \in \mathcal{S}$.
# If you have implemented `jointly_distribute`, `conditionally_distribute`, `entropy`, and `conditional_entropy` correctly, we can verify the identity as follows.
# In[ ]:
def validate_chain_rule(seq1,seq2):
    """Check the chain rule H(p,q) = H(p) + H(q|p) on two outcome sequences.

    Relies on distribute, conditionally_distribute, jointly_distribute,
    entropy and conditional_entropy defined earlier in the notebook;
    asserts that the joint entropy equals the sum (up to float tolerance).
    """
    p = distribute(seq1)
    q = conditionally_distribute(seq1,seq2)
    pq = jointly_distribute(seq1,seq2)
    H_pq = entropy(pq)
    H_p = entropy(p)
    H_q_p = conditional_entropy(p,q)
    print(f'''Entropy of seq1: {H_p}
Conditional entropy of seq2 given seq1: {H_q_p}
Sum of the above entropies: {H_p + H_q_p}
Joint entropy: {H_pq}''')
    assert math.isclose(H_pq,H_p + H_q_p)
# In[ ]:
validate_chain_rule(coin_flips,coin_flips)
# In[ ]:
validate_chain_rule(dice_rolls,coin_flips)
|
OmarSalah95/Lambda-Treasure-MUD-Automation
|
cpu.py
|
"""CPU functionality."""
import sys
import time
class CPU:
"""Main CPU class."""
def __init__(self):
    """Construct a new CPU.

    Sets up 256 bytes of RAM, 8 registers, the program counter (PC), the
    flags register (FL), a wall-clock timestamp used for timer interrupts,
    and a dispatch table mapping opcode bytes to handler methods.
    """
    self.ram = [0] * 256
    self.reg = [0] * 7 + [len(self.ram)-12]
    # final register (R7) reserved for SP -- the stack grows downward, and the
    # final 11 blocks of RAM are reserved for other uses
    self.PC = 0
    # FL is bit-operated on, with the last three bits denoting LT, E, GT
    self.FL = 0b00000000
    # used to check for timer interrupts (compared against time.time() in run())
    self.time = time.time()
    self.hint = ''
    # Dispatch table: opcode byte -> bound handler method. "HLT" is a
    # sentinel string handled specially in run() to break the main loop.
    self.instructions = {
        0b00000001: "HLT",
        0b10000010: self.LDI,
        0b01000111: self.PRN,
        0b01000101: self.PUSH,
        0b01000110: self.POP,
        0b01010000: self.CALL,
        0b00010001: self.RET,
        0b10000100: self.ST,
        0b00010011: self.IRET,
        0b01010100: self.JMP,
        0b01001000: self.PRA,
        0b01010101: self.JEQ,
        0b01010110: self.JNE,
        # 2 params => 10, not ALU => 0, doesn't set PC => 0, identifier = 1000 because ???
        0b10001000: self.ADDI,
        0b10100010: self.MUL, # ALU ops start here
        0b10100000: self.ADD,
        0b10100111: self.CMP,
        0b10101000: self.AND,
        0b10101010: self.OR,
        0b10101011: self.XOR,
        0b01101001: self.NOT,
        0b10101100: self.SHL,
        0b10101101: self.SHR,
        0b10100100: self.MOD
    }
def ram_read(self, address):
"""
Reads a stored value at the given address in memory.
"""
return self.ram[address]
def ram_write(self, address, val):
"""
Stores a value into a block of memory at the given address.
"""
self.ram[address] = val
def load(self, filename):
"""Load a program into memory."""
address = 0
with open(filename) as f:
for line in f:
n = line.split('#') # ignore everything to right of a comment
n[0] = n[0].strip() # remove all whitespace
if n[0] == '': # ignore blank or comment-only lines
continue
# cast the binary command string to an integer
val = int(n[0], 2)
# store it at the current address in memory
self.ram[address] = val
address += 1
def ALU(self, op, reg_a, reg_b=None):
"""ALU operations."""
val_a = self.reg[reg_a]
if reg_b is not None:
val_b = self.reg[reg_b]
if op == "ADD":
self.reg[reg_a] += val_b
elif op == "MUL":
self.reg[reg_a] *= val_b
elif op == "CMP":
if val_a < val_b:
self.FL = self.FL | 0b00000100
elif val_a == val_b:
self.FL = self.FL | 0b00000010
elif val_a > val_b:
self.FL = self.FL | 0b00000001
elif op == "AND":
self.reg[reg_a] = val_a & val_b
elif op == "OR":
self.reg[reg_a] = val_a | val_b
elif op == "XOR":
self.reg[reg_a] = val_a ^ val_b
elif op == "NOT":
self.reg[reg_a] = 255 - val_a
elif op == "SHL":
self.reg[reg_a] = val_a << val_b
elif op == "SHR":
self.reg[reg_a] = val_a >> val_b
elif op == "MOD":
if val_b == 0:
print("Warning: MOD operation attempted with % 0.")
sys.exit(1)
self.reg[reg_a] = val_a % val_b
else:
raise Exception("Unsupported ALU operation")
def trace(self):
    """
    Handy function to print out the CPU state. You might want to call this
    from run() if you need help debugging.

    Prints PC, FL and the three bytes at PC..PC+2, then all 8 registers,
    each as two-digit hex.
    """
    # note: the f-prefix below is redundant; the string uses %-formatting
    print(f"TRACE: %02X | %02X %02X %02X |" % (
        self.PC,
        self.FL,
        # self.ie,
        self.ram_read(self.PC),
        self.ram_read(self.PC + 1),
        self.ram_read(self.PC + 2)
    ), end='')
    for i in range(8):
        print(" %02X" % self.reg[i], end='')
    print()
def run(self):
    """
    Run the CPU.
    Checks each second for an interrupt flag.

    Main fetch/dispatch loop: raises the timer interrupt roughly once per
    second, services pending interrupts, then dispatches the instruction
    at PC via the self.instructions table. Stops on HLT; exits the
    process on an unknown opcode.
    """
    IS = 6  # index of the Interrupt Status register
    while True:
        # fetch corresponding command from an instruction list instead of using large if/else block
        new_time = time.time()
        if new_time - self.time >= 1:
            # at least one second has passed since self.time was last set
            # trigger timer by setting the Interrupt Status from 0 to 1
            self.reg[IS] = 1
            # set new time for next 1-sec increment
            self.time = new_time
        if self.reg[IS] >= 1: # key interrupts enabled
            self._interrupts_enabled()
        ir = self.ram[self.PC]  # fetch the instruction register byte
        if ir in self.instructions and self.instructions[ir] == "HLT":
            break  # HLT is a sentinel string, not a handler method
        elif ir in self.instructions:
            self.instructions[ir]()  # handlers advance PC themselves
        else:
            print(f"Unknown command at PC index {self.PC}")
            self.trace()
            sys.exit(1)
    def _interrupts_enabled(self):
        """
        Uses masking and bitshifting to find out which interrupt was triggered. Pushes all
        relevant CPU state onto the stack until interrupt loop is complete.

        Push order is PC, FL, then R0..R6 ascending — IRET() restores them in
        the exact mirror order, so the two methods must stay in sync.
        """
        # Storing Interrupt Mask and Interrupt Status register indexes
        IM = 5
        IS = 6
        # Mask out all interrupts we aren't interested in
        masked_interrupts = self.reg[IM] & self.reg[IS]
        for i in range(8):
            # each bit checked to see if one of the 8 interrupts happend
            interrupt_happened = ((masked_interrupts >> i) & 1) == 1
            if interrupt_happened:
                # clear bit in IS
                self.reg[IS] = 0
                # PC register pushed on the stack
                self.PUSH(self.PC)
                # FL register pushed on the stack
                self.PUSH(self.FL)
                # The address of the appropriate handler looked up from interrupt table
                # Should be for 0 (Timer interrupt)
                # i will be zero when IS set to 000000001, other values would be different bits => different interrupt vector
                handler_address = self.ram_read(0xF8 + i)
                # Registers R0-R6 pushed on the stack in that order
                for j in range(0, 7):
                    self.PUSH(self.reg[j])
                # Set the PC to the handler address
                self.PC = handler_address
                # Disable further interrupt checks until Interrupt Return has occurred
                break
def IRET(self):
"""
Returns from interrupt loop, retrieves all CPU state from before interrupt began.
"""
# Registers R6-R0 popped from stack in that order
for i in range(6, -1, -1):
reg_val = self.POP(return_val=True)
self.reg[i] = reg_val
# FL register popped off the stack
self.FL = self.POP(return_val=True)
# return address popped off the stack and stored in PC
return_address = self.POP(return_val=True)
self.PC = return_address
def LDI(self):
"""
Loads a value into a specific address in registry.
"""
reg_address = self.ram_read(self.PC + 1)
reg_value = self.ram_read(self.PC + 2)
self.reg[reg_address] = reg_value
self.PC += 3
def PRN(self):
"""
Prints the value stored at the specific address in registry.
"""
reg_address = self.ram_read(self.PC + 1)
print(f"{self.reg[reg_address]}")
self.PC += 2
def PUSH(self, val=None):
"""
Pushes a value onto the allocated portion of memory for the stack.
Grows downward from the top of memory as values are added.
If passed a value as a parameter, pushes that onto the stack instead
of reading from the next line of instruction.
"""
sp = self.reg[7] # Stack Pointer is held in reserved R07
if val is not None: # check if PUSH is being used internally for other functions
self.ram_write(sp-1, val)
else:
# grab next instruction for register address containing value
reg_address = self.ram_read(self.PC + 1)
reg_val = self.reg[reg_address]
# store value in the next available slot in RAM apportioned to the stack (lower in memory)
self.ram_write(sp-1, reg_val)
# increment PC and decrement SP accordingly
self.PC += 2
# either way sp gets decremented
self.reg[7] = sp - 1
def POP(self, return_val=False):
"""
If a return value is requested (internal use in other functions),
removes latest item from the stack in memory and returns it.
Otherwise, pops item from stack and sets to registry address
from next line of instruction.
"""
sp = self.reg[7]
if return_val is True: # will have a value passed into POP() if ran from int_ret
popped_val = self.ram_read(sp)
self.reg[7] = sp + 1
return popped_val
else:
# grab next instruction for address that will contain the popped value
reg_address = self.ram_read(self.PC + 1)
# Grab the value at the current Stack Pointer address in memory
popped_val = self.ram_read(sp)
# Add popped_val to the specified register address
self.reg[reg_address] = popped_val
# Move lower in the stack (higher in memory)
self.reg[7] = sp + 1
# Increment PC accordingly
self.PC += 2
def CALL(self):
"""
Stores return address in stack and sets PC to address specified in instruction.
"""
# PUSH return address to the stack
return_address = self.PC + 2
self.PUSH(return_address)
# Set the PC to the value in the register
reg_val = self.ram_read(self.PC + 1)
sub_address = self.reg[reg_val]
self.PC = sub_address
def RET(self):
"""
Pops return address added in CALL() from the stack and sets the PC back to it.
"""
# POP the return address off the stack
return_address = self.POP(return_val=True)
# store in the PC so the CPU knows which instruction to pick up at
self.PC = return_address
def ST(self):
"""
Using two register addresses from instruction, stores a value
at a specific memory address.
"""
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
target_address = self.reg[reg_a]
target_val = self.reg[reg_b]
self.ram_write(target_address, target_val)
self.PC += 3
def PRA(self):
"""
Prints the alphanumeric character of an ASCII number at the given registry address.
"""
reg_address = self.ram_read(self.PC + 1)
ascii_num = self.reg[reg_address]
self.hint = self.hint + chr(ascii_num)
self.PC += 2
def ADDI(self):
"""
Adds an immediate value to a register value.
"""
reg_address = self.ram_read(self.PC + 1)
immediate = self.ram_read(self.PC + 2)
self.reg[reg_address] += immediate
self.PC += 3
def JMP(self):
"""
Sets the PC to the given jump address.
"""
jump_address = self.ram_read(self.PC + 1)
self.PC = self.reg[jump_address]
def JEQ(self):
"""
If equal flag is set to true, jump to address stored in given register
"""
if (self.FL & 0b00000010) >> 1 == 1:
jump_address = self.ram_read(self.PC + 1)
self.PC = self.reg[jump_address]
else:
self.PC += 2
def JNE(self):
"""
If equal flag is clear, jump to the address stored in given register
"""
if (self.FL & 0b00000010) >> 1 == 0:
jump_address = self.ram_read(self.PC + 1)
self.PC = self.reg[jump_address]
else:
self.PC += 2
# ALU functions start here
def MUL(self):
"""
ALU is passed the next two inputs (register addresses)
and multiplies the values stored there.
Stores the result in the first register address.
"""
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
self.ALU('MUL', reg_a, reg_b)
self.PC += 3
def ADD(self):
"""
ALU is passed two register addresses and stores
their sum at the first address.
"""
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
self.ALU('ADD', reg_a, reg_b)
self.PC += 3
def CMP(self):
"""
ALU is passed two register address and stores whether registerA
is less than, equal to, or greater than register B in the FL flag.
"""
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
self.ALU('CMP', reg_a, reg_b)
self.PC += 3
def AND(self):
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
self.ALU('AND', reg_a, reg_b)
self.PC += 3
def OR(self):
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
self.ALU('OR', reg_a, reg_b)
self.PC += 3
def XOR(self):
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
self.ALU('XOR', reg_a, reg_b)
self.PC += 3
def NOT(self):
reg_a = self.ram_read(self.PC + 1)
self.ALU('NOT', reg_a)
self.PC += 2
def SHL(self):
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
self.ALU('SHL', reg_a, reg_b)
self.PC += 3
def SHR(self):
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
self.ALU('SHR', reg_a, reg_b)
self.PC += 3
def MOD(self):
reg_a = self.ram_read(self.PC + 1)
reg_b = self.ram_read(self.PC + 2)
self.ALU('MOD', reg_a, reg_b)
self.PC += 3
|
OmarSalah95/Lambda-Treasure-MUD-Automation
|
adv.py
|
<reponame>OmarSalah95/Lambda-Treasure-MUD-Automation
import requests
import json
import random
import time
from player import Player
from api import url, key, opposite, Queue
# Module-level singleton used by every function below; constructing it
# performs network requests to the game server.
player = Player()
def get_name(name):
    """
    Using the most recent locations of treasure, the player travels to those rooms
    and picks up items until inventory is full, then will automatically travel back to the shop and sell them.
    Once having sold 1000g worth of items at the shop, player will go to Pirate Ry's and
    purchase the specified name for themselves (needed for mining Lambda coins).
    """
    # Make list of treasure rooms currently known in the map.
    # BUG FIX: removed a no-op statement (`treasure_rooms[len(...)//2:]`
    # computed a slice and discarded it) and a bare `print` reference that
    # never called the function — both leftover debugging.
    treasure_rooms = []
    for k, v in player.map.items():
        if len(v["items"]) > 0:
            treasure_rooms.append(k)
    print("The following rooms have treasure:", treasure_rooms)
    while player.gold < 1000:  # automatically updated by player.check_self()
        while player.encumbrance < player.strength:
            # BUG FIX: guard against an empty list before indexing [0]
            # (previously raised IndexError when no treasure was known).
            if not treasure_rooms:
                break
            current_treasure_room = treasure_rooms[0]
            travel_to_target(int(current_treasure_room))
            # pick up treasure in the room
            player.pick_up_loot("tiny treasure")
            # update map entry for room to reflect taken treasure
            player.map[current_treasure_room]["items"] = []
            player._write_file('map.txt', player.map)
            treasure_rooms = treasure_rooms[1:]
        # travel to shop and sell all items in inventory
        sell_loot()
        player.check_self()
        if not treasure_rooms and player.gold < 1000:
            print("No more known treasure rooms — stopping short of 1000g.")
            break
    # travel to Pirate Ry's and purchase the name
    travel_to_target(467)
    player.buy_name(name)
def sell_loot():
    """
    Travels to shop and sells all items in inventory.

    Selling is a two-step server protocol: an initial sell request returns a
    quote, then the same payload with confirm="yes" completes the sale.
    Sleeps through the server-imposed cooldown between every request.
    """
    travel_to_target(1)  # room 1 is the shop
    time.sleep(player.cooldown)
    print('\nAll the items here in your bag shall be sold', player.inventory, "\n")
    for item in player.inventory:
        # NOTE: local name `json` shadows the imported json module inside
        # this function (the module is not needed here).
        json = {"name": item}
        r1 = requests.post(f"{url}/api/adv/sell/", headers={'Authorization': f"Token {key}",
                                                            "Content-Type": "application/json"}, json=json).json()
        time.sleep(r1['cooldown'])
        # Second request confirms the sale quoted by the first.
        json['confirm'] = "yes"
        r1_conf = requests.post(f"{url}/api/adv/sell/", headers={
            'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
        print(f"Clerk: {r1_conf['messages'][0]}")
        print(f'{"*"*8} {r1_conf["messages"][1]} {"*"*8}\n')
        player.cooldown = r1_conf['cooldown']
        time.sleep(player.cooldown)
    # Refresh cached player state (gold, inventory, encumbrance).
    player.check_self()
def explore_random():
    """
    Returns a random unexplored exit direction from the current room
    """
    room_key = str(player.current_room["room_id"])
    unexplored = [d for d in player.current_room["exits"]
                  if player.graph[room_key][d] == '?']
    return random.choice(unexplored)
def dft_for_dead_end():
    """
    Performs depth-first traversal, repeatedly taking a random unexplored
    exit until reaching a room with no unexplored exits (a dead end or a
    fully-explored room).
    """
    while True:
        exits = player.graph[str(player.current_room["room_id"])]
        if '?' not in exits.values():
            break
        # Pick an unexplored direction and move there.
        player.travel(explore_random())
def generate_path(target):
    """
    BFS for the shortest path of (direction, room_id) steps to the target
    room. Passing "?" instead of a room id finds the closest room with an
    unexplored exit. Returns the first qualifying path (excluding the
    starting room), or None if no path exists.
    """
    start_id = str(player.current_room["room_id"])
    frontier = Queue()
    frontier.enqueue([("placeholder direction", start_id)])
    seen = set()  # room ids already expanded
    while frontier.size() > 0:
        path = frontier.dequeue()
        tail_id = str(path[-1][1])
        if tail_id in seen:
            continue
        exits = player.graph[tail_id]
        # Found if any exit leads to the target (a "?" or a specific id).
        for direction, neighbor in exits.items():
            if str(neighbor) == str(target):
                if target != "?":
                    path.append((direction, neighbor))
                return path[1:]
        seen.add(tail_id)
        # Extend the path through every explored exit.
        for direction, neighbor in exits.items():
            if neighbor != '?':
                extended = path.copy()
                extended.append((direction, neighbor))
                frontier.enqueue(extended)
def travel_to_target(target='?'):
    """
    Runs a BFS to a specific room (or, with the default "?", to the nearest
    room with an unexplored exit), then walks/dashes through that path in
    order. Dashes when three or more consecutive steps share a direction
    and the dash ability is available.
    """
    # Edge cases
    if player.current_room["room_id"] == target:
        # already there, just return from function
        return
    # BUG FIX: the original condition `target != "?" and target < 0 or
    # target > 999` binds as `(... and ...) or target > 999`, so the default
    # "?" target reached `"?" > 999` — a str/int TypeError in Python 3.
    if target != "?" and (target < 0 or target > 999):
        print(f"There is no room {target}... in either world. Try again.")
        return
    if target != "?" and str(target) not in player.graph:
        # room not in graph, need to warp to the other world first
        if 'warp' in player.abilities:
            player.warp()
        else:
            print(f"Looks like your destination is in another dimension... but you don't have the warp ability yet!")
            return
    bfs_path = generate_path(target)
    print(f"\nNew path to follow! {bfs_path}\n")
    while bfs_path is not None and len(bfs_path) > 0:
        # check if there are consecutive matching directions (dash opportunity)
        if len(bfs_path) > 2 and bfs_path[0][0] == bfs_path[1][0] == bfs_path[2][0] and "dash" in player.abilities:
            print("Power coils in your legs as you prepare to dash!")
            dash_direction = bfs_path[0][0]
            dash_room_ids = []
            for move in bfs_path:
                # only grab the consecutive same directions, not later in the path list
                if move[0] == dash_direction:
                    dash_room_ids.append(str(move[1]))
                else:
                    break
            num_rooms = len(dash_room_ids)
            string_ids = ",".join(dash_room_ids)
            # if there are, submit dash request
            player.dash(dash_direction, str(num_rooms), string_ids)
            # update path to remove dashed rooms
            bfs_path = bfs_path[num_rooms:]
        # else, just move one room
        else:
            next_room = bfs_path.pop(0)
            next_direction = next_room[0]
            player.travel(next_direction)
def explore_maze():
    """
    Alternates depth-first traversal to a dead end with a BFS hop to the
    nearest room with an unexplored exit, until the on-disk graph contains
    no '?' entries (i.e. the map is complete).
    """
    f = 'dark_graph.txt' if player.world == 'dark' else 'graph.txt'
    # BUG FIX: the graph file was previously read ONCE before the loop, so
    # the `while '?' in graph` condition never updated as exploration
    # progressed — an infinite loop. Re-read the file each iteration, and
    # use a context manager so the handle is closed.
    while True:
        with open(f) as graph_file:
            graph = graph_file.read().rstrip()
        if '?' not in graph:
            break
        dft_for_dead_end()
        travel_to_target()
    print("Map complete!")
def acquire_powers():
    """
    After the maze has been mapped, visit each ability shrine and pray to
    acquire the power. Priority order: dash, fly, carry, warp.
    """
    shrines = (("dash", 461), ("fly", 22), ("carry", 499), ("warp", 374))
    for ability, shrine in shrines:
        if ability not in player.abilities:
            travel_to_target(shrine)
            player.pray()
    print(f"Your Abilities are now: {player.abilities}")
def get_rich():
    """
    If in light world (start), player will continuously loop getting Lambda coin locations
    from the Wishing Well and going to that spot to mine them. Will also pick up any treasures along the way,
    and if inventory becomes full, will go to the shop to sell them, maximizing gold and LC profit.
    If in dark world, player will go to the dark Wishing Well, wait until a new snitch location has been revealed
    (means somebody else just got the last one), go there, and loot it. Player will perform a set amount of checks
    at the well and will go to the specified location regardless after reaching that count.
    """
    while True:  # intentionally endless; interrupt the process to stop
        if player.world == 'dark':
            print(f"\n{player.name} currently has {player.snitches} snitches!")
        if player.encumbrance >= player.strength:
            sell_loot()
        # travel to wishing well (room 55 in light world, 555 in dark)
        travel_to_target(55 if player.world == 'light' else 555)
        # examine it to get the new hint (decoded room number as a string)
        new_room = player.examine('WELL')
        if player.world == 'dark':
            print('Waiting for new snitch location...')
            # Poll until the well's hint changes (someone took the last
            # snitch) or the check limit is hit — then go regardless.
            head_start = player.examine('WELL')
            count = 0
            check_limit = 100
            while head_start == new_room and count < check_limit:
                head_start = player.examine('WELL')
                count += 1
            if count >= check_limit:
                print(
                    "You can't wait here any longer. Go to the last known location!")
            new_room = head_start
        print(
            f"Next {'coin can be mined' if player.world == 'light' else 'snitch can be found'} in room {new_room}\n")
        if player.encumbrance >= player.strength:
            sell_loot()
        travel_to_target(int(new_room))
        if player.world == 'light':
            player.get_coin()
            player.check_balance()
        else:
            # player automatically loots a golden snitch anytime they come across it, either
            # from move or dash
            time.sleep(player.cooldown)
            player.check_self()
def get_leaderboard():
    """
    Travels to the gold leaderboard room and prints the leaderboard.
    """
    leaderboard_room = 486
    travel_to_target(leaderboard_room)
    player.examine('BOOK')
def transmogrify(item):
    """
    Travels to the transmogrifier room and trades the given item plus one
    Lambda Coin for random gear.
    """
    transmog_room = 495
    travel_to_target(transmog_room)
    player.transform_coin(item)
def print_map():
    """
    Prints an approximation of the built map in the REPL. Each known room
    is placed at its parsed (x, y) coordinates; a room whose east exit has
    been explored is drawn with a trailing "--" connector.
    """
    # Removed an unused local (`row`) that was never referenced.
    m = player.map
    g = player.graph
    border = ["#"] * 155
    grid = [[' ' for i in range(31)] for j in range(100)]
    for i in [0, 1, 98, 99]:
        grid[i] = border.copy()
    for r_id in m:
        # coordinates look like "(xx,yy)"; x is offset by 45 to fit the grid
        # — assumes two-digit components, TODO confirm for all rooms.
        coords = m[r_id]['coordinates']
        x = int(coords[1:3])-45
        y = int(coords[-3:-1])
        has_e = 'e' in m[r_id]['exits'] and g[r_id]['e'] != "?"
        if has_e:
            grid[y][x] = str(r_id).zfill(3) + "--"
        else:
            grid[y][x] = str(r_id).zfill(3) + " "
    for line in grid:
        print("".join(line))
if __name__ == '__main__':
    running = True
    # REPL command table: maps a command name to its callable and how many
    # arguments it expects (0 or 1).
    command_list = {
        "moveTo": {"call": player.travel, "arg_count": 1},
        "loot": {"call": player.pick_up_loot, "arg_count": 1},
        "drop": {"call": player.drop_loot, "arg_count": 1},
        "checkSelf": {"call": player.check_self, "arg_count": 0},
        "roomDeets": {"call": player.check_room, "arg_count": 0},
        "mine": {"call": player.get_coin, "arg_count": 0},
        "checkCoins": {"call": player.check_balance, "arg_count": 0},
        "wear": {"call": player.wear, "arg_count": 1},
        "pray": {"call": player.pray, "arg_count": 0},
        "warp": {"call": player.warp, "arg_count": 0},
        "examine": {"call": player.examine, "arg_count": 1},
        "showMap": {"call": print_map, "arg_count": 0},
        "buildMap": {"call": explore_maze, "arg_count": 0},
        "travelTo": {"call": travel_to_target, "arg_count": 1},
        "sellLoot": {"call": sell_loot, "arg_count": 0},
        "getName": {"call": get_name, "arg_count": 1},
        "transmogrify": {"call": transmogrify, "arg_count": 1},
        "getPowers": {"call": acquire_powers, "arg_count": 0},
        "getRich": {"call": get_rich, "arg_count": 0},
        "getLeaderboard": {"call": get_leaderboard, "arg_count": 0},
    }
    while running:
        user_data = input('Enter command: ').split(' ')
        cmd = user_data[0]
        args = user_data[1:]
        # Convert purely numeric arguments (e.g. room ids) to ints.
        for i, v in enumerate(args):
            if v.isdigit():
                args[i] = int(v)
        if cmd == 'quit':
            running = False
        elif cmd not in command_list:
            print("That Command is not part of our command list try again.")
        else:
            entry = command_list[cmd]
            if entry["arg_count"] == 1:
                # BUG FIX: guard against a missing argument (previously an
                # IndexError on args[0]) and stringify before joining — the
                # int conversion above made " ".join(args) raise TypeError
                # for multi-word input containing a number.
                if not args:
                    print(f"'{cmd}' requires an argument.")
                elif len(args) > 1:
                    entry['call'](" ".join(str(a) for a in args))
                else:
                    entry['call'](args[0])
            elif entry["arg_count"] == 0:
                entry['call']()
|
OmarSalah95/Lambda-Treasure-MUD-Automation
|
api.py
|
# Base URL of the Lambda Treasure Hunt game server.
url = "https://lambda-treasure-hunt.herokuapp.com"
# API token is read once at import time from a local file.
# BUG FIX: use a context manager so the file handle is closed instead of
# leaking via open(...).read().
with open("api_key.txt", "r") as _key_file:
    key = _key_file.read()
# Maps each cardinal direction to its inverse, for back-linking rooms.
opposite = {"n": "s", "e": "w", "s": "n", "w": "e"}
class Queue:
    """Minimal FIFO queue backed by a list."""

    def __init__(self):
        self.queue = []

    def enqueue(self, value):
        """Add `value` to the back of the queue."""
        self.queue.append(value)

    def dequeue(self):
        """Remove and return the front item, or None when empty."""
        return self.queue.pop(0) if self.queue else None

    def size(self):
        """Return the number of queued items."""
        return len(self.queue)
def generate_path(target):
    """
    Performs BFS to find shortest path to target room. If "?" passed instead of target room id,
    finds closest room with an unexplored exit.
    Returns the first path to meet the specified criteria.

    NOTE(review): this module-level copy references a global `player` that
    is never defined in this module — as written it would raise NameError if
    called. It appears superseded by the version in adv.py; verify and
    consider removing.
    """
    # Create an empty queue and enqueue a PATH to the current room
    q = Queue()
    q.enqueue([str(player.current_room["room_id"])])
    # Create a Set to store visited rooms
    v = set()
    while q.size() > 0:
        p = q.dequeue()
        last_room = str(p[-1])
        if last_room not in v:
            # Check if target among exits (either a "?" or specific ID)
            if target in list(player.graph[last_room].values()):
                # >>> IF YES, RETURN PATH (excluding starting room)
                if target != "?":
                    # final_dir = next(
                    #     (k for k, v in player.graph[last_room].items() if str(v) == target), '?')
                    # final_dir ='?'
                    # for d in player.graph[last_room]:
                    #     if player.graph[last_room][d] is target:
                    #         final_dir=d
                    p.append(target)
                print(p[1:])
                return p[1:]
            # Else mark it as visited
            v.add(last_room)
            # Then add a PATH to its neighbors to the back of the queue
            for direction in player.graph[last_room]:
                if player.graph[last_room][direction] != '?':
                    path_copy = p.copy()
                    path_copy.append(player.graph[last_room][direction])
                    q.enqueue(path_copy)
|
OmarSalah95/Lambda-Treasure-MUD-Automation
|
miner.py
|
from api import url, key
import requests, sys, random, hashlib, json, time
from uuid import uuid4
from timeit import default_timer as timer
def mine():
    """
    Simple Proof of Work Algorithm.

    Fetches the last proof and difficulty from the server, searches for an
    integer `proof` such that sha256(f"{last_proof}{proof}") starts with
    `difficulty` zero characters, then submits it.

    :return: the server's JSON response to the submitted proof
    """
    data = requests.get(f"{url}/api/bc/last_proof/",
                        headers={'Authorization': f"Token {key}"}).json()
    last_proof = data['proof']
    difficulty = data["difficulty"]
    time.sleep(data['cooldown'])
    start = timer()
    print(f"Data for last proof: {data}")
    print("Searching for next proof")
    proof = 0
    # BUG FIX: the search previously stepped by 3126, which tested only
    # multiples of 3126 — skipping ~99.97% of candidates and potentially
    # looping forever when no valid proof is a multiple. Step by 1 to
    # guarantee the smallest valid proof is found.
    while not valid_proof(last_proof, proof, difficulty):
        proof += 1
    print("Proof found: " + str(proof) + " in " + str(timer() - start))
    # Renamed from `json`, which shadowed the imported json module.
    payload = {"proof": proof}
    # NOTE(review): the trailing space in the URL is preserved from the
    # original — confirm the server actually accepts it.
    req = requests.post(f"{url}/api/bc/mine/ ",
                        headers={'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=payload)
    print("Proof Submitted")
    print(req.json())
    return req.json()
def valid_proof(last_hash, proof, difficulty):
    """
    Return True when sha256(f"{last_hash}{proof}") has `difficulty`
    leading zero characters in its hex digest.

    :param last_hash: the previous proof value to chain from
    :param proof: <int?> candidate proof to validate
    :param difficulty: required number of leading zero hex chars
    :return: True if the resulting hash is a valid proof, False otherwise
    """
    candidate = f'{last_hash}{proof}'.encode()
    digest = hashlib.sha256(candidate).hexdigest()
    return digest.startswith("0" * difficulty)
|
OmarSalah95/Lambda-Treasure-MUD-Automation
|
player.py
|
from api import url, key, opposite
import requests
import json
import time
import os
from miner import mine
from cpu import *
class Player:
    """Stateful client for the Lambda Treasure Hunt game server.

    Wraps every REST endpoint (move, dash, take, sell, pray, warp, ...),
    caches the player's status fields as attributes, and persists the
    explored map/graph as JSON files on disk ('map.txt'/'graph.txt', with a
    'dark_' prefix while in the dark world). All network methods sleep
    through the server-imposed cooldown before and/or after requests.
    """

    def __init__(self):
        # Pull initial status from the server and mirror it into attributes.
        data = self._get_status()
        time.sleep(data['cooldown'])
        self.name = data['name']
        self.cooldown = data['cooldown']
        self.encumbrance = data['encumbrance']
        self.strength = data['strength']
        self.speed = data['speed']
        self.gold = data['gold']
        self.bodywear = data['bodywear']
        self.footwear = data['footwear']
        self.inventory = data['inventory']
        self.abilities = data['abilities']
        self.status = data['status']
        self.has_mined = data['has_mined']
        self.errors = data['errors']
        self.messages = data['messages']
        self.snitches = data['snitches'] if data['snitches'] else 0
        self.current_room = self.check_room()
        # Room ids 500+ belong to the dark world.
        self.world = "dark" if self.current_room['room_id'] > 499 else "light"
        self.map = self._read_file('map.txt')
        self.graph = self._read_file('graph.txt')

    def _get_status(self):
        """Fetch the raw status payload from the server."""
        r = requests.post(f"{url}/api/adv/status/",
                          headers={'Authorization': f"Token {key}", "Content-Type": "application/json"})
        return r.json()

    def _read_file(self, filepath):
        """Load a persisted JSON file, seeding it from the current room
        when it does not exist yet. Dark-world files get a 'dark_' prefix."""
        if self.world == 'dark':
            filepath = 'dark_' + filepath
        if not os.path.exists(filepath):
            f = open(filepath, 'w+')
            room = self.current_room
            if 'graph' in filepath:
                room = {room['room_id']: {d: '?' for d in room['exits']}}
            # NOTE(review): for graph files this nests the room dict twice
            # ({id: {id: {...}}}) — looks unintended, confirm file format.
            self._write_file(filepath, {self.current_room['room_id']: room})
        with open(filepath, 'r') as f:
            data = json.load(f)
        return data

    def _write_file(self, filepath, data):
        """Persist `data` as JSON, prefixing 'dark_' while in the dark world."""
        if self.world == 'dark' and 'dark' not in filepath:
            filepath = 'dark_' + filepath
        with open(filepath, 'w+') as outfile:
            json.dump(data, outfile)

    def check_room(self):
        """Fetch the current room payload (with the 'players' list stripped)."""
        r = requests.get(f"{url}/api/adv/init/",
                         headers={'Authorization': f"Token {key}"})
        data = r.json()
        if 'players' in data:
            del data['players']
        return data

    def check_self(self, cause=None):
        """Refresh all cached attributes from the server and print a status
        summary (a shorter message when cause == "item pick up")."""
        data = self._get_status()
        cleaned = {**data}  # How cool is the spread operator!
        cleaned['status'].append("Glasowyn's hands stand Empty and Effervescent, see them filled.") if len(
            cleaned['status']) < 1 else None
        cleaned["world"] = self.world
        # Fields omitted from the printed summary.
        cut = ['has_mined', 'errors', ]
        for k in cut:
            del cleaned[k]
        if cause == "item pick up":
            ret = f" You are now held down by the weight of {cleaned['encumbrance']} Stones.\n Your Experience and equipment Grant you the ability to\n carry {cleaned['strength']} stones before you need to take longer rests.\n Your bag now carries {cleaned['inventory']}"
            print(ret + f"\n Your ghost seems to have the space to carry an additional item if you would like" if "carry" in cleaned['abilities'] and len(
                cleaned['status']) else ret)
        else:
            print('\n'+"*"*22+' '+"Your Current State"+' '+"*"*22)
            for item in cleaned.items():
                print(f"{item[0]}: {item[1]}")
            print("*"*64+'\n')
        # Mirror the fresh payload back into attributes.
        self.name = data['name']
        self.cooldown = data['cooldown']
        self.encumbrance = data['encumbrance']
        self.strength = data['strength']
        self.speed = data['speed']
        self.gold = data['gold']
        self.bodywear = data['bodywear']
        self.footwear = data['footwear']
        self.inventory = data['inventory']
        self.abilities = data['abilities']
        self.status = data['status']
        self.has_mined = data['has_mined']
        self.errors = data['errors']
        self.messages = data['messages']
        self.snitches = data['snitches'] if data['snitches'] else 0
        self.map = self._read_file('map.txt')
        self.graph = self._read_file('graph.txt')

    def dash(self, direction, num_rooms, room_ids):
        """Dash through `num_rooms` consecutive rooms in one direction.

        room_ids is a comma-separated string of the room ids along the way
        (required by the server). Auto-loots items along the route.
        """
        if "dash" not in self.abilities:
            print("Error! You can't dash yet!")
            return
        time.sleep(self.cooldown)
        curr_id = self.current_room['room_id']
        print("\n======================================")
        print(f"Dashing {direction} from room {curr_id}...")
        json = {"direction": direction,
                "num_rooms": num_rooms, "next_room_ids": room_ids}
        r = requests.post(f"{url}/api/adv/dash/", headers={
            'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json)
        next_room = r.json()
        if 'players' in next_room:
            del next_room['players']
        next_id = next_room['room_id']
        # update map with room info
        self.map[next_id] = next_room
        self._write_file('map.txt', self.map)
        # change current room and update cooldown
        self.current_room = next_room
        self.cooldown = self.current_room['cooldown']
        if self.world == 'dark' and 'golden snitch' in next_room['items']:
            try:
                self.pick_up_loot('golden snitch')
            except:
                # Another player grabbed it between the dash and the take.
                print("Somebody already got that snitch!")
        elif self.world == 'light' and len(next_room['items']):
            for item in next_room['items']:
                self.pick_up_loot(item)
        for message in next_room['messages']:
            print(f"{message}")
        print(f"Now the player is in {self.current_room['room_id']}")
        print(f"Cooldown before next action: {self.cooldown} seconds")
        print("======================================\n")

    def travel(self, direction, method="move"):
        """Move (or fly, when able and terrain allows) one room in
        `direction`, auto-looting, and update the graph/map files with the
        new room and its back-link."""
        time.sleep(self.cooldown)
        curr_id = self.current_room['room_id']
        print("\n======================================")
        if "fly" in self.abilities and self.map[str(curr_id)]['terrain'] in ['MOUNTAIN', 'NORMAL']:
            method = "fly"
            print(f"Flying {direction} from room {curr_id}...")
        else:
            print(f"Walking {direction} from room {curr_id}...")
        if direction not in self.graph[str(curr_id)]:
            print("Error! Not a valid direction from the current room")
        else:
            json = {"direction": direction}
            # Supplying the known next room id earns a "wise explorer"
            # cooldown bonus from the server.
            if self.graph[str(curr_id)][direction] != "?":
                json['next_room_id'] = str(self.graph[str(curr_id)][direction])
            next_room = requests.post(f"{url}/api/adv/{method}/", headers={
                'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
            # change current room and update cooldown
            self.current_room = next_room
            self.cooldown = self.current_room['cooldown']
            if self.world != 'dark':
                # Code for looting any items in the room if the space is available
                if len(next_room['items']) > 0 and self.encumbrance < self.strength:
                    for item in next_room['items']:
                        time.sleep(next_room['cooldown'])
                        self.pick_up_loot(item)
            else:
                if 'golden snitch' in next_room['items']:
                    self.pick_up_loot('golden snitch')
            if 'players' in next_room:
                del next_room['players']
            next_id = next_room['room_id']
            # add to graph and map, in addition to making graph connections
            if str(next_id) not in self.graph:
                print(f"New room! # {next_id}")
                self.graph[str(next_id)] = {
                    e: '?' for e in next_room['exits']}
            # make graph connections and update graph
            self.graph[str(curr_id)][direction] = next_id
            self.graph[str(next_id)][opposite[direction]] = curr_id
            self._write_file('graph.txt', self.graph)
            # update map with room info
            self.map[next_id] = next_room
            self._write_file('map.txt', self.map)
            for message in next_room['messages']:
                print(f"{message}")
            print(f"Now the player is in {self.current_room['room_id']}")
            print(f"Cooldown before next action: {self.cooldown} seconds")
            if len(self.graph) < 500:
                print(
                    f"Total number of rooms explored so far: {len(self.graph)}")
            print("======================================\n")

    def get_coin(self):
        """Run the proof-of-work miner, retrying recursively while the
        server reports errors (e.g. someone else mined first)."""
        time.sleep(self.cooldown)
        data = mine()
        self.cooldown = data['cooldown']
        if len(data['errors']) > 0:
            self.get_coin()

    def pick_up_loot(self, item):
        """Take `item` from the current room; when the bag is full, fall
        back to the ghost-carry ability if available."""
        print(f"Looting {item}")
        json = {"name": item}
        if self.encumbrance < self.strength:
            time.sleep(self.cooldown)
            req = requests.post(f"{url}/api/adv/take/", headers={
                'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
            self.cooldown = req['cooldown']
            time.sleep(self.cooldown)
            self.check_self("item pick up") if self.world == 'light' else print(' Success!\n '+req['messages'][0] if len(req['messages']) > 0 else print(
                " Oh NO!\n just as quickly as you arrived, the Golden Snitch disappeared to the next room and out of grasp!"))
        else:
            if "carry" in self.abilities:
                if len(self.status) != 0:
                    print(
                        "It seems your Bag is full and Glasowyn is already carring something!")
                else:
                    req = requests.post(f"{url}/api/adv/carry/", headers={
                        'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
                    self.cooldown = req['cooldown']
                    print(req)
            else:
                print("Your Bag is full!")

    def drop_loot(self, item):
        """Drop `item` in the current room and refresh cached state."""
        time.sleep(self.cooldown)
        json = {"name": item}
        req = requests.post(f"{url}/api/adv/drop/", headers={
            'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
        time.sleep(req['cooldown'])
        self.check_self()

    def buy_name(self, name):
        """Purchase a player name (two-step request: quote, then confirm
        with "aye")."""
        time.sleep(self.cooldown)
        json = {"name": name}
        req = requests.post(f"{url}/api/adv/change_name/", headers={
            'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
        print(req)
        time.sleep(req['cooldown'])
        json['confirm'] = "aye"
        r1_conf = requests.post(f"{url}/api/adv/change_name/", headers={
            'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
        print(r1_conf)
        time.sleep(r1_conf['cooldown'])
        self.check_self()

    def examine(self, item):
        """Examine an item/feature in the room. Examining "WELL" decodes
        the returned LS-8 binary via the CPU emulator and returns the room
        number portion of the decoded hint string; anything else just
        prints the description."""
        time.sleep(self.cooldown)
        json = {"name": item}
        req = requests.post(f"{url}/api/adv/examine/", headers={
            'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
        self.cooldown = req['cooldown']
        if item == "WELL":  # Examining well gives binary code to be deciphered for next coin location
            if os.path.exists("hint.txt"):
                os.remove("hint.txt")
            desc = req['description']
            instructions = desc.split('\n')
            # First two description lines are prose; the rest is the program.
            for line in instructions[2:]:
                with open("hint.txt", "a") as f:
                    f.write(f"{line}\n")
            cpu = CPU()
            cpu.load('hint.txt')
            cpu.run()
            # clean up after itself and remove the hint file after used (new one will be made for future hints anyway)
            if os.path.exists("hint.txt"):
                os.remove("hint.txt")
            # full message for light is "Mine your coin in room ###"
            # but message for dark well is "Find your snitch in room ###"
            limiter = 23 if self.world == 'light' else 24
            return cpu.hint[limiter:]
        else:
            print(req['description'])

    def pray(self):
        """Pray at the current room's shrine (grants an ability) and
        refresh cached state."""
        time.sleep(self.cooldown)
        req = requests.post(f"{url}/api/adv/pray/", headers={
            'Authorization': f"Token {key}", "Content-Type": "application/json"}).json()
        print(req)
        time.sleep(req['cooldown'])
        self.check_self()

    def wear(self, item):
        """Equip a wearable item from the inventory and refresh state."""
        time.sleep(self.cooldown)
        json = {"name": item}
        req = requests.post(f"{url}/api/adv/wear/", headers={
            'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
        self.cooldown = req['cooldown']
        time.sleep(self.cooldown)
        self.check_self()

    def check_balance(self):
        """Fetch the Lambda Coin balance; parses the coin count out of the
        server's message string into self.coins."""
        time.sleep(self.cooldown)
        req = requests.get(f"{url}/api/bc/get_balance/", headers={
            'Authorization': f"Token {key}"}).json()
        # Message format assumed: "You have a balance of N Lambda coins" —
        # the sixth whitespace-separated token is the amount.
        self.coins = float(req['messages'][0].split(' ')[5])
        self.cooldown = req['cooldown']
        print(f"\n{req['messages'][0]}\n")

    def transform_coin(self, item):
        """Trade `item` plus one Lambda Coin at the transmogrifier for
        random gear, looting whatever comes back."""
        time.sleep(self.cooldown)
        self.check_balance()
        json = {"name": item}
        if self.coins > 0 and item in self.inventory:
            time.sleep(self.cooldown)
            req = requests.post(f"{url}/api/adv/transmogrify/", headers={
                'Authorization': f"Token {key}", "Content-Type": "application/json"}, json=json).json()
            print(req)
            self.cooldown = req['cooldown']
            for item in req['items']:
                self.pick_up_loot(item)

    def warp(self):
        """Warp between the light and dark worlds (requires the warp
        ability), updating self.world and seeding graph/map files for a
        previously unseen destination room."""
        if "warp" in self.abilities:
            time.sleep(self.cooldown)
            req = requests.post(f"{url}/api/adv/warp/", headers={
                'Authorization': f"Token {key}", "Content-Type": "application/json"}).json()
            print(req['messages'][0])
            self.cooldown = req['cooldown']
            if self.world == 'light':
                self.world = 'dark'
            else:
                self.world = 'light'
            self.current_room = req
            time.sleep(self.cooldown)
            self.check_self()
            if req['room_id'] not in self.graph:
                # Just warped to a previously unknown room, add it to graph and map
                g = self.graph
                g[req['room_id']] = {d: '?' for d in req['exits']}
                self._write_file('graph.txt', g)
                m = self.map
                m[req['room_id']] = req
                self._write_file('map.txt', m)
        else:
            print("You do not have the warp ability yet!")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.