text stringlengths 37 1.41M |
|---|
# Day of the week by number (P: "dia de la semana por numero")
# A dict lookup replaces seven independent `if` statements, and the
# interactive part is guarded so importing this module has no side effects.
DIAS = {
    1: "lunes",
    2: "martes",
    3: "miercoles",
    4: "jueves",
    5: "viernes",
    6: "sabado",
    7: "domingo",
}

def dia_semana(numero):
    """Return the Spanish day name for *numero* (1-7), or None if out of range."""
    return DIAS.get(numero)

if __name__ == "__main__":
    numero = int(input(" ingresen el numero = "))
    nombre = dia_semana(numero)
    # Like the original, print nothing when the number is out of range.
    if nombre is not None:
        print(nombre)
import os
os.system('cls')  # clear the console (Windows-only 'cls'; 'clear' on Unix)
# P7E15
# Contact-management ("agenda") program. Each contact stores a phone
# number, first name, two surnames and an e-mail address. A menu offers:
# add a contact, look a contact up by name, list every contact, and
# delete a contact. Exercises dictionaries, functions and procedures.
agenda={}  # contact book: name -> [surnames, phone, email]
# Menu options; these are also the exact strings the user must type.
funciones=["Añadir contacto", "Consultar contacto a partir de un nombre", "Consultar todos los contactos", "Eliminar un contacto", "Cerrar agenda"]
def añadirContacto(d):
    """Prompt for a new contact's details and store them in *d* keyed by name.

    The stored value is a list ``[surnames, phone (int), email]``.
    Returns *d* (which is also mutated in place).
    """
    nombre = input("Nombre del contacto: ")
    apellido1 = input("Primer apellido: ")
    apellido2 = input("Segundo apellido: ")
    telefono = int(input("Telefono del contacto: "))
    email = input("Email del contacto: ")
    d[nombre] = [" ".join((apellido1, apellido2)), telefono, email]
    return d
def consultarContacto(d):
    """Read a contact name from stdin and print its stored data.

    Prints the contact's value list, or "No existe" when the name is
    not in the agenda.
    """
    nombre = input("Introduzca el nombre del contacto: ")
    print(d.get(nombre, "No existe"))
def consultarAgenda(d):
    """Print every contact: the name followed by its stored values.

    One line per contact, values space-separated. Uses ``items()`` plus
    ``str.join`` instead of the original ``keys()``+``get()`` double
    lookup and quadratic ``+=`` string building (which also left a
    trailing space on every line).
    """
    for nombre, datos in d.items():
        print(nombre, " ".join(str(campo) for campo in datos))
def eliminarContacto(d):
    """Ask which contact to delete and remove it after a Y/N confirmation.

    Returns *d*. Unlike the original, an unknown name no longer raises
    ``KeyError`` from ``d.pop``: it is reported and nothing is deleted.
    Any answer other than "Y"/"y" cancels the deletion.
    """
    eliminarNombre = input("Introduzca contacto a eliminar: ")
    if eliminarNombre not in d:
        # Same message style as consultarContacto for an unknown name.
        print("No existe")
        return d
    print("¿Seguro que desea eliminar el contacto %s?" %(eliminarNombre))
    check = input("Y/N? ")
    if check == "Y" or check == "y":
        d.pop(eliminarNombre)
        print("%s se ha eliminado de la lista de contactos" %(eliminarNombre))
    return d
#def programa(func):
# print("Bienvenido a su agenda, ¿qué desea hacer?")
# for i in func:
# print(i)
# accion=input("Elija una opción: ")
# return accion
# Main menu loop: serves menu actions until the user declines at start-up
# ("N"/"n") or picks "Cerrar agenda". The user must type the menu option
# text verbatim.
print("¿Iniciar la agenda?", end="")
inicio=input("¿Y/N? ")
contador=1  # 1 only on the very first pass, so the menu is printed once
while inicio!="N" and inicio!="n":
    if contador==1:
        print("Bienvenido, ¿qué desea hacer?:")
        for i in funciones:
            print(i)
        print("--------------------------------------------")
    accion=input()  # menu choice, matched exactly against the option strings
    if accion=="Añadir contacto":
        añadirContacto(agenda)
        contador+=1
        print("--------------------------------------------")
    elif accion=="Consultar contacto a partir de un nombre":
        consultarContacto(agenda)
        contador+=1
        print("--------------------------------------------")
    elif accion=="Consultar todos los contactos":
        consultarAgenda(agenda)
        contador+=1
        print("--------------------------------------------")
    elif accion=="Eliminar un contacto":
        eliminarContacto(agenda)
        contador+=1
        print("--------------------------------------------")
    elif accion=="Cerrar agenda":
        inicio="N"  # fails the while condition on the next check
    else:
        # Unrecognized input: warn and show the separator again.
        contador+=1
        print("!!!!!!")
        print("Error, inténtelo de nuevo.")
        print("--------------------------------------------")
print("Bye no vemos mañana")
|
import os
os.system('cls')  # clear the console (Windows-only 'cls')
# P7E8
# Read a sentence from the keyboard and pass it to a function that
# removes every blank space (compacting the sentence); the main program
# prints the final result.
frase=input("Introduzca una frase: ")
espacio=" "  # substring to remove
compacto=""  # replacement: empty string, i.e. deletion
def f(a):
    """Return *a* with every space removed (the "compacted" sentence).

    Self-contained: uses the literals " " and "" directly instead of the
    module-level globals ``espacio``/``compacto``, so the helper can be
    reused or tested without the surrounding script state.
    """
    return a.replace(" ", "")
print(f(frase))
|
"""
What is a state_dict in PyTorch
===============================
In PyTorch, the learnable parameters (i.e. weights and biases) of a
``torch.nn.Module`` model are contained in the model’s parameters
(accessed with ``model.parameters()``). A ``state_dict`` is simply a
Python dictionary object that maps each layer to its parameter tensor.
Introduction
------------
A ``state_dict`` is an integral entity if you are interested in saving
or loading models from PyTorch.
Because ``state_dict`` objects are Python dictionaries, they can be
easily saved, updated, altered, and restored, adding a great deal of
modularity to PyTorch models and optimizers.
Note that only layers with learnable parameters (convolutional layers,
linear layers, etc.) and registered buffers (batchnorm’s running_mean)
have entries in the model’s ``state_dict``. Optimizer objects
(``torch.optim``) also have a ``state_dict``, which contains information
about the optimizer’s state, as well as the hyperparameters used.
In this recipe, we will see how ``state_dict`` is used with a simple
model.
Setup
-----
Before we begin, we need to install ``torch`` if it isn’t already
available.
::
pip install torch
"""
######################################################################
# Steps
# -----
#
# 1. Import all necessary libraries for loading our data
# 2. Define and initialize the neural network
# 3. Initialize the optimizer
# 4. Access the model and optimizer ``state_dict``
#
# 1. Import necessary libraries for loading our data
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# For this recipe, we will use ``torch`` and its subsidiaries ``torch.nn``
# and ``torch.optim``.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
######################################################################
# 2. Define and initialize the neural network
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# For sake of example, we will create a neural network for training
# images. To learn more see the Defining a Neural Network recipe.
#
class Net(nn.Module):
    """Small LeNet-style CNN for 3-channel 32x32 images, 10 output classes.

    Note: the forward pass uses ``F`` (``torch.nn.functional``), which the
    original script never imported; the import block is fixed accordingly.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Two conv+pool stages, flatten to (batch, 400), three FC layers."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
print(net)
######################################################################
# 3. Initialize the optimizer
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We will use SGD with momentum.
#
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
######################################################################
# 4. Access the model and optimizer ``state_dict``
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Now that we have constructed our model and optimizer, we can understand
# what is preserved in their respective ``state_dict`` properties.
#
# Print model's state_dict
print("Model's state_dict:")
for param_tensor in net.state_dict():
print(param_tensor, "\t", net.state_dict()[param_tensor].size())
print()
# Print optimizer's state_dict
print("Optimizer's state_dict:")
for var_name in optimizer.state_dict():
print(var_name, "\t", optimizer.state_dict()[var_name])
######################################################################
# This information is relevant for saving and loading the model and
# optimizers for future use.
#
# Congratulations! You have successfully used ``state_dict`` in PyTorch.
#
# Learn More
# ----------
#
# Take a look at these other recipes to continue your learning:
#
# - `Saving and loading models for inference in PyTorch <https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_models_for_inference.html>`__
# - `Saving and loading a general checkpoint in PyTorch <https://pytorch.org/tutorials/recipes/recipes/saving_and_loading_a_general_checkpoint.html>`__
|
"""
Text classification with the torchtext library
==============================================
In this tutorial, we will show how to use the torchtext library to build the dataset for the text classification analysis. Users will have the flexibility to
- Access to the raw data as an iterator
- Build data processing pipeline to convert the raw text strings into ``torch.Tensor`` that can be used to train the model
- Shuffle and iterate the data with `torch.utils.data.DataLoader <https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader>`__
Prerequisites
~~~~~~~~~~~~~~~~
A recent 2.x version of the ``portalocker`` package needs to be installed prior to running the tutorial.
For example, in the Colab environment, this can be done by adding the following line at the top of the script:
.. code-block:: bash
!pip install -U 'portalocker>=2.0.0'
"""
######################################################################
# Access to the raw dataset iterators
# -----------------------------------
#
# The torchtext library provides a few raw dataset iterators, which yield the raw text strings. For example, the ``AG_NEWS`` dataset iterators yield the raw data as a tuple of label and text.
#
# To access torchtext datasets, please install torchdata following instructions at https://github.com/pytorch/data.
#
import torch
from torchtext.datasets import AG_NEWS
train_iter = iter(AG_NEWS(split="train"))
######################################################################
# ::
#
# next(train_iter)
# >>> (3, "Fears for T N pension after talks Unions representing workers at Turner
# Newall say they are 'disappointed' after talks with stricken parent firm Federal
# Mogul.")
#
# next(train_iter)
# >>> (4, "The Race is On: Second Private Team Sets Launch Date for Human
# Spaceflight (SPACE.com) SPACE.com - TORONTO, Canada -- A second\\team of
# rocketeers competing for the #36;10 million Ansari X Prize, a contest
# for\\privately funded suborbital space flight, has officially announced
# the first\\launch date for its manned rocket.")
#
# next(train_iter)
# >>> (4, 'Ky. Company Wins Grant to Study Peptides (AP) AP - A company founded
# by a chemistry researcher at the University of Louisville won a grant to develop
# a method of producing better peptides, which are short chains of amino acids, the
# building blocks of proteins.')
#
######################################################################
# Prepare data processing pipelines
# ---------------------------------
#
# We have revisited the very basic components of the torchtext library, including vocab, word vectors, tokenizer. Those are the basic data processing building blocks for raw text string.
#
# Here is an example for typical NLP data processing with tokenizer and vocabulary. The first step is to build a vocabulary with the raw training dataset. Here we use built in
# factory function `build_vocab_from_iterator` which accepts iterator that yield list or iterator of tokens. Users can also pass any special symbols to be added to the
# vocabulary.
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
tokenizer = get_tokenizer("basic_english")
train_iter = AG_NEWS(split="train")
def yield_tokens(data_iter):
    """Generate the token list for each (label, text) pair in *data_iter*.

    Uses the module-level ``tokenizer``; labels are ignored.
    """
    for _label, raw_text in data_iter:
        yield tokenizer(raw_text)
vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"])
######################################################################
# The vocabulary block converts a list of tokens into integers.
#
# ::
#
# vocab(['here', 'is', 'an', 'example'])
# >>> [475, 21, 30, 5297]
#
# Prepare the text processing pipeline with the tokenizer and vocabulary. The text and label pipelines will be used to process the raw data strings from the dataset iterators.
def text_pipeline(x):
    """Convert a raw text string into a list of vocabulary token ids.

    Relies on the module-level ``tokenizer`` and ``vocab`` (PEP 8
    discourages assigning lambdas to names; plain defs also get
    docstrings and useful tracebacks).
    """
    return vocab(tokenizer(x))

def label_pipeline(x):
    """Map the dataset's 1-based label (str or int) to a 0-based int class."""
    return int(x) - 1
######################################################################
# The text pipeline converts a text string into a list of integers based on the lookup table defined in the vocabulary. The label pipeline converts the label into integers. For example,
#
# ::
#
# text_pipeline('here is the an example')
# >>> [475, 21, 2, 30, 5297]
# label_pipeline('10')
# >>> 9
#
######################################################################
# Generate data batch and iterator
# --------------------------------
#
# `torch.utils.data.DataLoader <https://pytorch.org/docs/stable/data.html?highlight=dataloader#torch.utils.data.DataLoader>`__
# is recommended for PyTorch users (a tutorial is `here <https://pytorch.org/tutorials/beginner/data_loading_tutorial.html>`__).
# It works with a map-style dataset that implements the ``__getitem__()`` and ``__len__()`` protocols, and represents a map from indices/keys to data samples. It also works with an iterable dataset with the shuffle argument of ``False``.
#
# Before sending to the model, ``collate_fn`` function works on a batch of samples generated from ``DataLoader``. The input to ``collate_fn`` is a batch of data with the batch size in ``DataLoader``, and ``collate_fn`` processes them according to the data processing pipelines declared previously. Pay attention here and make sure that ``collate_fn`` is declared as a top level def. This ensures that the function is available in each worker.
#
# In this example, the text entries in the original data batch input are packed into a list and concatenated as a single tensor for the input of ``nn.EmbeddingBag``. The offset is a tensor of delimiters to represent the beginning index of the individual sequence in the text tensor. Label is a tensor saving the labels of individual text entries.
from torch.utils.data import DataLoader
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def collate_batch(batch):
    """Collate raw (label, text) pairs into tensors for ``nn.EmbeddingBag``.

    Returns ``(labels, flat_token_ids, offsets)``, all moved to the global
    ``device``. All samples' token ids are concatenated into one flat
    tensor; ``offsets[i]`` is the start index of sample *i* within it.
    Uses the module-level ``label_pipeline``/``text_pipeline``.
    """
    label_list, text_list, offsets = [], [], [0]
    for _label, _text in batch:
        label_list.append(label_pipeline(_label))
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        offsets.append(processed_text.size(0))  # sample length; becomes a start index below
    label_list = torch.tensor(label_list, dtype=torch.int64)
    # Drop the last length and take the running sum: [0, len0, len0+len1, ...]
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)
    text_list = torch.cat(text_list)
    return label_list.to(device), text_list.to(device), offsets.to(device)
train_iter = AG_NEWS(split="train")
dataloader = DataLoader(
train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch
)
######################################################################
# Define the model
# ----------------
#
# The model is composed of the `nn.EmbeddingBag <https://pytorch.org/docs/stable/nn.html?highlight=embeddingbag#torch.nn.EmbeddingBag>`__ layer plus a linear layer for the classification purpose. ``nn.EmbeddingBag`` with the default mode of "mean" computes the mean value of a “bag” of embeddings. Although the text entries here have different lengths, ``nn.EmbeddingBag`` module requires no padding here since the text lengths are saved in offsets.
#
# Additionally, since ``nn.EmbeddingBag`` accumulates the average across
# the embeddings on the fly, ``nn.EmbeddingBag`` can enhance the
# performance and memory efficiency to process a sequence of tensors.
#
# .. image:: ../_static/img/text_sentiment_ngrams_model.png
#
from torch import nn
class TextClassificationModel(nn.Module):
    """EmbeddingBag + linear classifier for bag-of-tokens text classification.

    Args:
        vocab_size: number of rows in the embedding table.
        embed_dim: embedding dimension.
        num_class: number of output classes.
    """

    def __init__(self, vocab_size, embed_dim, num_class):
        super().__init__()  # modern zero-argument super
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()

    def init_weights(self):
        """Initialize weights uniformly in [-0.5, 0.5] and zero the fc bias."""
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()

    def forward(self, text, offsets):
        """Classify the flat *text* token tensor, split per sample at *offsets*."""
        embedded = self.embedding(text, offsets)
        return self.fc(embedded)
######################################################################
# Initiate an instance
# --------------------
#
# The ``AG_NEWS`` dataset has four labels and therefore the number of classes is four.
#
# ::
#
# 1 : World
# 2 : Sports
# 3 : Business
# 4 : Sci/Tec
#
# We build a model with the embedding dimension of 64. The vocab size is equal to the length of the vocabulary instance. The number of classes is equal to the number of labels,
#
train_iter = AG_NEWS(split="train")
num_class = len(set([label for (label, text) in train_iter]))
vocab_size = len(vocab)
emsize = 64
model = TextClassificationModel(vocab_size, emsize, num_class).to(device)
######################################################################
# Define functions to train the model and evaluate results.
# ---------------------------------------------------------
#
import time
def train(dataloader):
    """Run one training epoch of the global ``model`` over *dataloader*.

    Relies on the module-level ``model``, ``optimizer``, ``criterion`` and
    ``epoch`` names; logs the running accuracy every 500 batches and then
    resets the running counters.
    """
    model.train()
    total_acc, total_count = 0, 0
    log_interval = 500
    start_time = time.time()
    for idx, (label, text, offsets) in enumerate(dataloader):
        optimizer.zero_grad()
        predicted_label = model(text, offsets)
        loss = criterion(predicted_label, label)
        loss.backward()
        # Clip gradients to keep SGD stable with the large (5.0) learning rate.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        optimizer.step()
        total_acc += (predicted_label.argmax(1) == label).sum().item()
        total_count += label.size(0)
        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time  # NOTE(review): computed but never printed
            print(
                "| epoch {:3d} | {:5d}/{:5d} batches "
                "| accuracy {:8.3f}".format(
                    epoch, idx, len(dataloader), total_acc / total_count
                )
            )
            total_acc, total_count = 0, 0
            start_time = time.time()
def evaluate(dataloader):
    """Return the classification accuracy of the global ``model`` on *dataloader*.

    Runs under ``torch.no_grad``. The original also computed the criterion
    loss per batch and the ``enumerate`` index, but used neither; both are
    removed.
    """
    model.eval()
    total_acc, total_count = 0, 0
    with torch.no_grad():
        for label, text, offsets in dataloader:
            predicted_label = model(text, offsets)
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            total_count += label.size(0)
    return total_acc / total_count
######################################################################
# Split the dataset and run the model
# -----------------------------------
#
# Since the original ``AG_NEWS`` has no valid dataset, we split the training
# dataset into train/valid sets with a split ratio of 0.95 (train) and
# 0.05 (valid). Here we use
# `torch.utils.data.dataset.random_split <https://pytorch.org/docs/stable/data.html?highlight=random_split#torch.utils.data.random_split>`__
# function in PyTorch core library.
#
# `CrossEntropyLoss <https://pytorch.org/docs/stable/nn.html?highlight=crossentropyloss#torch.nn.CrossEntropyLoss>`__
# criterion combines ``nn.LogSoftmax()`` and ``nn.NLLLoss()`` in a single class.
# It is useful when training a classification problem with C classes.
# `SGD <https://pytorch.org/docs/stable/_modules/torch/optim/sgd.html>`__
# implements stochastic gradient descent method as the optimizer. The initial
# learning rate is set to 5.0.
# `StepLR <https://pytorch.org/docs/master/_modules/torch/optim/lr_scheduler.html#StepLR>`__
# is used here to adjust the learning rate through epochs.
#
from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset
# Hyperparameters
EPOCHS = 10 # epoch
LR = 5 # learning rate
BATCH_SIZE = 64 # batch size for training
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
total_accu = None
train_iter, test_iter = AG_NEWS()
train_dataset = to_map_style_dataset(train_iter)
test_dataset = to_map_style_dataset(test_iter)
num_train = int(len(train_dataset) * 0.95)
split_train_, split_valid_ = random_split(
train_dataset, [num_train, len(train_dataset) - num_train]
)
train_dataloader = DataLoader(
split_train_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch
)
valid_dataloader = DataLoader(
split_valid_, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch
)
test_dataloader = DataLoader(
test_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch
)
for epoch in range(1, EPOCHS + 1):
epoch_start_time = time.time()
train(train_dataloader)
accu_val = evaluate(valid_dataloader)
if total_accu is not None and total_accu > accu_val:
scheduler.step()
else:
total_accu = accu_val
print("-" * 59)
print(
"| end of epoch {:3d} | time: {:5.2f}s | "
"valid accuracy {:8.3f} ".format(
epoch, time.time() - epoch_start_time, accu_val
)
)
print("-" * 59)
######################################################################
# Evaluate the model with test dataset
# ------------------------------------
#
######################################################################
# Checking the results of the test dataset…
print("Checking the results of test dataset.")
accu_test = evaluate(test_dataloader)
print("test accuracy {:8.3f}".format(accu_test))
######################################################################
# Test on a random news
# ---------------------
#
# Use the best model so far and test a golf news.
#
ag_news_label = {1: "World", 2: "Sports", 3: "Business", 4: "Sci/Tec"}
def predict(text, text_pipeline):
    """Return the 1-based class id the global ``model`` assigns to *text*.

    *text_pipeline* maps the raw string to token ids; a single offset of 0
    tells EmbeddingBag the whole tensor is one sample.
    """
    with torch.no_grad():
        token_ids = torch.tensor(text_pipeline(text))
        logits = model(token_ids, torch.tensor([0]))
        return logits.argmax(1).item() + 1
ex_text_str = "MEMPHIS, Tenn. – Four days ago, Jon Rahm was \
enduring the season’s worst weather conditions on Sunday at The \
Open on his way to a closing 75 at Royal Portrush, which \
considering the wind and the rain was a respectable showing. \
Thursday’s first round at the WGC-FedEx St. Jude Invitational \
was another story. With temperatures in the mid-80s and hardly any \
wind, the Spaniard was 13 strokes better in a flawless round. \
Thanks to his best putting performance on the PGA Tour, Rahm \
finished with an 8-under 62 for a three-stroke lead, which \
was even more impressive considering he’d never played the \
front nine at TPC Southwind."
model = model.to("cpu")
print("This is a %s news" % ag_news_label[predict(ex_text_str, text_pipeline)])
|
# -*- coding: utf-8 -*-
"""
Spatial Transformer Networks Tutorial
=====================================
**Author**: `Ghassen HAMROUNI <https://github.com/GHamrouni>`_
.. figure:: /_static/img/stn/FSeq.png
In this tutorial, you will learn how to augment your network using
a visual attention mechanism called spatial transformer
networks. You can read more about the spatial transformer
networks in the `DeepMind paper <https://arxiv.org/abs/1506.02025>`__
Spatial transformer networks are a generalization of differentiable
attention to any spatial transformation. Spatial transformer networks
(STN for short) allow a neural network to learn how to perform spatial
transformations on the input image in order to enhance the geometric
invariance of the model.
For example, it can crop a region of interest, scale and correct
the orientation of an image. It can be a useful mechanism because CNNs
are not invariant to rotation and scale and more general affine
transformations.
One of the best things about STN is the ability to simply plug it into
any existing CNN with very little modification.
"""
# License: BSD
# Author: Ghassen Hamrouni
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
plt.ion() # interactive mode
######################################################################
# Loading the data
# ----------------
#
# In this post we experiment with the classic MNIST dataset. Using a
# standard convolutional network augmented with a spatial transformer
# network.
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Training dataset
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(root='.', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])), batch_size=64, shuffle=True, num_workers=4)
# Test dataset
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(root='.', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])), batch_size=64, shuffle=True, num_workers=4)
######################################################################
# Depicting spatial transformer networks
# --------------------------------------
#
# Spatial transformer networks boils down to three main components :
#
# - The localization network is a regular CNN which regresses the
# transformation parameters. The transformation is never learned
# explicitly from this dataset, instead the network learns automatically
# the spatial transformations that enhances the global accuracy.
# - The grid generator generates a grid of coordinates in the input
# image corresponding to each pixel from the output image.
# - The sampler uses the parameters of the transformation and applies
# it to the input image.
#
# .. figure:: /_static/img/stn/stn-arch.png
#
# .. Note::
# We need the latest version of PyTorch that contains
# affine_grid and grid_sample modules.
#
class Net(nn.Module):
    """MNIST classifier CNN with a spatial transformer (STN) front end.

    The STN warps the input with a learned affine transform before the
    regular convolutional classifier runs.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)
        # Spatial transformer localization-network: regresses the affine
        # parameters from the input image.
        self.localization = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=7),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(8, 10, kernel_size=5),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True)
        )
        # Regressor for the 3 * 2 affine matrix
        self.fc_loc = nn.Sequential(
            nn.Linear(10 * 3 * 3, 32),
            nn.ReLU(True),
            nn.Linear(32, 3 * 2)
        )
        # Initialize the weights/bias with the identity transformation so
        # training starts from an un-warped image.
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    def stn(self, x):
        """Spatial transformer forward: warp *x* by the learned affine grid."""
        xs = self.localization(x)
        xs = xs.view(-1, 10 * 3 * 3)
        theta = self.fc_loc(xs)
        theta = theta.view(-1, 2, 3)
        # align_corners=False is the current default; passing it explicitly
        # pins the behavior and silences the deprecation warning.
        grid = F.affine_grid(theta, x.size(), align_corners=False)
        x = F.grid_sample(x, grid, align_corners=False)
        return x

    def forward(self, x):
        # Transform the input, then run the usual conv/FC forward pass.
        x = self.stn(x)
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
model = Net().to(device)
######################################################################
# Training the model
# ------------------
#
# Now, let's use the SGD algorithm to train the model. The network is
# learning the classification task in a supervised way. In the same time
# the model is learning STN automatically in an end-to-end fashion.
optimizer = optim.SGD(model.parameters(), lr=0.01)
def train(epoch):
    """Train the global ``model`` for one epoch over ``train_loader``.

    Uses the module-level ``optimizer`` and ``device``; *epoch* is only
    used for the progress log, printed every 500 batches.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 500 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
#
# A simple test procedure to measure the STN performances on MNIST.
#
def test():
    """Evaluate the global ``model`` on ``test_loader``.

    Prints the average NLL loss and the accuracy over the whole test set.
    """
    with torch.no_grad():
        model.eval()
        test_loss = 0
        correct = 0
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Sum (not average) the batch losses; divided by dataset size
            # below. ``size_average=False`` was removed from modern PyTorch;
            # ``reduction='sum'`` is the equivalent supported form.
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            # get the index of the max log-probability
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
              .format(test_loss, correct, len(test_loader.dataset),
                      100. * correct / len(test_loader.dataset)))
######################################################################
# Visualizing the STN results
# ---------------------------
#
# Now, we will inspect the results of our learned visual attention
# mechanism.
#
# We define a small helper function in order to visualize the
# transformations while training.
def convert_image_np(inp):
    """Convert a CHW Tensor to an HWC numpy image, undoing normalization."""
    img = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    return np.clip(img * std + mean, 0, 1)
# We want to visualize the output of the spatial transformers layer
# after the training, we visualize a batch of input images and
# the corresponding transformed batch using STN.
def visualize_stn():
    """Plot a test batch next to its STN-transformed version.

    Uses the global ``model``, ``test_loader`` and ``device``; draws the
    two image grids side by side with matplotlib (figure is shown by the
    caller via ``plt.show()``).
    """
    with torch.no_grad():
        # Get a batch of test data (only the images, not the labels)
        data = next(iter(test_loader))[0].to(device)
        input_tensor = data.cpu()
        transformed_input_tensor = model.stn(data).cpu()
        in_grid = convert_image_np(
            torchvision.utils.make_grid(input_tensor))
        out_grid = convert_image_np(
            torchvision.utils.make_grid(transformed_input_tensor))
        # Plot the results side-by-side
        f, axarr = plt.subplots(1, 2)
        axarr[0].imshow(in_grid)
        axarr[0].set_title('Dataset Images')
        axarr[1].imshow(out_grid)
        axarr[1].set_title('Transformed Images')
for epoch in range(1, 20 + 1):
train(epoch)
test()
# Visualize the STN transformation on some input batch
visualize_stn()
plt.ioff()
plt.show()
|
"""
`Learn the Basics <intro.html>`_ ||
`Quickstart <quickstart_tutorial.html>`_ ||
`Tensors <tensorqs_tutorial.html>`_ ||
`Datasets & DataLoaders <data_tutorial.html>`_ ||
`Transforms <transforms_tutorial.html>`_ ||
`Build Model <buildmodel_tutorial.html>`_ ||
**Autograd** ||
`Optimization <optimization_tutorial.html>`_ ||
`Save & Load Model <saveloadrun_tutorial.html>`_
Automatic Differentiation with ``torch.autograd``
=================================================
When training neural networks, the most frequently used algorithm is
**back propagation**. In this algorithm, parameters (model weights) are
adjusted according to the **gradient** of the loss function with respect
to the given parameter.
To compute those gradients, PyTorch has a built-in differentiation engine
called ``torch.autograd``. It supports automatic computation of gradient for any
computational graph.
Consider the simplest one-layer neural network, with input ``x``,
parameters ``w`` and ``b``, and some loss function. It can be defined in
PyTorch in the following manner:
"""
import torch
x = torch.ones(5)  # input tensor
y = torch.zeros(3)  # expected output
# Leaf tensors created with requires_grad=True so autograd records ops on them.
w = torch.randn(5, 3, requires_grad=True)
b = torch.randn(3, requires_grad=True)
# Forward pass of a one-layer network: z = x @ w + b.
z = torch.matmul(x, w)+b
loss = torch.nn.functional.binary_cross_entropy_with_logits(z, y)
######################################################################
# Tensors, Functions and Computational graph
# ------------------------------------------
#
# This code defines the following **computational graph**:
#
# .. figure:: /_static/img/basics/comp-graph.png
# :alt:
#
# In this network, ``w`` and ``b`` are **parameters**, which we need to
# optimize. Thus, we need to be able to compute the gradients of loss
# function with respect to those variables. In order to do that, we set
# the ``requires_grad`` property of those tensors.
#######################################################################
# .. note:: You can set the value of ``requires_grad`` when creating a
# tensor, or later by using ``x.requires_grad_(True)`` method.
#######################################################################
# A function that we apply to tensors to construct computational graph is
# in fact an object of class ``Function``. This object knows how to
# compute the function in the *forward* direction, and also how to compute
# its derivative during the *backward propagation* step. A reference to
# the backward propagation function is stored in ``grad_fn`` property of a
# tensor. You can find more information about ``Function`` `in the
# documentation <https://pytorch.org/docs/stable/autograd.html#function>`__.
#
# Each tensor produced by a tracked op stores its backward function in grad_fn.
print(f"Gradient function for z = {z.grad_fn}")
print(f"Gradient function for loss = {loss.grad_fn}")
######################################################################
# Computing Gradients
# -------------------
#
# To optimize weights of parameters in the neural network, we need to
# compute the derivatives of our loss function with respect to parameters,
# namely, we need :math:`\frac{\partial loss}{\partial w}` and
# :math:`\frac{\partial loss}{\partial b}` under some fixed values of
# ``x`` and ``y``. To compute those derivatives, we call
# ``loss.backward()``, and then retrieve the values from ``w.grad`` and
# ``b.grad``:
#
# Backpropagate from the scalar loss; gradients accumulate into .grad.
loss.backward()
print(w.grad)
print(b.grad)
######################################################################
# .. note::
# - We can only obtain the ``grad`` properties for the leaf
# nodes of the computational graph, which have ``requires_grad`` property
# set to ``True``. For all other nodes in our graph, gradients will not be
# available.
# - We can only perform gradient calculations using
# ``backward`` once on a given graph, for performance reasons. If we need
# to do several ``backward`` calls on the same graph, we need to pass
# ``retain_graph=True`` to the ``backward`` call.
#
######################################################################
# Disabling Gradient Tracking
# ---------------------------
#
# By default, all tensors with ``requires_grad=True`` are tracking their
# computational history and support gradient computation. However, there
# are some cases when we do not need to do that, for example, when we have
# trained the model and just want to apply it to some input data, i.e. we
# only want to do *forward* computations through the network. We can stop
# tracking computations by surrounding our computation code with
# ``torch.no_grad()`` block:
#
# Tracked: z is produced from w and b, which require grad.
z = torch.matmul(x, w)+b
print(z.requires_grad)
# Inside no_grad(), the same computation is not recorded by autograd.
with torch.no_grad():
    z = torch.matmul(x, w)+b
print(z.requires_grad)
######################################################################
# Another way to achieve the same result is to use the ``detach()`` method
# on the tensor:
#
z = torch.matmul(x, w)+b
# detach() returns a view of the same data that is cut off from the graph.
z_det = z.detach()
print(z_det.requires_grad)
######################################################################
# There are reasons you might want to disable gradient tracking:
# - To mark some parameters in your neural network as **frozen parameters**.
# - To **speed up computations** when you are only doing forward pass, because computations on tensors that do
# not track gradients would be more efficient.
######################################################################
######################################################################
# More on Computational Graphs
# ----------------------------
# Conceptually, autograd keeps a record of data (tensors) and all executed
# operations (along with the resulting new tensors) in a directed acyclic
# graph (DAG) consisting of
# `Function <https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function>`__
# objects. In this DAG, leaves are the input tensors, roots are the output
# tensors. By tracing this graph from roots to leaves, you can
# automatically compute the gradients using the chain rule.
#
# In a forward pass, autograd does two things simultaneously:
#
# - run the requested operation to compute a resulting tensor
# - maintain the operation’s *gradient function* in the DAG.
#
# The backward pass kicks off when ``.backward()`` is called on the DAG
# root. ``autograd`` then:
#
# - computes the gradients from each ``.grad_fn``,
# - accumulates them in the respective tensor’s ``.grad`` attribute
# - using the chain rule, propagates all the way to the leaf tensors.
#
# .. note::
# **DAGs are dynamic in PyTorch**
# An important thing to note is that the graph is recreated from scratch; after each
# ``.backward()`` call, autograd starts populating a new graph. This is
# exactly what allows you to use control flow statements in your model;
# you can change the shape, size and operations at every iteration if
# needed.
######################################################################
# Optional Reading: Tensor Gradients and Jacobian Products
# --------------------------------------------------------
#
# In many cases, we have a scalar loss function, and we need to compute
# the gradient with respect to some parameters. However, there are cases
# when the output function is an arbitrary tensor. In this case, PyTorch
# allows you to compute so-called **Jacobian product**, and not the actual
# gradient.
#
# For a vector function :math:`\vec{y}=f(\vec{x})`, where
# :math:`\vec{x}=\langle x_1,\dots,x_n\rangle` and
# :math:`\vec{y}=\langle y_1,\dots,y_m\rangle`, a gradient of
# :math:`\vec{y}` with respect to :math:`\vec{x}` is given by **Jacobian
# matrix**:
#
# .. math::
#
#
# J=\left(\begin{array}{ccc}
# \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{1}}{\partial x_{n}}\\
# \vdots & \ddots & \vdots\\
# \frac{\partial y_{m}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}}
# \end{array}\right)
#
# Instead of computing the Jacobian matrix itself, PyTorch allows you to
# compute **Jacobian Product** :math:`v^T\cdot J` for a given input vector
# :math:`v=(v_1 \dots v_m)`. This is achieved by calling ``backward`` with
# :math:`v` as an argument. The size of :math:`v` should be the same as
# the size of the original tensor, with respect to which we want to
# compute the product:
#
inp = torch.eye(4, 5, requires_grad=True)
out = (inp+1).pow(2).t()
# backward() on a non-scalar output needs a "vector" v; this computes v^T . J.
out.backward(torch.ones_like(out), retain_graph=True)
print(f"First call\n{inp.grad}")
# A second backward on the same graph adds onto the previous gradients.
out.backward(torch.ones_like(out), retain_graph=True)
print(f"\nSecond call\n{inp.grad}")
# Zeroing .grad first yields the un-accumulated gradient again.
inp.grad.zero_()
out.backward(torch.ones_like(out), retain_graph=True)
print(f"\nCall after zeroing gradients\n{inp.grad}")
######################################################################
# Notice that when we call ``backward`` for the second time with the same
# argument, the value of the gradient is different. This happens because
# when doing ``backward`` propagation, PyTorch **accumulates the
# gradients**, i.e. the value of computed gradients is added to the
# ``grad`` property of all leaf nodes of computational graph. If you want
# to compute the proper gradients, you need to zero out the ``grad``
# property before. In real-life training an *optimizer* helps us to do
# this.
######################################################################
# .. note:: Previously we were calling ``backward()`` function without
# parameters. This is essentially equivalent to calling
# ``backward(torch.tensor(1.0))``, which is a useful way to compute the
# gradients in case of a scalar-valued function, such as loss during
# neural network training.
#
######################################################################
# --------------
#
#################################################################
# Further Reading
# ~~~~~~~~~~~~~~~~~
# - `Autograd Mechanics <https://pytorch.org/docs/stable/notes/autograd.html>`_
|
"""
Changing default device
=======================
It is common practice to write PyTorch code in a device-agnostic way,
and then switch between CPU and CUDA depending on what hardware is available.
Typically, you might have used if-statements and ``cuda()`` calls
to do this:
.. note::
This recipe requires PyTorch 2.0.0 or later.
"""
import torch
# Flip to True on a CUDA machine to move the module and inputs to the GPU.
USE_CUDA = False
mod = torch.nn.Linear(20, 30)
if USE_CUDA:
    mod.cuda()
device = 'cpu'
if USE_CUDA:
    device = 'cuda'
inp = torch.randn(128, 20, device=device)
print(mod(inp).device)
###################################################################
# PyTorch now also has a context manager which can take care of the
# device transfer automatically. Here is an example:
# Tensors and modules created inside this block default to the 'cuda' device.
with torch.device('cuda'):
    mod = torch.nn.Linear(20, 30)
    print(mod.weight.device)
    print(mod(torch.randn(128, 20)).device)
#########################################
# You can also set it globally like this:
torch.set_default_device('cuda')
mod = torch.nn.Linear(20, 30)
print(mod.weight.device)
print(mod(torch.randn(128, 20)).device)
################################################################
# This function imposes a slight performance cost on every Python
# call to the torch API (not just factory functions). If this
# is causing problems for you, please comment on
# `this issue <https://github.com/pytorch/pytorch/issues/92701>`__
|
# Python 2 script: reads lines until EOF and tallies how many extra
# characters re-encoding each string literal would need.
# NOTE(review): appears to be Advent of Code 2015 day 8 part 2 — confirm.
char_total = 0
result = 0
while True:
    try:
        string = raw_input()
    except EOFError:
        print ("Error: EOF or empty input!")
        break
    char_total += len(string)
    # Each backslash pair in the text contributes 2 extra encoded characters.
    backslash = string.count("\\\\")*2
    # Blank out counted pairs so the remaining counts do not overlap them.
    string = string.replace("\\\\", ' ')
    result += string.count("\"")*2 + string.count("\\x") + backslash
print "Total Caractere: %d" % char_total
print "Total Caractere Encode: %d" % (char_total + result)
print "Caractere Encode - Caractere: %d" % result
|
def xy(coord):
    """Trace arrow moves in coord, adding each visited cell to the
    module-level ``coords`` set ('^' +x, 'v' -x, '>' +y, '<' -y)."""
    x = 0
    y = 0
    # NOTE(review): the loop variable shadows the function name; harmless
    # here but confusing.
    for xy in coord:
        if xy == '^':
            x += 1
        elif xy == 'v':
            x -= 1
        elif xy == '>':
            y += 1
        elif xy == '<':
            y -= 1
        coords.add((x,y))
# Python 2 script: alternate moves are split between "santa" (even indices)
# and "robot" (odd indices); both walkers start at (0, 0).
coord_input = list(raw_input())
santa = coord_input[0::2]
robot = coord_input[1::2]
coords = {(0,0)}
xy(santa)
xy(robot)
print coords
print len(coords)
|
import unittest
# O(n^2). Array slicing
class Solution:
    def isAdditiveNumber(self, num):
        """Return True if num splits into an additive (Fibonacci-like) sequence.

        :type num: str
        :rtype: bool
        """
        if not num:
            return False

        def extends_to_end(first, second, start):
            # Greedily verify each successor first+second appears at
            # position `start`, until the whole string is consumed.
            while start < len(num):
                expected = str(first + second)
                if not num.startswith(expected, start):
                    return False
                first, second = second, first + second
                start += len(expected)
            return True

        max_first_len = (len(num) - 1) >> 1
        if num[0] == '0':
            # A leading zero is only allowed for the single number "0".
            max_first_len = min(max_first_len, 1)
        for first_len in range(1, max_first_len + 1):
            first = int(num[:first_len])
            rest = len(num) - first_len
            max_second_len = min(rest >> 1, rest - first_len)
            if num[first_len] == '0':
                max_second_len = min(max_second_len, 1)
            for second_len in range(1, max_second_len + 1):
                start = first_len + second_len
                second = int(num[first_len:start])
                if extends_to_end(first, second, start):
                    return True
        return False
class Test(unittest.TestCase):
    """Spot-checks known additive numbers."""

    def test(self):
        for num in ('112358', '199100199', '101', '011', '000',
                    '111122335588143'):
            self.assertTrue(Solution().isAdditiveNumber(num), msg=num)


if __name__ == '__main__':
    unittest.main()
|
import unittest
import heapq
import itertools
def _kth_largest(nums, k):
heap = []
for num in itertools.islice(nums, 0, k):
heapq.heappush(heap, num)
for num in itertools.islice(nums, k, len(nums)):
if num >= heap[0]:
heapq.heappushpop(heap, num)
return heap[0]
def _kth_smallest(nums, k):
heap = []
for num in itertools.islice(nums, 0, k):
heap.append(num)
heapq._siftdown_max(heap, 0, len(heap) - 1)
for num in itertools.islice(nums, k, len(nums)):
if num <= heap[0]:
heapq._heappushpop_max(heap, num)
return heap[0]
class Solution:
    def findKthLargest(self, nums, k):
        """Return the k-th largest element of nums.

        Uses whichever auxiliary heap is smaller: a k-sized heap for the
        largest side, or an (n - k + 1)-sized heap for the smallest side.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        if k <= len(nums) >> 1:
            return _kth_largest(nums, k)
        return _kth_smallest(nums, len(nums) - k + 1)
class Test(unittest.TestCase):
    """Single smoke test from the problem statement."""

    def test(self):
        self.assertEqual(5, Solution().findKthLargest([3, 2, 1, 5, 6, 4], 2))


if __name__ == '__main__':
    unittest.main()
|
import unittest
def partition(nums, lo, hi):
    """Partition nums[lo:hi] around nums[hi - 1] in descending order.

    Elements greater than the pivot end up before it; returns the final
    pivot index.
    """
    pivot = nums[hi - 1]
    boundary = lo  # first slot not yet holding a value > pivot
    for i in range(lo, hi):
        if nums[i] > pivot:
            nums[i], nums[boundary] = nums[boundary], nums[i]
            boundary += 1
    nums[boundary], nums[hi - 1] = nums[hi - 1], nums[boundary]
    return boundary


def kth_largest(nums, lo, hi, k):
    """Quickselect: return the element at index k of nums sorted in
    descending order, recursing only into the half that contains it."""
    pivot_index = partition(nums, lo, hi)
    if pivot_index == k:
        return nums[pivot_index]
    if pivot_index > k:
        return kth_largest(nums, lo, pivot_index, k)
    return kth_largest(nums, pivot_index + 1, hi, k)
# Quickselect, O(1) space. Best case and average O(n), worst case O(n^2) time
class Solution:
    def findKthLargest(self, nums, k):
        """Return the k-th largest element via quickselect.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        # The k-th largest sits at index k - 1 of the descending order.
        return kth_largest(nums, 0, len(nums), k - 1)
class Test(unittest.TestCase):
    """Covers the sample case and duplicate elements."""

    def test(self):
        for nums, k, expected in (([3, 2, 1, 5, 6, 4], 2, 5),
                                  ([0, 0], 2, 0)):
            self.assertEqual(expected, Solution().findKthLargest(nums, k))


if __name__ == '__main__':
    unittest.main()
|
import unittest
_offset = ord('0') << 1  # ord('0') counted twice when summing two digit chars


class Solution:
    def addBinary(self, a, b):
        """Add two binary strings and return the sum as a binary string.

        :type a: str
        :type b: str
        :rtype: str
        """
        if len(a) < len(b):
            a, b = b, a  # ensure a is the longer operand
        digits = []
        carry = 0
        i = len(a) - 1
        # Phase 1: add the positions covered by both strings.
        for j in range(len(b) - 1, -1, -1):
            total = ord(a[i]) + ord(b[j]) + carry - _offset
            carry, total = (1, total - 2) if total >= 2 else (0, total)
            digits.append(str(total))
            i -= 1
        # Phase 2: propagate the carry through the remainder of a.
        for i in range(i, -1, -1):
            total = ord(a[i]) + carry - ord('0')
            carry, total = (1, 0) if total == 2 else (0, total)
            digits.append(str(total))
        if carry:
            digits.append('1')
        return ''.join(reversed(digits))
class Test(unittest.TestCase):
    """Sample case with a carry past both operands."""

    def test(self):
        self.assertEqual('100', Solution().addBinary('11', '1'))


if __name__ == '__main__':
    unittest.main()
|
import unittest
from typing import List, Optional
import utils
from tree import TreeNode
# O(n) time. O(log(n)) space. Recursive DFS.
class Solution:
    def sortedArrayToBST(self, nums: List[int]) -> Optional[TreeNode]:
        """Build a height-balanced BST from a sorted array."""
        def build(lo: int, hi: int) -> Optional[TreeNode]:
            # Empty range -> no subtree.
            if lo >= hi:
                return None
            # The middle element becomes the root so both halves stay balanced.
            mid = (lo + hi) >> 1
            node = TreeNode(nums[mid])
            node.left = build(lo, mid)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(nums))
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        utils.test(self, __file__, Solution,
                   process_result=TreeNode.to_array_inorder)


if __name__ == '__main__':
    unittest.main()
|
import unittest
# O(n)
class Solution:
    def isOneBitCharacter(self, bits):
        """Return True if the final 0 must decode as a one-bit character.

        Walk the encoding left to right: a 1 starts a two-bit character,
        a 0 is a one-bit character on its own.

        :type bits: List[int]
        :rtype: bool
        """
        i = 0
        last_was_one_bit = False
        while i < len(bits):
            last_was_one_bit = bits[i] == 0
            i += 1 if last_was_one_bit else 2
        return last_was_one_bit
class Test(unittest.TestCase):
    """Both sample cases from the problem statement."""

    def test(self):
        for bits, expected in (([1, 0, 0], True), ([1, 1, 1, 0], False)):
            self.assertEqual(expected, Solution().isOneBitCharacter(bits))


if __name__ == '__main__':
    unittest.main()
|
import unittest
class Solution:
    def majorityElement(self, nums):
        """Return the element appearing more than len(nums) // 2 times.

        A majority element always occupies the middle of the sorted order.

        :type nums: List[int]
        :rtype: int
        """
        ordered = sorted(nums)
        return ordered[len(ordered) >> 1]
class Test(unittest.TestCase):
    """Includes the single-element edge case."""

    def test(self):
        for nums, expected in (([1, 2, 2, 2], 2), ([-1], -1)):
            self.assertEqual(expected, Solution().majorityElement(nums))


if __name__ == '__main__':
    unittest.main()
|
import unittest
from tree import TreeNode
class Solution:
    def convertBST(self, root):
        """Convert a BST to a Greater Tree in place.

        Iterative reverse in-order traversal (right, node, left) carrying a
        running suffix sum, so each node's value becomes the sum of all
        values greater than or equal to it.

        :type root: TreeNode
        :rtype: TreeNode
        """
        running = 0
        pending = []
        node = root
        while node or pending:
            # Slide as far right as possible before visiting.
            while node:
                pending.append(node)
                node = node.right
            node = pending.pop()
            running += node.val
            node.val = running
            node = node.left
        return root
class Test(unittest.TestCase):
    """Round-trips a small BST through array (de)serialization."""

    def test_serialize(self):
        root = TreeNode.from_array([5, 2, 13])
        converted = Solution().convertBST(root)
        self.assertEqual([18, 20, 13], converted.to_array())


if __name__ == '__main__':
    unittest.main()
|
import unittest
class Solution:
    def canWinNim(self, n):
        """Return True if the first player wins Nim with n stones.

        The first player loses exactly when n is a multiple of 4: whatever
        1-3 stones they take, the opponent can restore a multiple of 4.

        :type n: int
        :rtype: bool
        """
        return bool(n % 4)
class Test(unittest.TestCase):
    """Checks the repeating win/lose pattern for n = 1..8."""

    def test(self):
        for n in range(1, 9):
            self.assertEqual(n % 4 != 0, Solution().canWinNim(n), msg=n)


if __name__ == '__main__':
    unittest.main()
|
import unittest
class Solution:
    def evalRPN(self, tokens):
        """Evaluate an expression in Reverse Polish Notation.

        Division truncates toward zero, per the problem statement.

        :type tokens: List[str]
        :rtype: int
        """
        stack = []
        for token in tokens:
            if token == '+':
                stack.append(stack.pop() + stack.pop())
            elif token == '-':
                right = stack.pop()
                stack.append(stack.pop() - right)
            elif token == '*':
                stack.append(stack.pop() * stack.pop())
            elif token == '/':
                right = stack.pop()
                left = stack.pop()
                # int() truncates toward zero (unlike floor division).
                stack.append(int(float(left) / right))
            else:
                stack.append(int(token))
        return stack.pop()
class Test(unittest.TestCase):
    """All three sample expressions, including truncating division."""

    def test(self):
        cases = (
            (['2', '1', '+', '3', '*'], 9),
            (['4', '13', '5', '/', '+'], 6),
            (['10', '6', '9', '3', '+', '-11', '*', '/', '*', '17', '+',
              '5', '+'], 22),
        )
        for tokens, expected in cases:
            self.assertEqual(expected, Solution().evalRPN(tokens))


if __name__ == '__main__':
    unittest.main()
|
import unittest
import utils
# O(n) time. O(n) space. Iteration.
class Solution:
    def addStrings(self, num1: str, num2: str) -> str:
        """Add two non-negative decimal integers given as strings."""
        if len(num1) < len(num2):
            num1, num2 = num2, num1  # num1 is now the longer operand
        zero = ord('0')
        digits = []
        carry = 0
        # Walk both strings from their least significant digits.
        for offset in range(len(num1)):
            total = ord(num1[len(num1) - 1 - offset]) - zero + carry
            if offset < len(num2):
                total += ord(num2[len(num2) - 1 - offset]) - zero
            if total >= 10:
                total -= 10
                carry = 1
            else:
                carry = 0
            digits.append(str(total))
        if carry == 1:
            digits.append('1')
        return ''.join(reversed(digits))
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        utils.test(self, __file__, Solution)


if __name__ == '__main__':
    unittest.main()
|
import unittest
from typing import List
import utils
# O(n^2) time. O(1) space. Brute-force.
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Return indices i < j such that nums[i] + nums[j] == target.

        Brute force: O(n^2) time, O(1) space.
        """
        for i, first in enumerate(nums):
            needed = target - first
            for j in range(i + 1, len(nums)):
                if nums[j] == needed:
                    return [i, j]
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        utils.test(self, __file__, Solution)


if __name__ == '__main__':
    unittest.main()
|
import unittest
class Solution:
    def matrixReshape(self, nums, r, c):
        """Reshape matrix nums to r rows by c columns, preserving row-major
        order; return nums unchanged if the element counts differ.

        :type nums: List[List[int]]
        :type r: int
        :type c: int
        :rtype: List[List[int]]
        """
        if not nums or len(nums) * len(nums[0]) != r * c:
            return nums
        width = len(nums[0])
        src_row = src_col = 0
        reshaped = []
        for _ in range(r):
            row = []
            remaining = c
            # Consume whole tail slices of source rows while the new row
            # still needs more than one source row can provide.
            while width - src_col < remaining:
                row += nums[src_row][src_col:]
                remaining -= width - src_col
                src_row += 1
                src_col = 0
            row += nums[src_row][src_col:src_col + remaining]
            src_col += remaining
            reshaped.append(row)
        return reshaped
class Test(unittest.TestCase):
    """Covers a valid reshape and an impossible one."""

    def test(self):
        square = [
            [1, 2],
            [3, 4],
        ]
        self.assertEqual([[1, 2, 3, 4]],
                         Solution().matrixReshape(square, 1, 4))
        # An impossible reshape returns the input unchanged.
        self.assertEqual(square, Solution().matrixReshape(square, 2, 4))


if __name__ == '__main__':
    unittest.main()
|
import unittest
import utils
def parse_num(s, i):
    """Skip spaces at i, read a nonnegative integer; return (next_i, num)."""
    while i < len(s) and s[i] == ' ':
        i += 1
    num = 0
    while i < len(s) and s[i].isdigit():
        num = num * 10 + ord(s[i]) - ord('0')
        i += 1
    return i, num


def parse_operator(s, i):
    """Skip spaces at i, read one operator; '+' is synthesized at end of input."""
    while i < len(s) and s[i] == ' ':
        i += 1
    op = s[i] if i < len(s) else '+'
    return i + 1, op


# O(n) time. O(1) space. Parsing.
class Solution:
    def calculate(self, s: str) -> int:
        """Evaluate s containing +, -, *, / (no parentheses)."""
        total = 0
        sign = 1
        i, term = parse_num(s, 0)
        # One extra iteration (i == len(s)) flushes the final product via
        # the synthetic trailing '+'.
        while i <= len(s):
            i, op = parse_operator(s, i)
            i, value = parse_num(s, i)
            if op == '*':
                term *= value
            elif op == '/':
                term //= value
            elif op == '+' or op == '-':
                # Fold the finished term into the total and remember the
                # sign for the next term.
                total += sign * term
                sign = 44 - ord(op)  # ord('+') == 43, ord('-') == 45
                term = value
        return total
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        utils.test(self, __file__, Solution)


if __name__ == '__main__':
    unittest.main()
|
import unittest
import utils
# Built-in string searching.
class Solution:
    def rotateString(self, a, b):
        """Return True if b is a rotation of a.

        Every rotation of a is a substring of a doubled; the containment
        check uses CPython's C-level fast search.
        https://github.com/python/cpython/blob/master/Objects/stringlib/fastsearch.h

        :type a: str
        :type b: str
        :rtype: bool
        """
        return len(a) == len(b) and b in a + a
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        for case in utils.load_test_json(__file__).test_cases:
            actual = Solution().rotateString(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=str(case.args))


if __name__ == '__main__':
    unittest.main()
|
import unittest
from typing import List
import utils
# O(n * sum(nums)) time. O(sum(nums)) space. Space-optimized DP, 0-1 knapsack.
class Solution:
    def canPartition(self, nums: List[int]) -> bool:
        """Return True if nums can split into two subsets of equal sum.

        0-1 knapsack over half the total with a 1-D rolling DP table.
        O(n * sum(nums)) time, O(sum(nums)) space.
        """
        total = sum(nums)
        if total & 1:
            return False  # an odd total can never split evenly
        half = total >> 1
        # reachable[j]: some subset of the numbers seen so far sums to j.
        reachable = [False] * (half + 1)
        reachable[0] = True
        for num in nums:
            # Iterate downward so each number is used at most once.
            for j in range(half, num - 1, -1):
                reachable[j] |= reachable[j - num]
            if reachable[half]:
                return True  # early exit once the target is hit
        return reachable[half]
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        for case in utils.load_test_json(__file__).test_cases:
            actual = Solution().canPartition(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=str(case.args))


if __name__ == '__main__':
    unittest.main()
|
import unittest
import utils
class ProductOfNumbers:
    """Streams integers and answers "product of the last k" queries.

    Keeps running prefix products; a zero resets the history because any
    window crossing it has product 0.
    """

    def __init__(self):
        self.products = [1]

    def add(self, num: int) -> None:
        if num:
            self.products.append(self.products[-1] * num)
        else:
            # A zero wipes every prefix: any later window reaching back
            # past this point would have product 0 anyway.
            self.products = [1]

    def getProduct(self, k: int) -> int:
        if k >= len(self.products):
            return 0  # window crosses a zero (or predates the stream)
        return self.products[-1] // self.products[-k - 1]
class Test(unittest.TestCase):
    """Replays the recorded invocation sequence next to this file."""

    def test(self):
        utils.test_invocations(self, __file__, ProductOfNumbers)


if __name__ == '__main__':
    unittest.main()
|
import unittest
from tree import TreeNode
class Solution:
    def convertBST(self, root):
        """Convert a BST to a Greater Tree in place.

        Reverse in-order (right, node, left) recursion threading a running
        suffix sum: each call returns the sum accumulated so far, which the
        caller adds to its own value.

        :type root: TreeNode
        :rtype: TreeNode
        """
        def accumulate(node, greater_sum):
            # greater_sum: total of all values visited so far (everything
            # greater than this subtree).
            if not node:
                return greater_sum
            node.val += accumulate(node.right, greater_sum)
            return accumulate(node.left, node.val)

        accumulate(root, 0)
        return root
class Test(unittest.TestCase):
    """Round-trips a small BST through array (de)serialization."""

    def test_serialize(self):
        root = TreeNode.from_array([5, 2, 13])
        converted = Solution().convertBST(root)
        self.assertEqual([18, 20, 13], converted.to_array())


if __name__ == '__main__':
    unittest.main()
|
import unittest
import test_0385
from nested_integer import NestedInteger
class NestedIterator:
    """Flattening iterator over a nested list of NestedInteger values.

    Maintains an explicit cursor (self.next_index into self.curr_list) plus
    a stack of suspended (index, list) frames, and always looks one element
    ahead: self.next_item caches the next integer, or None when exhausted.
    """
    def __init__(self, nestedList: [NestedInteger]):
        self.stack = []
        self.next_index = -1
        if nestedList:
            self.curr_list = nestedList
            self.next_item = self._move_next()
        else:
            self.curr_list = []
            self.next_item = None
    def _move_next(self):
        """Advance the cursor to the next integer and return it (None if done)."""
        while True:
            while True:
                self.next_index += 1
                if self.next_index >= len(self.curr_list):
                    # Current list exhausted; resume the parent frame below.
                    break
                next_item = self.curr_list[self.next_index]
                if next_item.isInteger():
                    return next_item.getInteger()
                children = next_item.getList()
                # Descend into a non-empty sublist, suspending this frame.
                if len(children) > 0:
                    self.stack.append((self.next_index, self.curr_list))
                    self.next_index = -1
                    self.curr_list = children
            if self.stack:
                self.next_index, self.curr_list = self.stack.pop()
            else:
                return None
    def next(self) -> int:
        # Return the cached integer and pre-fetch the following one.
        curr = self.next_item
        self.next_item = self._move_next()
        return curr
    def hasNext(self) -> bool:
        # NOTE(review): relies on integers in the data never being None.
        return self.next_item is not None
class Test(unittest.TestCase):
    """Flattens several nesting shapes, including empty sublists."""

    def test(self):
        cases = (
            ('[[1,1],2,[1,1]]', [1, 1, 2, 1, 1]),
            ('[[1,1],2,[],[1,1,[1,1]]]', [1, 1, 2, 1, 1, 1, 1]),
            ('[1]', [1]),
            ('[]', []),
            ('[[]]', []),
        )
        for serialized, expected in cases:
            nested = test_0385.Solution().deserialize(serialized).getList()
            it = NestedIterator(nested)
            flattened = []
            while it.hasNext():
                flattened.append(it.next())
            self.assertEqual(expected, flattened, msg=serialized)


if __name__ == '__main__':
    unittest.main()
|
import collections
import unittest
from typing import Optional
import utils
from tree import TreeNode
# O(n) time. O(n) space. BFS.
class Solution:
    def minDepth(self, root: Optional[TreeNode]) -> int:
        """Return the number of nodes on the shortest root-to-leaf path.

        BFS level by level; the first leaf reached lies on a shortest path.
        """
        if not root:
            return 0
        frontier = collections.deque([root])
        depth = 1
        while frontier:
            for _ in range(len(frontier)):
                node = frontier.popleft()
                if not node.left and not node.right:
                    return depth  # first leaf found -> minimal depth
                if node.left:
                    frontier.append(node.left)
                if node.right:
                    frontier.append(node.right)
            depth += 1
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        utils.test(self, __file__, Solution,
                   process_args=TreeNode.from_root_array)


if __name__ == '__main__':
    unittest.main()
|
import unittest
class Solution:
    def myPow(self, x, n):
        """Compute x**n by binary exponentiation (square-and-multiply).

        :type x: float
        :type n: int
        :rtype: float
        """
        if not n:
            return 1  # x**0 == 1, including 0**0 here
        if not x:
            return 0  # 0**n == 0 for n != 0
        negative = n < 0
        if negative:
            n = -n
        result = 1
        base = x
        while n:
            if n & 1:
                result *= base
            base *= base
            n >>= 1
        return 1.0 / result if negative else result
class Test(unittest.TestCase):
    """Table of positive/negative exponents and zero bases."""

    def test(self):
        cases = (
            (2, -2, 0.25), (2, -1, 0.5), (2, 0, 1), (2, 1, 2), (2, 2, 4),
            (2, 3, 8), (2, 4, 16), (2, 5, 32), (0, 1, 0), (0, 0, 1),
            (-2, 2, 4), (-2, 3, -8),
        )
        for x, n, expected in cases:
            self.assertEqual(expected, Solution().myPow(x, n))


if __name__ == '__main__':
    unittest.main()
|
import unittest
from tree import TreeNode
class Solution:
    def averageOfLevels(self, root):
        """Return the average node value of each level, top to bottom.

        :type root: TreeNode
        :rtype: List[float]
        """
        if not root:
            return []
        averages = []
        level = [root]
        while level:
            total = sum(node.val for node in level)
            averages.append(float(total) / len(level))
            next_level = []
            for node in level:
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
            level = next_level
        return averages
class Test(unittest.TestCase):
    """Sample tree from the problem statement."""

    def test(self):
        root = TreeNode.from_array([3, 9, 20, None, None, 15, 7])
        self.assertEqual([3, 14.5, 11], Solution().averageOfLevels(root))


if __name__ == '__main__':
    unittest.main()
|
import math
import unittest
import utils
def reverse(num):
    """Return num with its decimal digits reversed."""
    reversed_num = 0
    while num > 0:
        num, digit = divmod(num, 10)
        reversed_num = reversed_num * 10 + digit
    return reversed_num


class Solution:
    def largestPalindrome(self, n):
        """Return (the largest palindrome that is a product of two n-digit
        numbers) modulo 1337.

        Enumerates candidate palindromes from the largest down by choosing
        their upper half, then checks for an n-digit factor pair.

        :type n: int
        :rtype: int
        """
        if n == 1:
            return 9
        max_factor = 10 ** n - 1
        for upper in range(10 ** n - 2, 10 ** (n - 1) - 1, -1):
            # Even-length palindrome whose first half is `upper`.
            candidate = upper * 10 ** n + reverse(upper)
            # Factors pair up around sqrt(candidate); scan the larger one.
            lower_bound = int(math.ceil(math.sqrt(candidate)))
            for factor in range(max_factor, lower_bound - 1, -1):
                other, remainder = divmod(candidate, factor)
                if remainder == 0:
                    return candidate % 1337
                if other > max_factor:
                    break
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        # The last 2 tests are too slow to run here.
        for case in utils.load_test_json(__file__).test_cases[:-2]:
            actual = Solution().largestPalindrome(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=str(case.args))


if __name__ == '__main__':
    unittest.main()
|
import unittest
import utils
# O(n) time. O(1) space. Iteration.
class Solution:
    def shortestToChar(self, s, c):
        """For each index of s, give the distance to the closest c.

        Scans consecutive occurrences of c and fills each gap: the left
        half counts up from the left occurrence, the right half counts
        down toward the right one.

        :type s: str
        :type c: str
        :rtype: List[int]
        """
        distances = [0] * len(s)
        right = s.find(c)
        # Prefix before the first occurrence only has a neighbor on the right.
        for i in range(right):
            distances[i] = right - i
        while True:
            left = right
            right = s.find(c, left + 1)
            if right == -1:
                break
            split = left + ((right - left + 1) >> 1)
            for i in range(left + 1, split):
                distances[i] = i - left
            for i in range(split, right):
                distances[i] = right - i
        # Suffix after the last occurrence only has a neighbor on the left.
        for i in range(left + 1, len(s)):
            distances[i] = i - left
        return distances
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        for case in utils.load_test_json(__file__).test_cases:
            actual = Solution().shortestToChar(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=str(case.args))


if __name__ == '__main__':
    unittest.main()
|
import unittest
class Solution:
    def addDigits(self, num):
        """Repeatedly sum decimal digits until a single digit remains.

        :type num: int
        :rtype: int
        """
        while num > 9:
            # Replace num with the sum of its digits and repeat.
            digit_sum = 0
            while num:
                num, digit = divmod(num, 10)
                digit_sum += digit
            num = digit_sum
        return num
class Test(unittest.TestCase):
    """Sample case plus the zero edge case."""

    def test(self):
        for num, expected in ((38, 2), (0, 0)):
            self.assertEqual(expected, Solution().addDigits(num))


if __name__ == '__main__':
    unittest.main()
|
import unittest
from typing import Optional
import utils
from tree import TreeNode
# O(n) time. O(log(n)) space. Top down, recursive DFS.
class Solution:
    def longestZigZag(self, root: Optional[TreeNode]) -> int:
        """Return the length (in edges) of the longest zigzag path.

        Top-down DFS carrying the length of the zigzag ending at the
        current node together with the direction of the edge used to
        reach it.
        """
        best = 0

        def dfs(node, length, came_left):
            nonlocal best
            best = max(best, length)
            if node.left:
                # Going left extends the zigzag only if we last went right.
                dfs(node.left, 1 if came_left else length + 1, True)
            if node.right:
                dfs(node.right, length + 1 if came_left else 1, False)

        if root.left:
            dfs(root.left, 1, True)
        if root.right:
            dfs(root.right, 1, False)
        return best
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        utils.test(self, __file__, Solution,
                   process_args=TreeNode.from_root_array)


if __name__ == '__main__':
    unittest.main()
|
import itertools
import unittest
import utils
# O(n!) time. O(n) space. Backtracking.
class Solution:
    def largestTimeFromDigits(self, a):
        """Return the latest valid 24-hour time using each digit once.

        Tries every permutation and keeps the maximum expressed in
        minutes-since-midnight; '' when no permutation is a valid time.

        :type a: List[int]
        :rtype: str
        """
        best = -1
        for d1, d2, d3, d4 in itertools.permutations(a):
            hour = d1 * 10 + d2
            minute = d3 * 10 + d4
            if 0 <= hour <= 23 and 0 <= minute <= 59:
                best = max(best, hour * 60 + minute)
        if best == -1:
            return ''
        return '{:02d}:{:02d}'.format(*divmod(best, 60))
class Test(unittest.TestCase):
    """Drives the solution against the JSON cases next to this file."""

    def test(self):
        for case in utils.load_test_json(__file__).test_cases:
            actual = Solution().largestTimeFromDigits(**case.args.__dict__)
            self.assertEqual(case.expected, actual, msg=str(case.args))


if __name__ == '__main__':
    unittest.main()
|
import unittest
import math
class Solution:
    def isPowerOfThree(self, n):
        """Return True if n is a power of 3, without loops.

        The largest power of 3 in signed 32-bit range (3**19) is divisible
        by exactly the powers of 3, so n divides it iff n is one of them.

        :type n: int
        :rtype: bool
        """
        if n <= 0:
            return False
        largest_power = int(math.pow(3, math.floor(math.log(0x7fffffff, 3))))
        return largest_power % n == 0
class Test(unittest.TestCase):
    """Powers of three versus non-powers, including zero."""

    def test(self):
        for n in (1, 3, 9, 27, 81):
            self.assertTrue(Solution().isPowerOfThree(n), msg=n)
        for n in (2, 4, 33, 45, 0):
            self.assertFalse(Solution().isPowerOfThree(n), msg=n)


if __name__ == '__main__':
    unittest.main()
|
import unittest
class Solution:
    def singleNonDuplicate(self, nums):
        """Return the element appearing once in a sorted array where every
        other element appears exactly twice.

        Binary search over pairs: before the single element every pair
        starts at an even index; after it, pairs are misaligned.

        :type nums: List[int]
        :rtype: int
        """
        lo, hi = 0, (len(nums) >> 1) - 1
        while lo <= hi:
            mid = lo + ((hi - lo) >> 1)
            # Pair `mid` is intact iff the single element lies to its right.
            if nums[mid << 1] == nums[(mid << 1) + 1]:
                lo = mid + 1
            else:
                hi = mid - 1
        return nums[lo << 1]
class Test(unittest.TestCase):
    """Singles at the middle, ends, and the one-element array."""

    def test(self):
        cases = (
            ([1, 1, 2, 3, 3, 4, 4, 8, 8], 2),
            ([3, 3, 7, 7, 10, 11, 11], 10),
            ([1, 1, 2], 2),
            ([1, 2, 2], 1),
            ([1], 1),
        )
        for nums, expected in cases:
            self.assertEqual(expected, Solution().singleNonDuplicate(nums))


if __name__ == '__main__':
    unittest.main()
|
import unittest
class Solution:
    def reverseWords(self, s):
        """Reverse the order of words in s.

        Words are runs of non-space characters; leading/trailing spaces are
        dropped and runs of spaces between words collapse to a single space.

        The original ~40-line in-place compaction (trim, reverse the whole
        string, then re-reverse each word) is equivalent to splitting on
        spaces, reversing the word list, and joining — clearer and run in C.
        Splitting on ' ' specifically (not .split() with no argument)
        preserves the original's space-only separator semantics.

        :type s: str
        :rtype: str
        """
        words = [word for word in s.split(' ') if word]
        return ' '.join(reversed(words))
class Test(unittest.TestCase):
def test(self):
self._test(" the sky is blue ", "blue is sky the")
self._test(" ", "")
def _test(self, s, expected):
actual = Solution().reverseWords(s)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
class Solution:
    def countSegments(self, s):
        """
        Count maximal runs of non-space characters in s.

        :type s: str
        :rtype: int
        """
        # Split on the space character exactly (not general whitespace, to
        # preserve the original semantics) and count the non-empty pieces —
        # each one is a segment.  Replaces the hand-rolled state machine.
        return len([word for word in s.split(' ') if word])
class Test(unittest.TestCase):
def test(self):
self._test('Hello, my name is John', 5)
self._test(' Hello, my name is John ', 5)
def _test(self, s, expected):
actual = Solution().countSegments(s)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
class Solution:
    def findRelativeRanks(self, nums):
        """
        Replace each score with its rank string: the top three become
        'Gold Medal', 'Silver Medal', 'Bronze Medal'; the rest become
        '4', '5', ...  Mutates nums in place and returns it.

        :type nums: List[int]
        :rtype: List[str]
        """
        medals = ('Gold Medal', 'Silver Medal', 'Bronze Medal')
        order = sorted(range(len(nums)), key=lambda i: nums[i], reverse=True)
        for rank, i in enumerate(order):
            nums[i] = medals[rank] if rank < 3 else str(rank + 1)
        return nums
class Test(unittest.TestCase):
def test(self):
self._test(
[5, 4, 3, 2, 1],
['Gold Medal', 'Silver Medal', 'Bronze Medal', '4', '5']
)
def _test(self, nums, expected):
actual = Solution().findRelativeRanks(nums)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
class Solution:
    def maxArea(self, height):
        """
        Container-with-most-water: maximize min(height[i], height[j]) * (j - i).
        Two pointers move inward from both ends, always advancing the shorter
        side; the area is only recomputed when the moved pointer lands on a
        taller line, since a line no taller than the old one cannot beat the
        previously recorded area.

        :type height: List[int]
        :rtype: int
        """
        lo = 0
        hi = len(height) - 1
        lo_height = height[lo]
        hi_height = height[hi]
        max_area = min(lo_height, hi_height) * (hi - lo)
        while lo < hi:
            if lo_height <= hi_height:
                # left side is the (weakly) shorter one: advance it
                lo += 1
                is_higher = height[lo] > lo_height
                lo_height = height[lo]
                if is_higher:
                    max_area = max(max_area, min(lo_height, hi_height) * (hi - lo))
            else:
                # right side is shorter: retreat it
                hi -= 1
                is_higher = height[hi] > hi_height
                hi_height = height[hi]
                if is_higher:
                    max_area = max(max_area, min(lo_height, hi_height) * (hi - lo))
        return max_area
class Test(unittest.TestCase):
def test(self):
self._test([2, 2, 1, 0, 4], 8)
self._test([2, 3, 1, 0, 4], 9)
def _test(self, input, expected):
actual = Solution().maxArea(input)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
class Solution:
    def checkRecord(self, s):
        """
        An attendance record is rewardable when it contains fewer than two
        'A's (absences) and never three consecutive 'L's (lates).

        :type s: str
        :rtype: bool
        """
        if 'LLL' in s:
            return False
        return s.count('A') <= 1
class Test(unittest.TestCase):
def test(self):
self._test('PPALLP', True)
self._test('PPALLL', False)
def _test(self, s, expected):
actual = Solution().checkRecord(s)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
from linkedlist import ListNode
class Solution:
def insertionSortList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head:
return None
sorted_head = ListNode(0)
while head:
val = head.val
curr = sorted_head
while curr.next and curr.next.val < val:
curr = curr.next
next_ = head.next
head.next = curr.next
curr.next = head
head = next_
return sorted_head.next
class Test(unittest.TestCase):
def test(self):
self._test([4, 2, 6, 1, 3, 5, 7], [1, 2, 3, 4, 5, 6, 7])
self._test([1], [1])
self._test([], [])
def _test(self, head, expected):
head = ListNode.from_array(head)
actual = Solution().insertionSortList(head)
actual = ListNode.to_array(actual)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
from typing import List
import math
import utils
# O(n) time. O(1) space. Algebra, arithmetic progression.
class Solution:
def distributeCandies(self, candies: int, num_people: int) -> List[int]:
result = [0] * num_people
repeat = int((math.sqrt(1 + 8 * candies) - 1) / 2)
num_rounds, extra_people = divmod(repeat, num_people)
for i in range(num_people):
num_rounds_for_me = num_rounds + (i < extra_people)
a1 = i + 1
an = a1 + num_people * (num_rounds_for_me - 1)
result[i] = (a1 + an) * num_rounds_for_me // 2
extra_candies = candies - (1 + repeat) * repeat // 2
if extra_candies:
person_got_extra_candies = repeat % num_people
result[person_got_extra_candies] += extra_candies
return result
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().distributeCandies(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
|
import unittest
from linkedlist import ListNode
class Solution:
    def detectCycle(self, head):
        """
        Return the node where the list's cycle begins, or None if acyclic.
        Floyd's tortoise-and-hare: once the two pointers meet inside the
        cycle, restarting one from head and stepping both one node at a time
        makes them meet exactly at the cycle's entry node.

        :type head: ListNode
        :rtype: ListNode
        """
        if not head:
            return None
        t = head  # tortoise: one step per iteration
        h = head  # hare: two steps per iteration
        while True:
            h = h.next
            if not h:
                return None  # hit the tail -> no cycle
            h = h.next
            if not h:
                return None
            t = t.next
            if t is h:
                break  # met inside the cycle
        # phase 2: locate the cycle entry
        t = head
        while t is not h:
            t = t.next
            h = h.next
        return t
class Test(unittest.TestCase):
def test(self):
self._test([0, 1, 2, 3, 4, 5], 0)
self._test([0, 1, 2, 3, 4, 5], 4)
self._test([0, 1, 2, 3, 4, 5], 5)
self._test([0, 1, 2, 3, 4, 5], -1)
def _test(self, nums, loop_start_index):
root = ListNode.from_array(nums)
if loop_start_index < 0:
loop_start_node = None
else:
loop_start_node = root
for i in range(loop_start_index):
loop_start_node = loop_start_node.next
curr = loop_start_node
while curr.next:
curr = curr.next
curr.next = loop_start_node
actual = Solution().detectCycle(root)
self.assertEqual(loop_start_node, actual)
if __name__ == '__main__':
unittest.main()
|
import functools
import unittest
from typing import List
import utils
class Solution:
def shortestSuperstring(self, words: List[str]) -> str:
@functools.lru_cache(None)
def suffix(w1, w2):
best = w2
for i in range(len(w1) + 1):
if w2.startswith(w1[-i:]):
best = w2[i:]
return best
@functools.lru_cache(None)
def dp(state, last):
if state + 1 == 1 << n:
return ''
return min(
[suffix(words[last], words[i]) + dp(state | (1 << i), i) for i in range(n) if state & (1 << i) == 0],
key=len)
n = len(words)
return min([words[i] + dp(1 << i, i) for i in range(n)], key=len)
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution, check_result=self.check_result)
def check_result(self, case, actual, msg):
self.assertEqual(len(case.expected), len(actual), msg)
if __name__ == '__main__':
unittest.main()
|
import unittest
class Solution:
    def lengthLongestPath(self, input):
        """
        Length of the longest absolute path to a file in the serialized
        filesystem (entries separated by newlines, depth encoded as leading
        tabs).  A file is any name containing a dot; returns 0 when there is
        no file at all.

        :type input: str
        :rtype: int
        """
        best = 0
        # prefix_len[d] = length of the directory path (incl. trailing '/')
        # leading to the current entry at depth d
        prefix_len = {0: 0}
        for entry in input.split('\n'):
            name = entry.lstrip('\t')
            depth = len(entry) - len(name)
            if '.' in name:
                best = max(best, prefix_len[depth] + len(name))
            else:
                prefix_len[depth + 1] = prefix_len[depth] + len(name) + 1
        return best
class Test(unittest.TestCase):
def test(self):
self._test('dir\n\tsubdir1\n\tsubdir2\n\t\tfile.ext', 20)
self._test('dir\n\tsubdir1\n\tsubdir2\n\t\tfile', 0)
self._test('dir\n\tsubdir1\n\t\tfile1.ext\n\t\tsubsubdir1\n\tsubdir2\n\t\tsubsubdir2\n\t\t\tfile2.ext', 32)
def _test(self, input, expected):
actual = Solution().lengthLongestPath(input)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
from typing import Optional
import utils
from tree import TreeNode
def dfs(node):
a = node.val # robbing this node
b = 0 # not robbing this node
if node.left:
la, lb = dfs(node.left)
a += lb
b += max(la, lb)
if node.right:
ra, rb = dfs(node.right)
a += rb
b += max(ra, rb)
return a, b
class Solution:
def rob(self, root: Optional[TreeNode]) -> int:
if not root:
return 0
a, b = dfs(root)
return max(a, b)
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution, process_args=TreeNode.from_root_array)
if __name__ == '__main__':
unittest.main()
|
import unittest
from typing import List
import utils
# O(n) time. O(n) space. DFS.
class Solution:
def findCircleNum(self, isConnected: List[List[int]]) -> int:
def dfs(i):
if isConnected[i][i] == 0:
return False
isConnected[i][i] = 0
for j in range(i):
if isConnected[j][i]:
dfs(j)
for j in range(i + 1, len(isConnected)):
if isConnected[i][j]:
dfs(j)
return True
result = 0
for i in range(len(isConnected)):
if dfs(i):
result += 1
return result
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution)
if __name__ == '__main__':
unittest.main()
|
import random
import unittest
def binary_search(a, x, lo=0, hi=-1):
n = len(a)
if hi < 0:
hi += n
# See OpenJDK Arrays.binarySearch0():
# https://github.com/openjdk/jdk/blob/f37d9c8abca50b65ed232831a06d60c1d015013f/src/java.base/share/classes/java/util/Arrays.java#L1829
# See CoreCLR ArrayHelpers::BinarySearchBitwiseEquals():
# https://github.com/dotnet/coreclr/blob/310c1903838f048afc66121212342e0d55b50d11/src/classlibnative/bcltype/arrayhelpers.h#L62
# See CPython bisect.bisect_right():
# https://github.com/python/cpython/blob/898318b53d921298d1f1fcfa0f415844afbeb318/Lib/bisect.py#L15
while lo <= hi:
mid = lo + ((hi - lo) >> 1)
mid_val = a[mid]
if mid_val < x:
lo = mid + 1
elif mid_val > x:
hi = mid - 1
else:
return mid
return ~lo
def binary_search_rightmost(a, x, left=0, right=-1):
"""
Returns the index of the rightmost x's right neighbor.
arg->right is inclusive.
"""
n = len(a)
if right < 0:
right += n
while left <= right:
mid = left + (right - left) // 2
if x < a[mid]:
right = mid - 1
else:
left = mid + 1
return left
def binary_insert(a, x, left=0, right=-1):
n = len(a)
if right < 0:
right += n
index = binary_search_rightmost(a, x, left, right)
a[index + 1: right + 2] = a[index: right + 1]
a[index] = x
# Insertion
def insertion_sort(a):
    """In-place insertion sort by adjacent swaps; stable, O(n^2) worst case.

    See OpenJDK Arrays.mergeSort(), Insertion sort on smallest arrays:
    https://github.com/openjdk/jdk/blob/f37d9c8abca50b65ed232831a06d60c1d015013f/src/java.base/share/classes/java/util/Arrays.java#L1349
    See CoreCLR ArrayHelpers::InsertionSort():
    https://github.com/dotnet/coreclr/blob/310c1903838f048afc66121212342e0d55b50d11/src/classlibnative/bcltype/arrayhelpers.h#L282
    """
    for i in range(1, len(a)):
        j = i
        # bubble a[i] leftward until its neighbor is no larger
        while j > 0 and a[j - 1] > a[j]:
            a[j - 1], a[j] = a[j], a[j - 1]
            j -= 1
def insertion_sort_memmove(a):
n = len(a)
for i in range(1, n):
next_val = a[i]
j = i - 1
while j >= 0 and a[j] > next_val:
j -= 1
# memmove(a + j + 1, a + j, i - j)
a[j + 2: i + 1] = a[j + 1: i]
a[j + 1] = next_val
def binary_insertion_sort(a):
n = len(a)
for i in range(1, n):
binary_insert(a, a[i], 0, i - 1)
def _shell_gaps(n):
gap = n // 2
while gap > 0:
yield gap
gap //= 2
def shell_sort(a):
n = len(a)
for gap in _shell_gaps(n):
for i in range(gap, n):
to_be_inserted = a[i]
j = i
while j >= gap and a[j - gap] > to_be_inserted:
a[j] = a[j - gap]
j -= gap
a[j] = to_be_inserted
# Swap
def bubble_sort(a):
    """In-place bubble sort: each pass floats the largest remaining element
    to the end of the still-unsorted prefix."""
    n = len(a)
    for end in range(n, 0, -1):
        for j in range(1, end):
            if a[j - 1] > a[j]:
                a[j - 1], a[j] = a[j], a[j - 1]
def _quick_sort(a, left, right):
if left >= right:
return
middle = left + (right - left) // 2
pivot = a[middle]
i = left
j = right
while i <= j:
while a[i] < pivot:
i += 1
while a[j] > pivot:
j -= 1
if i > j:
break
elif i < j:
a[i], a[j] = a[j], a[i]
i += 1
j -= 1
# Optimization: tail call
# See CLR ArrayHelpers::QuickSort():
# https://github.com/kasicass/sscli20/blob/dc64e12c9b835d4d373aa04978c0e8f1763b2e1b/clr/src/vm/comarrayhelpers.h#L77
# See CLR ArraySortHelper.QuickSort():
# https://github.com/kasicass/sscli20/blob/dc64e12c9b835d4d373aa04978c0e8f1763b2e1b/clr/src/bcl/system/collections/generic/arraysorthelper.cs#L70
_quick_sort(a, left, j)
_quick_sort(a, i, right)
def quick_sort(a):
_quick_sort(a, 0, len(a) - 1)
# Selection
def selection_sort(a):
    """In-place selection sort: swap the minimum of the unsorted suffix
    into position i on every pass (first occurrence wins on ties, matching
    the strict '<' comparison of a manual scan)."""
    n = len(a)
    for i in range(n):
        smallest = min(range(i, n), key=a.__getitem__)
        a[i], a[smallest] = a[smallest], a[i]
# Merging
def _merge_sort(dest, src, low, high):
# Optimization: using insertion sort on smallest arrays (length < 7)
# See OpenJDK Arrays.mergeSort():
# https://github.com/openjdk/jdk/blob/f37d9c8abca50b65ed232831a06d60c1d015013f/src/java.base/share/classes/java/util/Arrays.java#L1342
if low >= high - 1:
return
mid = low + (high - low) // 2
_merge_sort(src, dest, low, mid)
_merge_sort(src, dest, mid, high)
if src[mid - 1] < src[mid]:
dest[low: high] = src[low: high]
return
p = low
q = mid
for i in range(low, high):
if q >= high or p < mid and src[p] < src[q]:
dest[i] = src[p]
p += 1
else:
dest[i] = src[q]
q += 1
def merge_sort(a):
aux = list(a)
_merge_sort(a, aux, 0, len(a))
def merge_sort_iterative(a):
    """Bottom-up merge sort; sorts a in place and returns the sorted list.

    Bug fix: the original ping-ponged between the input list and a scratch
    buffer and returned whichever buffer held the final pass, so for lengths
    requiring an odd number of merge passes (e.g. n = 5) the caller's list
    was left unsorted.  We now copy the result back into `a` whenever the
    final pass lands in the scratch buffer.
    """
    n = len(a)
    if n <= 1:
        return a
    src = a
    dest = [0] * n
    sublen = 1  # current sorted-sublist length
    while sublen < n:
        new_sublen = sublen << 1  # sublist length after this merge pass
        for lo in range(0, n, new_sublen):
            lo_end = min(lo + sublen, n)
            hi = lo_end
            hi_end = min(hi + sublen, n)
            # standard two-way merge of src[lo:lo_end] and src[hi:hi_end]
            for i in range(lo, hi_end):
                if hi >= hi_end or lo < lo_end and src[lo] <= src[hi]:
                    dest[i] = src[lo]
                    lo += 1
                else:
                    dest[i] = src[hi]
                    hi += 1
        sublen = new_sublen
        src, dest = dest, src
    if src is not a:
        a[:] = src  # final pass landed in the scratch buffer; copy back
    return a
# Misc
def _down_heap(a, root, n):
while True:
child = 2 * root + 1
if child >= n:
break
# find the larger child
if child + 1 < n and a[child] < a[child + 1]:
child += 1
if a[child] <= a[root]:
break
a[root], a[child] = a[child], a[root]
root = child
def heap_sort(a):
# See CoreCLR ArrayHelpers::Heapsort():
# https://github.com/dotnet/coreclr/blob/310c1903838f048afc66121212342e0d55b50d11/src/classlibnative/bcltype/arrayhelpers.h#L242
# See OpenJDK PriorityQueue.heapify():
# https://github.com/openjdk/jdk/blob/f37d9c8abca50b65ed232831a06d60c1d015013f/src/java.base/share/classes/java/util/PriorityQueue.java#L724
# See CPython heapq.heapify():
# https://github.com/python/cpython/blob/898318b53d921298d1f1fcfa0f415844afbeb318/Lib/heapq.py#L168
n = len(a)
for i in range(n // 2 - 1, -1, -1):
_down_heap(a, i, n)
for i in range(n - 1, -1, -1):
a[0], a[i] = a[i], a[0]
_down_heap(a, 0, i)
def radix_sort(a):
# See com.indeed.util.core.sort.RadixSort.radixSort():
# https://github.com/indeedeng/util/blob/2e640a121abfcc6fad6947baeb7adecee1dc50ca/util-core/src/main/java/com/indeed/util/core/sort/RadixSort.java#L7
n = len(a)
count_scratch = [0] * 0x10000
scratch = [0] * n
sum_ = 0
for num in a:
radix = num & 0xFFFF
count_scratch[radix] += 1
for i in range(0x10000):
tmp = count_scratch[i]
count_scratch[i] = sum_
sum_ += tmp
for i in range(n):
num = a[i]
radix = num & 0xFFFF
offset = count_scratch[radix]
scratch[offset] = a[i]
count_scratch[radix] += 1
count_scratch = [0] * 0x10000
sum_ = 0
for num in a:
radix = (num >> 16) + 0x8000
count_scratch[radix] += 1
for i in range(0x10000):
tmp = count_scratch[i]
count_scratch[i] = sum_
sum_ += tmp
for i in range(n):
num = scratch[i]
radix = (num >> 16) + 0x8000
offset = count_scratch[radix]
a[offset] = scratch[i]
count_scratch[radix] += 1
class Test(unittest.TestCase):
    """Runs every sort in this module against a reproducibly shuffled list."""
    def __init__(self, *args, **kwargs):
        super(Test, self).__init__(*args, **kwargs)
        start = -5
        end = 5
        self.ordered = list(range(start, end))
        cloned = list(self.ordered)
        # Fix: random.shuffle()'s second ("random") parameter was deprecated
        # in Python 3.9 and removed in 3.11.  A seeded Random instance gives
        # the same reproducibility without the removed API.
        random.Random(1).shuffle(cloned)
        self.unordered = cloned
    def test(self):
        self._test(insertion_sort)
        self._test(insertion_sort_memmove)
        self._test(binary_insertion_sort)
        self._test(shell_sort)
        self._test(bubble_sort)
        self._test(quick_sort)
        self._test(selection_sort)
        self._test(merge_sort)
        self._test(merge_sort_iterative)
        self._test(heap_sort)
        self._test(radix_sort)
    def _test(self, func):
        # Each sort must order a fresh copy of the shuffled fixture in place.
        unordered = list(self.unordered)
        func(unordered)
        self.assertEqual(self.ordered, unordered)
|
import unittest
class Solution:
    def findComplement(self, num):
        """
        Flip every significant bit of num (its complement within its own bit
        width), e.g. 5 (101) -> 2 (010).

        :type num: int
        :rtype: int
        """
        # Smear the highest set bit downward so the mask becomes all ones
        # across num's significant bits, then XOR to flip them.
        mask = num
        for shift in (1, 2, 4, 8, 16):
            mask |= mask >> shift
        return num ^ mask
class Test(unittest.TestCase):
def test(self):
self._test(5, 2)
self._test(1, 0)
def _test(self, num, expected):
actual = Solution().findComplement(num)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
import collections
max_balls = 6
def _delete_3balls(board, lo, hi):
if lo < 0:
return board[:lo + 1] + board[hi:]
while True:
ball = board[lo]
new_lo, new_hi = lo - 1, hi
while new_lo >= 0 and board[new_lo] == ball:
new_lo -= 1
while new_hi < len(board) and board[new_hi] == ball:
new_hi += 1
if lo - new_lo + new_hi - hi >= 3:
lo, hi = new_lo, new_hi
else:
break
return board[:lo + 1] + board[hi:]
def _dfs(board, hand, inserted, num_inserted, min_num_inserted):
if not board:
return num_inserted
if num_inserted >= min_num_inserted:
return min_num_inserted
for ball, num_balls in hand.items():
if inserted[ball] >= num_balls:
continue
for i in range(len(board)):
if board[i] != ball:
continue
if i + 1 < len(board) and board[i] == board[i + 1]:
new_board = _delete_3balls(board, i - 1, i + 2)
inserted[ball] += 1
min_num_inserted = _dfs(new_board, hand, inserted, num_inserted + 1, min_num_inserted)
inserted[ball] -= 1
elif inserted[ball] + 2 <= num_balls:
new_board = _delete_3balls(board, i - 1, i + 1)
inserted[ball] += 2
min_num_inserted = _dfs(new_board, hand, inserted, num_inserted + 2, min_num_inserted)
inserted[ball] -= 2
return min_num_inserted
class Solution:
def findMinStep(self, board, hand):
"""
:type board: str
:type hand: str
:rtype: int
"""
hand = collections.Counter(hand)
inserted = collections.Counter()
min_inserted_balls = _dfs(board, hand, inserted, 0, max_balls)
return min_inserted_balls if min_inserted_balls < max_balls else -1
class Test(unittest.TestCase):
def test(self):
self._test('WRRBBW', 'RB', -1)
self._test('WWRRBBWW', 'WRBRW', 2)
self._test('G', 'GGGGG', 2)
self._test('RBYYBBRRB', 'YRBGB', 3)
def _test(self, board, hand, expected):
actual = Solution().findMinStep(board, hand)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
from typing import List
import utils
# O(n) time. O(1) space. Kadane's algorithm.
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Best profit from a single buy-then-sell (0 if no profitable trade).

        Kadane's algorithm applied to day-to-day price deltas: the best
        profit selling today is the best sum of deltas ending today.
        """
        best = 0
        running = 0  # best delta-sum ending at the current day
        for prev, curr in zip(prices, prices[1:]):
            delta = curr - prev
            running = max(delta, running + delta)
            best = max(best, running)
        return best
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution)
if __name__ == '__main__':
unittest.main()
|
import unittest
from tree import TreeNode, null
class Solution:
def __init__(self):
self.sum = 0
self.paths = []
def pathSum(self, root, sum_):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
if not root:
return []
self.sum = sum_
self._path_sum(root, 0, [])
return self.paths
def _path_sum(self, node, sum_, path):
sum_ += node.val
path = list(path)
path.append(node.val)
if not node.left and not node.right:
if sum_ == self.sum:
self.paths.append(path)
else:
if node.left:
self._path_sum(node.left, sum_, path)
if node.right:
self._path_sum(node.right, sum_, path)
class Test(unittest.TestCase):
def test(self):
self._test([5, 4, 8, 11, null, 13, 4, 7, 2, null, null, 5, 1],
22,
[[5, 4, 11, 2], [5, 8, 4, 5]])
def _test(self, vals, sum_, expected):
root = TreeNode.from_array(vals)
self.assertEqual(expected, Solution().pathSum(root, sum_))
if __name__ == '__main__':
unittest.main()
|
import unittest
import collections
def _read_element(formula, i):
i += 1
while i < len(formula) and formula[i].islower():
i += 1
return i
def _read_num(formula, i):
num = ord(formula[i]) - ord('0')
i += 1
while i < len(formula) and formula[i].isdigit():
num = num * 10 + ord(formula[i]) - ord('0')
i += 1
return i, num
def _mul(stack, num):
brackets = 1
i = len(stack) - 2
while i >= 0:
if stack[i] == '(':
if brackets == 1:
break
brackets -= 1
i -= 1
elif stack[i] == ')':
brackets += 1
i -= 1
else:
stack[i] *= num
i -= 2
def _count(stack):
counter = collections.Counter()
i = 0
while i < len(stack):
item = stack[i]
if item == '(' or item == ')':
i += 1
continue
counter[item] += stack[i + 1]
i += 2
return ''.join(element + (str(num) if num > 1 else '') for element, num in sorted(counter.items()))
# O(n) time. O(n) space. Iteration.
class Solution:
def countOfAtoms(self, formula):
"""
:type formula: str
:rtype: str
"""
stack = []
i = 0
while i < len(formula):
ch = formula[i]
if ch.isupper():
hi = _read_element(formula, i)
stack.append(formula[i:hi])
stack.append(1)
i = hi
elif ch.isdigit():
hi, num = _read_num(formula, i)
stack[-1] = num
i = hi
elif ch == '(':
stack.append('(')
i += 1
else:
stack.append(')')
if i + 1 < len(formula) and formula[i + 1].isdigit():
hi, num = _read_num(formula, i + 1)
_mul(stack, num)
i = hi
else:
i += 1
return _count(stack)
class Test(unittest.TestCase):
def test(self):
self._test('H2O', 'H2O')
self._test('Mg(OH)2', 'H2MgO2')
self._test('K4(ON(SO3)2)2', 'K4N2O14S4')
self._test('Mg12', 'Mg12')
def _test(self, formula, expected):
actual = Solution().countOfAtoms(formula)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
import utils
def evaluate_xyz(x, y, z, p, q):
yz = evaluate_yz(y, z, q)
return x + yz if p == '+' else x - yz
def evaluate_yz(y, z, q):
return y * z if q == '*' else y // z
def dfs(s, i):
# Any expression given by this problem can be reduced to an equivalent general form: x + y * z
x = 0
y = 1
z = 0
p = '+'
q = '*'
while i < len(s):
c = s[i]
if ord('0') <= ord(c) <= ord('9'):
z = z * 10 + ord(c) - ord('0')
elif c == '+' or c == '-':
x = evaluate_xyz(x, y, z, p, q)
y = 1
z = 0
p = c
q = '*'
elif c == '*' or c == '/':
y = evaluate_yz(y, z, q)
z = 0
q = c
elif c == '(':
z, i = dfs(s, i + 1)
elif c == ')':
break
i += 1
return evaluate_xyz(x, y, z, p, q), i
# O(n) time. O(depth of parentheses) space. One pass, recursion, math reduction.
class Solution:
def calculate(self, s: str) -> int:
return dfs(s, 0)[0]
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
actual = Solution().calculate(**case.args.__dict__)
self.assertEqual(case.expected, actual, msg=args)
if __name__ == '__main__':
unittest.main()
|
import unittest
from linkedlist import ListNode
class Solution:
def getIntersectionNode(self, a, b):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
def list_len(x):
result = 0
while x:
x = x.next
result += 1
return result
a_len = list_len(a)
b_len = list_len(b)
if a_len > b_len:
for _ in range(a_len - b_len):
a = a.next
else:
for _ in range(b_len - a_len):
b = b.next
while a is not b:
a = a.next
b = b.next
return a
class Test(unittest.TestCase):
def test(self):
self._test([0, 1, 2, 3, 4], [5, 6, 7], 2)
def _test(self, a, b_before_intersection, a_len_before_intersection):
a = ListNode.from_array(a)
b = ListNode.from_array(b_before_intersection)
intersection = a
for _ in range(a_len_before_intersection):
intersection = intersection.next
b_end = b
while b_end.next:
b_end = b_end.next
b_end.next = intersection
actual = Solution().getIntersectionNode(a, b)
self.assertEqual(intersection, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
class Solution:
    def findNthDigit(self, n):
        """
        Return the n-th digit of the infinite digit sequence 0123456789101112...
        (n = 0 yields 0, n = 10 yields the '1' of "10").

        :type n: int
        :rtype: int
        """
        if n < 10:
            return n  # single-digit numbers map to themselves
        n -= 10
        num_digits = 2  # digits per number in the current band
        count = 90      # numbers in the current band (90 two-digit, 900 three-digit, ...)
        while n >= count * num_digits:
            n -= count * num_digits
            num_digits += 1
            count *= 10
        # q-th number within the band, r-th digit inside that number; the
        # target number is 10**(num_digits-1) + q
        q, r = divmod(n, num_digits)
        result = q // (10 ** (num_digits - 1 - r)) % 10
        # The formula extracts digits of q (zero-padded), which matches the
        # target number everywhere except the leading digit (r == 0), where
        # the band offset 10**(num_digits-1) adds exactly 1 — hence the +1.
        return result if r else result + 1
class Test(unittest.TestCase):
def test(self):
self._test(1, 1)
self._test(9, 9)
self._test(10, 1)
self._test(11, 0)
self._test(20, 1)
self._test(21, 5)
self._test(30, 2)
self._test(31, 0)
self._test(32, 2)
self._test(33, 1)
self._test(188, 9)
self._test(189, 9)
self._test(190, 1)
self._test(191, 0)
self._test(192, 0)
self._test(2887, 9)
self._test(2888, 9)
self._test(2889, 9)
self._test(2890, 1)
self._test(2891, 0)
self._test(2892, 0)
self._test(2893, 0)
def _test(self, n, expected):
actual = Solution().findNthDigit(n)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
import math
class Solution:
    def constructRectangle(self, area):
        """
        Return [length, width] such that length * width == area,
        length >= width, and their difference is as small as possible.
        Scans candidate widths downward from floor(sqrt(area)); the first
        divisor found gives the closest factor pair.

        :type area: int
        :rtype: List[int]
        """
        width = int(math.sqrt(area))
        while area % width != 0:
            width -= 1
        return [area // width, width]
class Test(unittest.TestCase):
def test(self):
self._test(1, [1, 1])
self._test(2, [2, 1])
self._test(3, [3, 1])
self._test(4, [2, 2])
self._test(5, [5, 1])
self._test(6, [3, 2])
self._test(7, [7, 1])
self._test(8, [4, 2])
self._test(9, [3, 3])
def _test(self, n, expected):
actual = Solution().constructRectangle(n)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
class Solution:
    def searchRange(self, nums, target):
        """
        Return [first, last] indices of target in the sorted list nums,
        or [-1, -1] if absent.  A standard binary search finds some
        occurrence, then two further binary searches pin down the leftmost
        and rightmost occurrences.

        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        lo = 0
        hi = len(nums) - 1
        while lo <= hi:
            mid = lo + ((hi - lo) >> 1)
            mid_val = nums[mid]
            if mid_val < target:
                lo = mid + 1
            elif mid_val > target:
                hi = mid - 1
            else:
                # found an occurrence at mid; remember the bounds needed for
                # the rightmost search before reusing lo/hi
                mid_tmp = mid
                hi_tmp = hi
                # leftmost occurrence lies in [lo, mid]
                hi = mid
                while lo <= hi:
                    mid = lo + ((hi - lo) >> 1)
                    if nums[mid] < target:
                        lo = mid + 1
                    else:
                        hi = mid - 1
                start = lo
                # rightmost occurrence lies in [mid_tmp, hi_tmp]
                lo = mid_tmp
                hi = hi_tmp
                while lo <= hi:
                    mid = lo + ((hi - lo) >> 1)
                    if nums[mid] > target:
                        hi = mid - 1
                    else:
                        lo = mid + 1
                return [start, hi]
        return [-1, -1]
class Test(unittest.TestCase):
def test(self):
self._test([5, 7, 7, 8, 8, 10], 8, [3, 4])
def _test(self, nums, target, expected):
actual = Solution().searchRange(nums, target)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
class Solution:
    def convert(self, s, n):
        """
        ZigZag conversion: write s in a down-then-up zigzag across n rows
        and read the result row by row.

        :type s: str
        :type n: int
        :rtype: str
        """
        if n == 1 or n >= len(s):
            return s
        rows = [[] for _ in range(n)]
        row, direction = 0, 1
        for ch in s:
            rows[row].append(ch)
            # bounce at the top and bottom rows
            if row == 0:
                direction = 1
            elif row == n - 1:
                direction = -1
            row += direction
        return ''.join(''.join(chars) for chars in rows)
class Test(unittest.TestCase):
def test(self):
self._test('PAYPALISHIRING', 3, 'PAHNAPLSIIGYIR')
self._test('012345', 1, '012345')
self._test('012345', 2, '024135')
self._test('012345', 3, '041352')
self._test('0123456789', 4, '0615724839')
self._test('0123456789', 5, '0817926354')
def _test(self, s, numRows, expected):
actual = Solution().convert(s, numRows)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import collections
import unittest
from typing import List
import utils
from tree import TreeNode
# O(n) time. O(number of groups) space. Hash table.
class Solution:
def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:
group_size_to_people = collections.defaultdict(list)
for person, group_size in enumerate(groupSizes):
group_size_to_people[group_size].append(person)
return [people[i:i + group_size] for group_size, people in
group_size_to_people.items() for i in range(0, len(people), group_size)]
class Test(unittest.TestCase):
def test(self):
cases = utils.load_test_json(__file__).test_cases
for case in cases:
args = str(case.args)
expected = [tuple(sorted(tuple(sorted(group)) for group in possible_solution))
for possible_solution in case.expected]
actual = Solution().groupThePeople(**case.args.__dict__)
actual = tuple(sorted(tuple(sorted(group)) for group in actual))
self.assertIn(actual, expected, msg=args)
if __name__ == '__main__':
unittest.main()
|
import unittest
from typing import List
import utils
# O(n) time. O(n) space. Kadane's algorithm, DP.
class Solution:
    def maxSubArray(self, nums: List[int]) -> int:
        """Return the largest sum of any non-empty contiguous subarray.

        Kadane's algorithm: only the best sum ending at the previous index
        is ever consulted, so the original O(n) dp array is reduced to a
        single running value — O(n) time, O(1) extra space.
        """
        max_so_far = max_ending_here = nums[0]
        for num in nums[1:]:
            # best subarray ending here either extends the previous one or
            # restarts at num
            max_ending_here = max(num, max_ending_here + num)
            max_so_far = max(max_so_far, max_ending_here)
        return max_so_far
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution)
if __name__ == '__main__':
unittest.main()
|
import unittest
from tree import TreeNode, null
class Solution:
    def flatten(self, root):
        """
        Flatten the binary tree into a right-leaning "linked list" in
        preorder, in place.  Iterative preorder with an explicit stack:
        right children are deferred on the stack while the left spine is
        walked, and every visited node is appended to the result through
        prev's right pointer (left pointers are nulled out).

        :type root: TreeNode
        :rtype: void Do not return anything, modify root in-place instead.
        """
        stack = [root]
        prev = TreeNode(0)  # dummy predecessor so the loop needs no special case
        while stack:
            curr = stack.pop()
            while curr:
                if curr.right:
                    stack.append(curr.right)  # revisit after the left subtree
                prev.left = None
                prev.right = curr
                prev = curr
                curr = curr.left
class Test(unittest.TestCase):
def test(self):
self._test([1, 2, 5, 3, 4, null, 6],
[1, null, 2, null, 3, null, 4, null, 5, null, 6])
def _test(self, vals, expected):
root = TreeNode.from_array(vals)
Solution().flatten(root)
self.assertEqual(expected, root.to_array())
if __name__ == '__main__':
unittest.main()
|
import unittest
from typing import List
import utils
# O(len(coins) * amount) time. O(amount) space. Space-optimized DP, unbounded knapsack.
class Solution:
    def coinChange(self, coins: List[int], amount: int) -> int:
        """Fewest coins summing to amount, or -1 if it cannot be made.

        Unbounded-knapsack DP over amounts: O(len(coins) * amount) time,
        O(amount) space.
        """
        INF = amount + 1  # more coins than any valid answer can use
        # fewest[j]: fewest coins needed to make amount j (INF = unreachable)
        fewest = [0] + [INF] * amount
        for coin in coins:
            for total in range(coin, amount + 1):
                candidate = fewest[total - coin] + 1
                if candidate < fewest[total]:
                    fewest[total] = candidate
        return -1 if fewest[amount] > amount else fewest[amount]
class Test(unittest.TestCase):
def test(self):
utils.test(self, __file__, Solution)
if __name__ == '__main__':
unittest.main()
|
import unittest
def gcd_euclid(a, b):
    """Euclidean greatest common divisor.

    NOTE: this variant returns 0 when either input is 0 (unlike math.gcd,
    which returns the other argument), a convention the caller relies on.
    """
    if not a or not b:
        return 0
    while b:
        a, b = b, a % b
    return a
class Solution:
def rotate(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: void Do not return anything, modify nums in-place instead.
"""
if not nums:
return
n = len(nums)
k %= n
if k == 0:
return
if k < 0:
k += n
gcd = gcd_euclid(n, k)
inner_loops = n // gcd
for i in range(gcd):
index = i
prev = nums[index]
for j in range(inner_loops):
index = (index + k) % n
nums[index], prev = prev, nums[index]
class Test(unittest.TestCase):
def test(self):
self._test([1, 2, 3, 4, 5, 6, 7], 3, [5, 6, 7, 1, 2, 3, 4])
self._test([1, 2, 3, 4, 5, 6, 7], 10, [5, 6, 7, 1, 2, 3, 4])
self._test([1, 2, 3, 4, 5, 6, 7], -4, [5, 6, 7, 1, 2, 3, 4])
self._test([1, 2, 3, 4, 5, 6, 7], 4, [4, 5, 6, 7, 1, 2, 3])
def _test(self, nums, k, expected):
Solution().rotate(nums, k)
self.assertEqual(expected, nums)
if __name__ == '__main__':
unittest.main()
|
import unittest
from linkedlist import ListNode
class Solution:
def deleteDuplicates(self, head):
"""
:type curr: ListNode
:rtype: ListNode
"""
curr = head
while curr:
next_ = curr.next
while next_ and next_.val == curr.val:
next_ = next_.next
curr.next = next_
curr = next_
return head
class Test(unittest.TestCase):
def test(self):
self._test([1, 1, 2], [1, 2])
self._test([1, 1, 2, 3, 3], [1, 2, 3])
def _test(self, nums, expected):
head = ListNode.from_array(nums)
actual = Solution().deleteDuplicates(head)
actual = ListNode.to_array(actual)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
# O(n).
class Solution:
    def pivotIndex(self, nums):
        """
        Leftmost index whose left-side sum equals its right-side sum,
        or -1 if no such index exists.  Single pass keeping running sums of
        both sides.

        :type nums: List[int]
        :rtype: int
        """
        left, right = 0, sum(nums)
        for i, num in enumerate(nums):
            right -= num
            if left == right:
                return i
            left += num
        return -1
class Test(unittest.TestCase):
def test(self):
self._test([1, 7, 3, 6, 5, 6], 3)
self._test([1, 2, 3], -1)
def _test(self, nums, expected):
actual = Solution().pivotIndex(nums)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
import unittest
def lenRecur(aStr):
    '''
    aStr: a string
    returns: int, the length of aStr, computed recursively by peeling one
    character off the front per call
    '''
    return 0 if not aStr else 1 + lenRecur(aStr[1:])
class TestLenRecur(unittest.TestCase):
def test_with_len_one(self):
self.assertEqual(lenRecur('a'), 1)
def test_with_len_two(self):
self.assertEqual(lenRecur('aa'), 2)
def test_with_len_ten(self):
self.assertEqual(lenRecur('aaaaaaaaaa'), 10)
if __name__ == '__main__':
unittest.main()
|
# Практика. Класс Point
from math import sqrt
class Point:
    """A 2-D point; every instance created is also recorded in list_points."""
    # Class-level registry shared by (and visible through) all Point instances.
    list_points = []
    # DRY (Don't Repeat Yourself) style: go_home() reuses move_to().
    def __init__(self, coord_x = 0, coord_y = 0):
        self.x = coord_x
        self.y = coord_y
        Point.list_points.append(self)  # reflected in every Point instance
    def move_to(self, new_x, new_y):
        # Reposition the point to absolute coordinates (new_x, new_y).
        self.x = new_x
        self.y = new_y
    def go_home(self):
        # "Home" is the origin (0, 0).
        self.move_to(0, 0)
    def print_point(self):
        print(f"Точка с координатами ({self.x}, {self.y})")
    def calc_distance(self, another_point):
        # Euclidean distance to another Point; rejects non-Point arguments.
        if not isinstance(another_point, Point):
            raise ValueError("Аргумент должен принадлежать классу Point")
        return sqrt((self.x - another_point.x) ** 2 + (self.y - another_point.y) ** 2)
# Demo script exercising the Point class: creation, moving, the shared
# registry, and distance calculation.
p1 = Point(3, 4)
print([p1.x, p1.y])
p2 = Point(-54, 32)
print([p2.x, p2.y])
p3 = Point() # Legal: __init__ gives defaults; would raise TypeError only if the arguments were mandatory
print([p3.x, p3.y])
print(Point.list_points)
print()
p3.move_to(4, 5)
print([p3.x, p3.y])
p3.move_to(-90, 5)
print([p3.x, p3.y])
print(Point.list_points)
print()
p4 = Point(4)
print([p4.x, p4.y])
p4.move_to(4, 8)
print([p4.x, p4.y])
p4.move_to(8, 8)
print([p4.x, p4.y])
p4.go_home()
print([p4.x, p4.y])
print(Point.list_points)
print()
p5 = Point()
p5.print_point()
p5.move_to(7, -43)
p5.print_point()
print(Point.list_points)
print()
p7 = Point(6, 0)
p8 = Point(0, 8)
#p7.calc_distance(90) # Would raise a ValueError (argument is not a Point)
print(p7.calc_distance(p8))
print(Point.list_points)
print(Point.list_points[2])
print('y = ' + str(Point.list_points[2].y))
print()
# Class attribute is reachable through any instance too.
print(p7.list_points)
|
import hashlib, sqlite3, string
def addUser(user, password):
    """Register <user> with the SHA-1 hex digest of <password>.

    Returns a status string: an error description if the username has an
    invalid character, the password is too short, or the name is taken;
    otherwise a success message after the row is inserted.
    """
    if special(user):
        return "invalid character in username"
    if len(password) < 8:
        return "password too short"
    db = sqlite3.connect('data/tables.db')
    try:
        c = db.cursor()
        # NOTE(review): unsalted SHA-1 is weak for password storage;
        # consider hashlib.pbkdf2_hmac. Kept to match existing rows.
        myHashObj = hashlib.sha1()
        myHashObj.update(password)  # assumes password is bytes / py2 str
        # Targeted duplicate check; the old full-table scan with
        # `user in row` also matched the username against other columns.
        c.execute('SELECT 1 FROM users WHERE username = ?', (user,))
        if c.fetchone() is not None:
            return "ERROR: username already in use"
        # Parameterized INSERT: the original concatenated <user> into the
        # SQL string, allowing SQL injection.
        c.execute('INSERT INTO users VALUES (NULL, ?, ?)',
                  (user, myHashObj.hexdigest()))
        db.commit()
        return "registration successful, enter user and pass to login"
    finally:
        # Close the connection on every path (the old code leaked it on
        # unexpected errors and skipped commit cleanup).
        db.close()
def userLogin(user, password):
    """Validate a username/password pair against the users table.

    Returns ['True', userID-as-string] on success, or
    ['False', 'bad user/pass'] for an unknown user or wrong password.
    """
    db = sqlite3.connect('data/tables.db')
    try:
        c = db.cursor()
        myHashObj = hashlib.sha1()
        myHashObj.update(password)  # assumes password is bytes / py2 str
        # One parameterized query replaces the original three queries that
        # concatenated <user> into SQL (injection risk) and the debug prints.
        c.execute('SELECT password, userID FROM users WHERE username = ?',
                  (user,))
        row = c.fetchone()
        if row is not None and myHashObj.hexdigest() == row[0]:
            return ['True', str(row[1])]
        return ['False', 'bad user/pass']
    finally:
        # Single close point instead of the duplicated close calls.
        db.close()
def special(user):
    """Return True iff <user> contains a character outside [0-9A-Za-z].

    The original ord()-range test used `ord(char) > 123`, which wrongly
    accepted '{' (ord 123) as a valid character; the range comparison
    below treats everything past 'z' (ord 122) as special.
    """
    return any(not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z')
               for ch in user)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
===========================================
FileName: list.py
Desc:
Author: ruizhong.li
Version:
CreateTime: 2017-05-01
==========================================
"""
# Nested movie metadata: title, year, directors, runtime, and a nested cast list.
movices = ["The Holy Grail",1975,"Terry Jones & Terry Gilliam",91,["Graham Chapman",["Michael Palin","John Class","Terry Gilliam"]]]
def print_lol(the_list):
    """Print every scalar item of a (possibly nested) list, one per line.

    Nested lists are flattened by recursing into them.
    """
    for item in the_list:
        if not isinstance(item, list):
            print(item)
        else:
            # Recurse into sublists so only leaf items are printed.
            print_lol(item)
# Demo: flatten-print the nested movie list when run as a script.
if __name__ == '__main__':
    print_lol(movices)
|
def train_test_split(X, y, train_size=0.8):
    """Split features and targets into train/test partitions.

    The first int(len(X) * train_size) samples become the training set
    and the remainder the test set; no shuffling is performed.

    Parameters
    ----------
    X : ndarray, shape (n_samples, n_features)
        Feature matrix.
    y : ndarray, shape (n_samples,)
        Target values.
    train_size : float (0 < train_size < 1)
        Fraction of samples assigned to the training set.

    Returns
    -------
    X_train, X_test, y_train, y_test
        The four partitions, in that order.
    """
    cut = int(len(X) * train_size)
    return X[:cut], X[cut:], y[:cut], y[cut:]
#with open('data/product.txt', 'r') as f:
#f_contents = f.read()
#print(f_contents)
#with open('data/product.txt', 'r') as f:
#f_contents = f.readlines()
#print(f_contents, end='')
#with open('data/product.txt', 'r') as f:
#for line in f:
#print(line, end='')
# Duplicate data/product.txt, line by line, into data/_copyproduct.txt.
with open('data/product.txt', 'r') as rf, \
        open('data/_copyproduct.txt', 'w') as wf:
    for line in rf:
        wf.write(line)
# Demonstrates str.translate mappings and character replacement via a list.
x = "Anthony"
# Map n->x, o->y, l->z: "Anthony" -> "Axthyxy".
y = str.maketrans('nol', 'xyz')
x = x.translate(y)
print(x)
# Reverse map x->n, y->o, z->l. The original trailing 'y' (untouched by
# the forward map) now decodes to 'o', yielding "Anthono" (7 chars).
z = str.maketrans('xyz', 'nol')
x = x.translate(z)
print(x)
# Strings are immutable, so go through a list to change one character.
# The original indexed b[7], which is out of range for a 7-character
# string and raised IndexError; use the last index instead.
b = list(x)
b[-1] = "y"
print(b)
x = ''.join(b)
print(x)
|
'''
Computational Models of Cognition
Final Project
11/14/18
Functions for implementing and running the Pi-First model in a replication study
of Lee et al. (2011).
'''
import random
import numpy as np
class Game:
    """One bandit game: a set of arms whose success probabilities are
    sampled from a Beta distribution chosen by the environment type."""

    def __init__(self, num_arms, pi, gamma, environment):
        self.trial = 0
        self.num_arms = num_arms
        self.pi = pi
        self.gamma = gamma
        self.probabilities = self.get_arm_probabilities(environment)
        # One Arm per sampled probability, numbered by position.
        self.arms = [Arm(p, idx) for idx, p in enumerate(self.probabilities)]

    def get_total_rewards(self):
        '''
        Parameters: self
        Returns: a string listing the number of wins, the number of losses,
        and the reward rate in the current game.

        NOTE(review): divides by self.trial, so calling this before any
        trial has run raises ZeroDivisionError.
        '''
        wins = sum(arm.number_wins for arm in self.arms)
        return ("Total Wins: " + str(wins) + " ; Total Losses: " +
                str(self.trial - wins) + " ; Reward Rate: " +
                str(wins / self.trial))

    def get_arm_probabilities(self, environment):
        '''
        Parameters:
            self
            environment: "n" for neutral, "s" for sparse, or "p" for plentiful
        Returns: a list of arm probabilities sampled from the corresponding
        Beta distributions (Beta(1,1) neutral; Beta(2,4) sparse;
        Beta(4,2) plentiful).
        '''
        # Any value other than "n"/"s" falls through to plentiful,
        # matching the original behaviour.
        if environment == "n":
            a, b = 1, 1
        elif environment == "s":
            a, b = 2, 4
        else:
            a, b = 4, 2
        return [np.random.beta(a, b) for _ in range(self.num_arms)]
class Arm:
    """A single bandit arm tracking its pull and win counts."""

    def __init__(self, probability, number):
        self.number_wins = 0
        self.number_choices = 0
        self.probability = probability  # true success probability
        self.number = number            # positional index of this arm

    def expected(self):
        '''
        Parameters: self
        Returns: the observed win rate of this arm, or 0 for an arm that
        has never been chosen (avoids division by zero).
        '''
        if self.number_choices == 0:
            return 0
        return self.number_wins / self.number_choices
def pi_first(game):
    '''
    Runs one trial of the Pi-First heuristic: choose an arm, sample its
    outcome, and record the result on the game.

    Parameters:
        game: current game object being played
    Returns: a tuple (choice, result) for the current trial
    '''
    choice = decide_next_move(game)
    outcome = calculate_result(choice, game)
    update_game(choice, outcome, game)
    return choice, outcome
def decide_next_move(game):
    '''
    Decides which arm to choose next under the Pi-First heuristic: the
    first `pi` trials always explore; afterwards the model exploits with
    probability `gamma` (best observed arm, ties broken uniformly at
    random) and otherwise explores.

    Parameters: the game object
    Returns: the arm object that will be chosen next
    '''
    # Draw the coin first so the random stream matches the original code.
    coin = random.random()
    if game.trial < game.pi:
        # Forced exploration phase.
        return choose_randomly(game)
    if coin > game.gamma:
        # Failed the exploitation coin flip; explore instead.
        return choose_randomly(game)
    # Exploit: collect every arm tied for the best observed value...
    best_value = max(arm.expected() for arm in game.arms)
    best_arms = [arm for arm in game.arms if arm.expected() == best_value]
    # ...and break ties with the same randint call the original used.
    return best_arms[random.randint(0, len(best_arms) - 1)]
def calculate_result(choice, game):
    '''
    Samples the outcome of pulling <choice> using the arm's success
    probability.

    Parameters:
        choice: the chosen arm object
        game: the game object being played (unused here; kept so all
              heuristic helpers share the same call shape)
    Returns: int outcome (1 for a win, 0 for a loss)
    '''
    draw = random.random()
    # Win exactly when the uniform draw falls within the success mass.
    return 1 if draw <= choice.probability else 0
def update_game(choice, result, game):
    '''
    Records one completed trial: bumps the game's trial counter, the
    chosen arm's pull count, and (on a win) the arm's win count.

    Parameters:
        choice: last arm object chosen
        result: win or loss (1 or 0)
        game: the game object being updated
    '''
    game.trial += 1
    choice.number_choices += 1
    # Only wins contribute to the arm's tally.
    choice.number_wins += 1 if result == 1 else 0
def choose_randomly(game):
    '''
    Chooses one of the game's first num_arms arms uniformly at random.

    Returns: an arm object
    '''
    # randint (not random.choice) keeps the random stream identical to
    # the rest of the model and respects game.num_arms.
    return game.arms[random.randint(0, game.num_arms - 1)]
def run_pi_first_simulation(n, trials, choices_filename, results_filename, num_arms, pi, gamma, environment):
    '''
    Parameters:
        n: number of games
        trials: number of trials for each game
        choices_filename: file to be created and written with choice data
        results_filename: file to be created and written with results data
        num_arms: number of arms in the game
        pi: a value for pi
        gamma: a value for gamma
        environment: "n" for neutral, "s" for sparse, "p" for plentiful
    Writes the outcome of the simulation to the two files in .csv format.
    '''
    # `with` guarantees both files are flushed and closed even if a game
    # raises; the original opened them and never closed either handle.
    with open(choices_filename, "w") as choices_file, \
            open(results_filename, "w") as results_file:
        # Write a header: one column per trial.
        for h in range(trials):
            choices_file.write("trial" + str(h) + "choice")
            results_file.write("trial" + str(h) + "result")
            if h < trials - 1:
                choices_file.write(",")
                results_file.write(",")
        # Initialize and run n games.
        for i in range(n):
            game = Game(num_arms, pi, gamma, environment)
            choices_file.write("\n")
            results_file.write("\n")
            # Run trials for each game.
            for j in range(trials):
                trial = pi_first(game)
                choices_file.write(str(trial[0].number))
                results_file.write(str(trial[1]))
                # Comma-separate all but the last trial of the row.
                if j < trials - 1:
                    choices_file.write(",")
                    results_file.write(",")
def generate_model_data():
    '''
    Run the Pi-First model in neutral, sparse, and plentiful environments
    with the parameters chosen for our experiment.
    '''
    base = "./ModelData/PiFirst/piFirst"
    # (filename label, environment code), in the original run order.
    for label, env in (("Neutral", "n"), ("Sparse", "s"), ("Plentiful", "p")):
        # 8-trial games use pi=4; 16-trial games use pi=8.
        for trials, pi in ((8, 4), (16, 8)):
            stem = base + label + str(trials)
            run_pi_first_simulation(500, trials, stem + "Choices.csv",
                                    stem + "Results.csv", 2, pi, .9, env)
#generate_model_data() |
""" --- Stonehenge Game ---
=== CSC148 Winter 2018 ===
University of Toronto
Assignment 2
Submitted by: Eric Koehli
=== Module Description ===
This module contains the Stonehenge game and Stonehenge game state.
"""
from typing import Any, List, Union, Dict, Tuple
from copy import deepcopy
from game import Game
from game_state import GameState
class StonehengeGame(Game):
    """
    A playable stonehenge game implementation. This is a subclass of Game.

    === Public Attributes ===
    current_state: The current state of the game.
    """
    current_state: 'StonehengeGameState'

    def __init__(self, p1_starts: bool = True, size: int = -1) -> None:
        """
        Initialize this Game, using p1_starts to find who the first player
        is. If <p1_starts> is true, p1 starts the game; otherwise p2 does.
        Prompts until a board size from 1 to 5 is supplied.
        """
        while size not in [1, 2, 3, 4, 5]:
            try:
                size = int(input('Enter the side length of the board'
                                 ' between 1 and 5 inclusive: '))
            except ValueError:
                # Non-integer input: re-prompt.
                print("Oops! That wasn't in the correct range... Please "
                      "try again.")
        self.current_state = StonehengeGameState(p1_starts, size)

    def get_instructions(self) -> str:
        """
        Return the instructions for this Game.
        """
        return ('Welcome to Stonehenge! The goal of the game is to capture '
                'at least\nhalf of the total number of ley-lines before your '
                'opponent does.\nGood luck!')

    def is_over(self, state: 'StonehengeGameState') -> bool:
        """
        Return whether this game is over at <state>: a player wins once
        they have claimed at least half of all ley-lines.
        """
        claimed = {'p1': 0, 'p2': 0}
        # Claimed ley-line markers appear on the board as 'p1'/'p2'.
        for board_line in state.board:
            for cell in board_line:
                if cell in claimed:
                    claimed[cell] += 1
        half = len(state.ley_lines) / 2
        return claimed['p1'] >= half or claimed['p2'] >= half

    def is_winner(self, player: str) -> bool:
        """
        Return whether <player> has won the game.

        Precondition: player is 'p1' or 'p2'.
        """
        if not self.is_over(self.current_state):
            return False
        # The player whose turn it is NOT made the winning move.
        return self.current_state.get_current_player_name() != player

    def str_to_move(self, string: str) -> Union[str, int]:
        """
        Return the move that <string> represents (stripped, upper-cased).
        If <string> is not purely alphabetic, return the invalid move -1.
        """
        cleaned = string.strip()
        if cleaned.isalpha():
            return cleaned.upper()
        return -1
class StonehengeGameState(GameState):
    """
    The Game State for the game Stonehenge.

    === Public Attributes ===
    ley_lines:
        A dictonary data structure to store the current
        ley-line information in the game. Each key is a
        string of a ley-line and each value is a list of
        cells in that ley-line.
    board:
        A representation of the current stonehenge board.
        Each sublist contains a row of the board.
    """
    ley_lines: Dict[str, List[str]]
    board: List[List[str]]

    def __init__(self, is_p1_turn: bool = True, board_length: int = 1,
                 ley_lines: Dict[str, List[str]] = None,
                 board: List[List[str]] = None) -> None:
        """
        Initialize this game state and set the current player based on
        is_p1_turn.

        >>> shgs = StonehengeGameState()
        >>> shgs.ley_lines
        {'ley_line1': ['@', 'A'], 'ley_line2': ['@', 'B', 'C'], 'ley_line3': \
['@', 'A', 'B'], 'ley_line4': ['@', 'C'], 'ley_line5': ['@', 'B'], \
'ley_line6': ['@', 'C', 'A']}
        """
        GameState.__init__(self, is_p1_turn)
        # Fresh game: build ley-lines and board from the size.
        # Otherwise reuse the (already partially played) structures passed
        # in by make_move.
        if ley_lines is None:
            self.ley_lines = self._build_ley_lines(board_length)
            self.board = self._make_board(board_length)
        else:
            self.ley_lines, self.board = ley_lines, board

    def _build_ley_lines(self, board_length: int) -> Dict[str, List[str]]:
        """A helper method to create the data structure to hold the ley-line
        information from the game.

        Ley-line membership is hard-coded per board size 1-4; any other
        size falls through to the size-5 table.
        """
        if board_length == 1:
            ley_lines = {'ley_line1': ['A'], 'ley_line2': ['B', 'C'],
                         'ley_line3': ['A', 'B'], 'ley_line4': ['C'],
                         'ley_line5': ['B'], 'ley_line6': ['C', 'A']}
        elif board_length == 2:
            ley_lines = {'ley_line1': ['A', 'C'], 'ley_line2': ['B', 'D', 'F'],
                         'ley_line3': ['A', 'B'], 'ley_line4': ['E', 'G'],
                         'ley_line5': ['C', 'D', 'E'],
                         'ley_line6': ['F', 'G'], 'ley_line7': ['E', 'B'],
                         'ley_line8': ['F', 'C'], 'ley_line9': ['G', 'D', 'A']}
        elif board_length == 3:
            ley_lines = {'ley_line1': ['A', 'C', 'F'],
                         'ley_line2': ['B', 'D', 'G', 'J'],
                         'ley_line3': ['A', 'B'], 'ley_line4': ['E', 'H', 'K'],
                         'ley_line5': ['C', 'D', 'E'], 'ley_line6': ['I', 'L'],
                         'ley_line7': ['F', 'G', 'H', 'I'],
                         'ley_line8': ['J', 'K', 'L'],
                         'ley_line9': ['I', 'E', 'B'], 'ley_line10': ['J', 'F'],
                         'ley_line11': ['K', 'G', 'C'],
                         'ley_line12': ['L', 'H', 'D', 'A']}
        elif board_length == 4:
            ley_lines = {'ley_line1': ['A', 'C', 'F', 'J'],
                         'ley_line2': ['B', 'D', 'G', 'K', 'O'],
                         'ley_line3': ['A', 'B'],
                         'ley_line4': ['E', 'H', 'L', 'P'],
                         'ley_line5': ['C', 'D', 'E'],
                         'ley_line6': ['I', 'M', 'Q'],
                         'ley_line7': ['F', 'G', 'H', 'I'],
                         'ley_line8': ['N', 'R'],
                         'ley_line9': ['J', 'K', 'L', 'M', 'N'],
                         'ley_line10': ['O', 'P', 'Q', 'R'],
                         'ley_line11': ['N', 'I', 'E', 'B'],
                         'ley_line12': ['O', 'J'],
                         'ley_line13': ['P', 'K', 'F'],
                         'ley_line14': ['Q', 'L', 'G', 'C'],
                         'ley_line15': ['R', 'M', 'H', 'D', 'A']}
        else:
            ley_lines = {'ley_line1': ['A', 'C', 'F', 'J', 'O'],
                         'ley_line2': ['B', 'D', 'G', 'K', 'P', 'U'],
                         'ley_line3': ['A', 'B'],
                         'ley_line4': ['E', 'H', 'L', 'Q', 'V'],
                         'ley_line5': ['C', 'D', 'E'],
                         'ley_line6': ['I', 'M', 'R', 'W'],
                         'ley_line7': ['F', 'G', 'H', 'I'],
                         'ley_line8': ['N', 'S', 'X'],
                         'ley_line9': ['J', 'K', 'L', 'M', 'N'],
                         'ley_line10': ['T', 'Y'],
                         'ley_line11': ['O', 'P', 'Q', 'R', 'S', 'T'],
                         'ley_line12': ['U', 'V', 'W', 'X', 'Y'],
                         'ley_line13': ['T', 'N', 'I', 'E', 'B'],
                         'ley_line14': ['U', 'O'],
                         'ley_line15': ['V', 'P', 'J'],
                         'ley_line16': ['W', 'Q', 'K', 'F'],
                         'ley_line17': ['X', 'R', 'L', 'G', 'C'],
                         'ley_line18': ['Y', 'S', 'M', 'H', 'D', 'A']}
        # Prepend the "unclaimed" marker '@' to every ley-line.
        for ley_line in ley_lines:
            if '@' not in ley_lines[ley_line]:
                ley_lines[ley_line].insert(0, '@')
        return ley_lines

    def _make_board(self, board_length: int) -> List[List[str]]:
        """Return a list of strings that represents the current state
        of the stonehenge game's board.

        This is a helper function for __init__

        The board is a hard-coded ASCII template per size, which is then
        split into per-row character lists by _organize_board.
        """
        if board_length == 1:
            board = '      @   @\n     /   /\n@ - A - B\n     \\ / \\\n  @ - ' \
                    'C   @\n       \\\n        @\n'
        elif board_length == 2:
            board = '        @   @\n       /   /\n  @ - A - B   @\n     ' \
                    '/ \\ / \\ /\n@ - C - D - E\n     \\ / \\ / \\\n  ' \
                    '@ - F - G   @\n       \\   \\\n        @   @\n'
        elif board_length == 3:
            board = '          @   @\n         /   /\n    @ - A - B   @\n   ' \
                    '    / \\ / \\ /\n  @ - C - D - E   @\n     ' \
                    '/ \\ / \\ / \\ /\n@ - F - G - H - I\n     ' \
                    '\\ / \\ / \\ / \\\n  @ - J - K - L   @\n       ' \
                    '\\   \\   \\\n        @   @   @\n'
        elif board_length == 4:
            board = '            @   @\n           /   /\n      ' \
                    '@ - A - B   @\n         / \\ / \\ /\n    @ - C - D - E' \
                    '   @\n       / \\ / \\ / \\ /\n  @ - F - G - H - I   @\n' \
                    '     / \\ / \\ / \\ / \\ /\n@ - J - K - L - M - N\n     ' \
                    '\\ / \\ / \\ / \\ / \\\n  @ - O - P - Q - R   @\n       ' \
                    '\\   \\   \\   \\\n        @   @   @   @\n'
        else:
            board = '              @   @\n             /   /\n        ' \
                    '@ - A - B   @\n           / \\ / \\ /\n      ' \
                    '@ - C - D - E   @\n         / \\ / \\ / \\ /\n    ' \
                    '@ - F - G - H - I   @\n       ' \
                    '/ \\ / \\ / \\ / \\ /\n  @ - J - K - L - M - N   @\n   ' \
                    '  / \\ / \\ / \\ / \\ / \\ /\n@ - O - P - Q - R' \
                    ' - S - T\n     \\ / \\ / \\ / \\ / \\ / \\\n' \
                    '  @ - U - V - W - X - Y   @\n       ' \
                    '\\   \\   \\   \\   \\\n        @   @   @   @   @\n'
        board = self._organize_board(board)
        return board

    def _organize_board(self, str_board: str) -> List[List[str]]:
        """Return a list of a list of strings, where each sublist
        is a line in the board game.

        This is a helper function for _make_board and __init__.

        Each sublist keeps its trailing '\\n' character (except the very
        last, whose newline is popped below).
        """
        board = list(str_board)
        outter_list = []
        i, j = 0, 0
        for ch in board:
            j += 1
            if ch == '\n':
                outter_list.append(board[i:j])
                i = j
        # remove the last \n
        length = len(outter_list)
        outter_list[length - 1].pop()
        return outter_list

    def __str__(self) -> str:
        r"""
        Return a string representation of the current state of the game.

        >>> shgs = StonehengeGameState()
        >>> print(shgs)
              @   @
             /   /
        @ - A - B
             \ / \
          @ - C   @
               \
                @
        >>> shgs2 = StonehengeGameState(True, 2)
        >>> print(shgs2)
                @   @
               /   /
          @ - A - B   @
             / \ / \ /
        @ - C - D - E
             \ / \ / \
          @ - F - G   @
               \   \
                @   @
        >>> print(StonehengeGameState(True, 3))
                  @   @
                 /   /
            @ - A - B   @
               / \ / \ /
          @ - C - D - E   @
             / \ / \ / \ /
        @ - F - G - H - I
             \ / \ / \ / \
          @ - J - K - L   @
               \   \   \
                @   @   @
        >>> print(shgs2.make_move('E'))
                @   @
               /   /
          @ - A - B   1
             / \ / \ /
        @ - C - D - 1
             \ / \ / \
          @ - F - G   1
               \   \
                @   @
        """
        # Render on a copy: 'p1'/'p2' cells display as '1'/'2' without
        # mutating the stored board.
        board = deepcopy(self.board)
        for board_line in board:
            for i in range(len(board_line)):
                if board_line[i] == 'p1':
                    board_line[i] = '1'
                elif board_line[i] == 'p2':
                    board_line[i] = '2'
        res = ''
        for board_line in board:
            res += ''.join(board_line)
        return res

    def get_possible_moves(self) -> List[str]:
        """
        Return all possible moves that can be applied to this state.

        >>> shgs = StonehengeGameState()
        >>> shgs.get_possible_moves()
        ['A', 'B', 'C']
        >>> shgs2 = StonehengeGameState(board_length=5)
        >>> shgs2.get_possible_moves()
        ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', \
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y']
        >>> new_state = shgs.make_move('A')
        >>> new_state.get_possible_moves()
        []
        >>> shgs = StonehengeGameState(board_length=2)
        >>> shgs.get_possible_moves()
        ['A', 'B', 'C', 'D', 'E', 'F', 'G']
        >>> first_move = shgs.make_move('A')
        >>> second_move = first_move.make_move('D')
        >>> second_move.get_possible_moves()
        ['B', 'C', 'E', 'F', 'G']
        """
        all_cells = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
                     'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
                     'W', 'X', 'Y']
        legal_moves = []
        ones, twos = 0, 0
        for board_line in self.board:
            for cell in board_line:
                if cell in all_cells:
                    legal_moves.append(cell)
                elif cell == 'p1':
                    ones += 1
                elif cell == 'p2':
                    twos += 1
        # No moves remain once either player has claimed at least half of
        # the ley-lines (the game is over).
        num_ley_lines = len(self.ley_lines)
        if ones / num_ley_lines >= 0.5:
            return []
        elif twos / num_ley_lines >= 0.5:
            return []
        return legal_moves

    def make_move(self, move: str) -> 'StonehengeGameState':
        """
        Return the GameState that results from applying move to this GameState.

        >>> shgs = StonehengeGameState(board_length=1)
        >>> shgs2 = StonehengeGameState(board_length=2)
        >>> shgs3 = StonehengeGameState(board_length=1)
        >>> repr(shgs) == repr(shgs2)
        False
        >>> repr(shgs) == repr(shgs3)
        True
        >>> new_state1 = shgs.make_move('A')
        >>> repr(new_state1) == repr(shgs3)
        False
        >>> repr(shgs) == repr(new_state1)
        False
        """
        # Produce a brand-new state (this state is left untouched) with
        # the turn flipped.
        new_ley_lines, ley_line_location = self._update_ley_lines(move)
        new_board = self._update_board(move, ley_line_location)
        new_state = StonehengeGameState(not self.p1_turn,
                                        ley_lines=new_ley_lines,
                                        board=new_board)
        return new_state

    def _update_ley_lines(self, move: str) -> Tuple[Dict[str, List[str]],
                                                    Dict[int, str]]:
        """Return an updated version of <self.ley_lines> based
        off of <move>.

        This is a helper function for make_move.
        """
        if self.get_current_player_name() == 'p1':
            player = '1'
        else:
            player = '2'
        # Mark the chosen cell with the mover's digit in every ley-line
        # that contains it.
        new_ley_lines = deepcopy(self.ley_lines)
        for ley_line in new_ley_lines.values():
            for i in range(len(ley_line)):
                if ley_line[i] == move:
                    ley_line[i] = player
        ley_line_location = self._check_ley_markers(new_ley_lines)
        return new_ley_lines, ley_line_location

    def _check_ley_markers(self,
                           ley_lines: Dict[str, List[str]]) -> Dict[int, str]:
        """Check all the <ley_lines> to see if the move that was just
        applied claimed any of ley-lines. Return a dictionary where each
        key is an int that represents the ley-line that was claimed and the
        value is a string of which player claimed it.
        """
        ley_line_location = {}
        for ley_line in ley_lines:
            ley_line_num = self._get_ley_line_location(ley_line)
            # ley_lines[ley_line] is a list of one of the ley-lines
            # A ley-line whose '@' marker is gone was claimed on an
            # earlier turn; claims are final, so skip it.
            if '@' not in ley_lines[ley_line]:
                continue
            ones, twos = 0, 0
            for i in range(len(ley_lines[ley_line])):
                if ley_lines[ley_line][i] == '1':
                    ones += 1
                elif ley_lines[ley_line][i] == '2':
                    twos += 1
            # Exclude the leading '@' marker from the cell count.
            ley_line_length = len(ley_lines[ley_line]) - 1
            if ones / ley_line_length >= 0.5:
                ley_lines[ley_line][0] = 'p1'
                ley_line_location[ley_line_num] = 'p1'
            elif twos / ley_line_length >= 0.5:
                ley_lines[ley_line][0] = 'p2'
                ley_line_location[ley_line_num] = 'p2'
        return ley_line_location

    def _get_ley_line_location(self, ley_line: str) -> int:
        """Return the integer that occurs in <ley_line>.

        This is a helper function for _check_ley_markers.
        """
        # e.g. 'ley_line12' -> 12.
        nums = []
        for ch in ley_line:
            if ch.isdigit():
                nums.append(ch)
        str_val = ''.join(nums)
        return int(str_val)

    def _update_board(self, move: str,
                      ley_line_location: Dict[int, str]) -> List[List[str]]:
        """Return an updated version of attribute <self.board> based off
        <move> and <ley_lines>.

        Part 1: Apply the move by modifying the board.
        Part 2: Change any ley-line markers, IF needed.
        """
        new_board = deepcopy(self.board)
        if self.get_current_player_name() == 'p1':
            player = '1'
        else:
            player = '2'
        # Part 1:
        # NOTE(review): loops until the move is found on the board, so an
        # illegal move (not present anywhere) would spin forever — callers
        # must pass a legal move.
        found = False
        while not found:
            for line in new_board:
                if self._update_board_part_1(move, player, line):
                    found = True
        # Part 2:
        for location, player in ley_line_location.items():
            self._update_board_part_2(location, player, new_board)
            # seen = 0
            # for board_line in new_board:
            #     for i in range(len(board_line)):
            #         if (board_line[i] == '@' or board_line[i] == 'p1' or
            #                 board_line[i] == 'p2'):
            #             seen += 1
            #         if seen == location:
            #             board_line[i] = player
        return new_board

    def _update_board_part_1(self, move: str, player: str,
                             line: List[str]) -> bool:
        """Update the board by replacing <move> with the <player>
        that made the move. Return True iff <line> has been modified.
        """
        for i in range(len(line)):
            if line[i] == move:
                line[i] = player
                return True
        return False

    def _update_board_part_2(self, location: int, player: str,
                             new_board: List[List[str]]) -> None:
        """
        Helper function for _update_board

        Replaces the <location>-th ley-line marker on the board (counting
        '@'/'p1'/'p2' cells in reading order) with <player>.
        """
        seen = 0
        for board_line in new_board:
            for i in range(len(board_line)):
                if (board_line[i] == '@' or board_line[i] == 'p1' or
                        board_line[i] == 'p2'):
                    seen += 1
                if seen == location:
                    board_line[i] = player

    # def _update_board_part_2b(self, location: int, player: str, seen: int,
    #                           board_line: List[str], i: int) -> None:
    #     """
    #
    #     """
    #     if (board_line[i] == '@' or board_line[i] == 'p1' or
    #             board_line[i] == 'p2'):
    #         seen += 1
    #     if seen == location:
    #         board_line[i] = player

    # def _update_board_part_2b(self, location: int, player: str, seen: int,
    #                           board_line: List[str]) -> int:
    #     """Modify <board_line> at the '@' given by <location>
    #     with <player>. <seen> is the number of '@' symbols
    #     seen so far.
    #     """
    #     for i in range(len(board_line)):
    #         if (board_line[i] == '@' or board_line[i] == 'p1' or
    #                 board_line[i] == 'p2'):
    #             seen += 1
    #         if seen == location:
    #             board_line[i] = player
    #     return seen

    def __repr__(self) -> Any:
        """
        Return a representation of this state (which can be used for
        equality testing).

        >>> shgs = StonehengeGameState(board_length=1)
        >>> shgs2 = StonehengeGameState(board_length=2)
        >>> shgs3 = StonehengeGameState(board_length=1)
        >>> repr(shgs) == repr(shgs2)
        False
        >>> repr(shgs) == repr(shgs3)
        True
        >>> print(repr(shgs))
        Current player: p1
        ley_line1: ['@', 'A']
        ley_line2: ['@', 'B', 'C']
        ley_line3: ['@', 'A', 'B']
        ley_line4: ['@', 'C']
        ley_line5: ['@', 'B']
        ley_line6: ['@', 'C', 'A']
        """
        res = ''
        if self.p1_turn:
            player = 'p1'
        else:
            player = 'p2'
        res += 'Current player: {}\n'.format(player)
        for ley_line in self.ley_lines:
            res += '{}: {}\n'.format(ley_line, self.ley_lines[ley_line])
        return res.strip()

    def rough_outcome(self) -> float:
        """
        Return an estimate in interval [LOSE, WIN] of best outcome the current
        player can guarantee from state self.

        Looks ahead two plies: WIN if some move wins immediately, LOSE if
        the game is already lost or every line lets the opponent win.

        >>> shgs = StonehengeGameState(board_length=1)
        >>> shgs2 = StonehengeGameState(board_length=2)
        >>> shgs.rough_outcome()
        1
        >>> shgs2.rough_outcome()
        0
        >>> new_state = shgs.make_move('A')
        >>> new_state.rough_outcome()
        -1
        """
        player, opponent = 'p1', 'p2'
        if player != self.get_current_player_name():
            player, opponent = 'p2', 'p1'
        if self._is_over(opponent):
            # Then the current player loses.
            return self.LOSE
        # for each move, check if the player that made the move won
        for move in self.get_possible_moves():
            new_state = self.make_move(move)  # opponent is the current player
            if self._is_over_player(new_state, player):
                return self.WIN
        for move in self.get_possible_moves():
            new_state = self.make_move(move)  # opponent is the current player
            for next_move in new_state.get_possible_moves():
                next_state = new_state.make_move(next_move)  # player
                if self._is_over_player(next_state, opponent):
                    return self.LOSE
            if self._is_over_player(new_state, opponent):
                return self.LOSE
        return self.DRAW

    def _is_over(self, opponent: str) -> bool:
        """Return true iff the game is over.
        """
        # Count the ley-line markers <opponent> has claimed on the board.
        score = 0
        for board_line in self.board:
            for cell in board_line:
                if cell == opponent:
                    score += 1
        num_ley_lines = len(self.ley_lines)
        if score / num_ley_lines >= 0.5:
            return True
        return False

    def _is_over_player(self, state: 'StonehengeGameState',
                        player: str) -> bool:
        """Return true iff the game is over.
        """
        # Same half-the-ley-lines rule as _is_over, but on <state>.
        score = 0
        for board_line in state.board:
            for cell in board_line:
                if cell == player:
                    score += 1
        num_ley_lines = len(state.ley_lines)
        if score / num_ley_lines >= 0.5:
            return True
        return False

    # def is_over_v2(self) -> bool:
    #     """
    #     Return whether or not this game is over at state.
    #
    #     This method is to be used for the iterative strategy.
    #
    #     >>> sh = StonehengeGameState(board_length=1)
    #     >>> sh2 = StonehengeGameState(board_length=2)
    #     >>> sh_a = sh.make_move('A')
    #     >>> sh_b = sh2.make_move('B')
    #     >>> sh.is_over_v2()
    #     False
    #     >>> sh_a.is_over_v2()
    #     True
    #     >>> sh_b.is_over_v2()
    #     False
    #     """
    #     ones, twos = 0, 0
    #     for board_line in self.board:
    #         for ch in board_line:
    #             if ch == 'p1':
    #                 ones += 1
    #             elif ch == 'p2':
    #                 twos += 1
    #
    #     num_ley_lines = len(self.ley_lines)
    #     if ones / num_ley_lines >= 0.5:
    #         return True
    #     elif twos / num_ley_lines >= 0.5:
    #         return True
    #     return False
# Run the embedded doctests, then the course's python_ta style checker
# (configured by a2_pyta.txt) when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
    from python_ta import check_all
    check_all(config="a2_pyta.txt")
|
""" Searching & Sorting Algorithms
=== University of Toronto ===
Department of Computer Science
__author__ = 'Eric K'
=== Module Description ===
This module contains some searching and sorting algorithms.
"""
from typing import List, Tuple
# Remark: recall that len([1, 2, 3]) == 3 and
# len(lst) - 1 is the last index of the list <lst>. Also,
# range(len(lst)) takes you from index 0 to len(lst) - 1
# because range is not inclusive.
def linear_search(lst: list, value: object) -> int:
    """
    Return the index of the first occurrence of <value> in <lst>,
    or -1 if <value> does not appear.

    >>> linear_search([1, 2, 3, 4], 3)
    2
    >>> linear_search([1, 2, 3, 4], 5)
    -1
    >>> linear_search([1, 2, 3, 4], 4)
    3
    >>> linear_search([1, 2, 3, 4], 1)
    0
    """
    for idx, item in enumerate(lst):
        if item == value:
            return idx
    return -1
def binary_search(lst: list, value: object) -> int:
    """
    Return the index of the first occurrence of <value> in <lst>,
    or -1 if <value> does not appear.

    Precondition: <lst> is sorted in non-decreasing order.
    """
    lo, hi = 0, len(lst) - 1
    # Invariant: items before lo are < value; items after hi are >= value.
    while lo <= hi:
        mid = (lo + hi) // 2
        if lst[mid] >= value:
            hi = mid - 1
        else:
            lo = mid + 1
    # lo is now the leftmost index where value could live.
    if lo < len(lst) and lst[lo] == value:
        return lo
    return -1
def bubble_sort(lst: list) -> None:
    """ Modify <lst> in place so its items are sorted from smallest
    to largest. Returns None.

    >>> L = [7, 3, 5, 2]
    >>> bubble_sort(L)
    >>> L
    [2, 3, 5, 7]
    """
    # Index of the last unsorted item. Using `> 0` (not `!= 0`) also
    # terminates for an empty list, where end starts at -1 and the old
    # `while end != 0` looped forever.
    end = len(lst) - 1
    while end > 0:
        # Bubble once through the unsorted section to move the largest
        # remaining item to index end.
        for i in range(end):  # index 0 to end - 1 because of lst[i + 1]
            if lst[i] > lst[i + 1]:
                lst[i], lst[i + 1] = lst[i + 1], lst[i]  # Swap the elements
        end -= 1
def get_index_of_smallest(lst: list, i: int) -> int:
    """ Return the index of the smallest item in lst[i:]
    (the first such index, if there are ties).

    >>> get_index_of_smallest([2, 7, 3, 5], 1)
    2
    """
    best = i
    for j in range(i + 1, len(lst)):
        if lst[j] < lst[best]:
            best = j
    return best
def selection_sort(lst: list) -> None:
    """ Modify <lst> in place so its items are sorted from smallest
    to largest. Returns None.

    The original doctest showed the sorted list as the return value of
    selection_sort, which returns None; it must echo L instead.

    >>> L = [7, 3, 5, 2]
    >>> selection_sort(L)
    >>> L
    [2, 3, 5, 7]
    """
    for i in range(len(lst)):
        # Swap the smallest item of lst[i:] into position i.
        k = get_index_of_smallest(lst, i)
        lst[k], lst[i] = lst[i], lst[k]
def insertion_sort(lst: list) -> None:
    """ Modify <lst> in place so its items are sorted from smallest
    to largest.

    >>> L = [7, 3, 5, 2]
    >>> insertion_sort(L)
    >>> L
    [2, 3, 5, 7]
    """
    # Grow the sorted prefix lst[:i] one item at a time.
    for i in range(len(lst)):
        _insert(lst, i)
def _insert(lst: list, i: int) -> None:
""" Move L[i] to where it belongs in L[i + 1]
Precondition: L[:i] is sorted from smallest to largest.
>>> L = [7, 3, 5, 2]
>>> _insert(L, 1)
>>> L
[3, 7, 5, 2]
"""
# The value to be inserted into the sorted part of the list
value = lst[i]
# Find the index, j, where the value belongs.
# Make room for the value by shifting
while i > 0 and lst[i - 1] > value:
# Shift L[j - 1] one position to the right
lst[i] = lst[i - 1]
i -= 1
# Put the value where it belongs
lst[i] = value
return None
# Alternative version
# j = i
# while j != 0 and lst[j - 1] > value:
# Shift L[j - 1] one position to the right
# lst[j] = lst[j - 1]
# j -= 1
# Put the value where it belongs
# lst[j] = value
# return None
def find_value_indexes(item_list: list, index_list: List[int],
                       v: object) -> List[int]:
    """Return, in increasing order, the indexes from <index_list> at
    which <v> appears in <item_list>.

    Precondition: the values in index_list are valid indexes in item_list.

    >>> find_value_indexes([6, 8, 8, 5, 8], [0, 2, 4], 8)
    [2, 4]
    """
    return [i for i, item in enumerate(item_list)
            if item == v and i in index_list]
def bubble_up(lst: list, start: int, end: int) -> None:
    """Bubble through lst[start:end + 1], swapping adjacent items that
    are out of order; the largest item of the span ends at index end.

    >>> L = [4, 3, 2, 1, 0]
    >>> bubble_up(L, 0, 3)
    >>> L
    [3, 2, 1, 4, 0]
    >>> L = [4, 3, 2, 1, 0]
    >>> bubble_up(L, 2, 4)
    >>> L
    [4, 3, 1, 0, 2]
    """
    for j in range(start, end):
        if lst[j] > lst[j + 1]:
            lst[j], lst[j + 1] = lst[j + 1], lst[j]
def bubble_down(lst: list, start: int, end: int) -> None:
    """Walk from index <end> down to <start>, swapping each item with its
    left neighbour when it is smaller.

    >>> L = [4, 3, 2, 1, 0]
    >>> bubble_down(L, 1, 3)
    >>> L
    [4, 1, 3, 2, 0]
    """
    # Same indices the original while-loop visited: end, end-1, ..., start+1.
    for j in range(end, start, -1):
        if lst[j] < lst[j - 1]:
            lst[j], lst[j - 1] = lst[j - 1], lst[j]
def insert(lst: List[int], v: int) -> None:
    """Insert v into lst just before the rightmost item greater than v, or at
    index 0 if no items are greater than v.

    >>> my_list = [3, 10, 4, 2]
    >>> insert(my_list, 5)
    >>> my_list
    [3, 5, 10, 4, 2]
    >>> my_list = [5, 4, 2, 10]
    >>> insert(my_list, 20)
    >>> my_list
    [20, 5, 4, 2, 10]
    """
    if not lst:
        lst.append(v)
        return
    # Scan right-to-left (excluding index 0) for the rightmost item > v.
    for pos in range(len(lst) - 1, 0, -1):
        if lst[pos] > v:
            lst.insert(pos, v)
            return
    # Nothing in lst[1:] was greater, so v goes to the front.
    lst.insert(0, v)
# *******************************************************
# Recursive sorting algorithms
# *******************************************************
def mergesort(lst: list) -> list:
"""Return a sorted list with the same elements as <lst>.
This is a *non-mutating* version of mergesort; it does not mutate the
input list.
"""
if len(lst) < 2:
return lst[:]
else:
# Divide the list into two parts, and sort them recursively.
mid = len(lst) // 2
left_sorted = mergesort(lst[:mid])
right_sorted = mergesort(lst[mid:])
# Merge the two sorted halves. Need a helper here!
return _merge(left_sorted, right_sorted)
def _merge(lst1: list, lst2: list) -> list:
"""Return a sorted list with the elements in <lst1> and <lst2>.
Precondition: <lst1> and <lst2> are sorted.
"""
index1 = 0
index2 = 0
merged = []
while index1 < len(lst1) and index2 < len(lst2):
if lst1[index1] <= lst2[index2]:
merged.append(lst1[index1])
index1 += 1
else:
merged.append(lst2[index2])
index2 += 1
# Now either index1 == len(lst1) or index2 == len(lst2).
assert index1 == len(lst1) or index2 == len(lst2)
# The remaining elements of the other list
# can all be added to the end of <merged>.
# Note that at most ONE of lst1[index1:] and lst2[index2:]
# is non-empty, but to keep the code simple, we include both.
return merged + lst1[index1:] + lst2[index2:]
def quicksort(lst: list) -> list:
"""Return a sorted list with the same elements as <lst>.
This is a *non-mutating* version of quicksort; it does not mutate the
input list.
"""
if len(lst) < 2:
return lst[:]
else:
# Pick pivot to be first element.
# Could make lots of other choices here (e.g., last, random)
pivot = lst[0]
# Partition rest of list into two halves
smaller, bigger = _partition(lst[1:], pivot)
# Recurse on each partition
smaller_sorted = quicksort(smaller)
bigger_sorted = quicksort(bigger)
# Return! Notice the simple combining step
return smaller_sorted + [pivot] + bigger_sorted
def _partition(lst: list, pivot: object) -> Tuple[list, list]:
"""Return a partition of <lst> with the chosen pivot.
Return two lists, where the first contains the items in <lst>
that are <= pivot, and the second is the items in <lst> that are > pivot.
"""
smaller = []
bigger = []
for item in lst:
if item <= pivot:
smaller.append(item)
else:
bigger.append(item)
return smaller, bigger
if __name__ == '__main__':
    # Run all the doctests embedded in the docstrings above.
    import doctest
    doctest.testmod()
|
import sympy as sp
from sympy.logic.boolalg import Or, And
# Identity element of each associative boolean operator; used by associate()
# when it is given an empty argument list.
_op_identity = {And: True, Or: False}
def check_value(order):
    """Raise ValueError unless 0 <= order <= 1.

    The original raised a bare ValueError with no message; the offending
    value is now included so failures are diagnosable.
    """
    if not (0 <= order <= 1):
        raise ValueError("order must be between 0 and 1, got %r" % (order,))
def unique(sequence):
    """Return the elements of <sequence> with duplicates removed.

    Uses dict.fromkeys so the first-seen order of elements is preserved;
    the original list(set(...)) returned the elements in arbitrary order.
    """
    return list(dict.fromkeys(sequence))
def conjuncts(sentence):
    """Return a list of the top-level conjuncts of <sentence>.

    A sentence that is not an And comes back as a one-element list
    (dissociate appends any non-And argument unchanged).
    """
    return dissociate(And, [sentence])
def disjuncts(sentence):
    """Return a list of the top-level disjuncts of <sentence>.

    A sentence that is not an Or comes back as a one-element list
    (dissociate appends any non-Or argument unchanged).
    """
    return dissociate(Or, [sentence])
def dissociate(op, args):
    """Flatten nested applications of the associative operator <op>.

    Returns a flat list of the sub-expressions of <args> whose head is
    not <op>, recursing through every <op> node.
    """
    flattened = []

    def collect(items):
        for item in items:
            if item.func == op:
                collect(item.args)
            else:
                flattened.append(item)

    collect(args)
    return flattened


def associate(op, args):
    """Return a flattened <op>-expression over <args>.

    An empty argument list yields the operator's identity element; a
    single argument is returned unchanged; otherwise <op> is applied to
    the flattened arguments.
    """
    flat = dissociate(op, args)
    if not flat:
        return _op_identity[op]
    if len(flat) == 1:
        return flat[0]
    return op(*flat)
def first(iterable, default=None):
    """Return the first element of <iterable>, or <default> when empty."""
    for item in iterable:
        return item
    return default
|
def myAbs(n):
    """Return the absolute value of n."""
    return n if n > 0 else -n


print(myAbs(10))
# x is a required positional parameter; n defaults to 2 (square).
def power(x, n=2):
    """Return x raised to the non-negative integer power n."""
    result = 1
    while n > 0:
        n -= 1
        result = result * x
    return result


print(power(2, 10))
def count(x):
    """Return the sum of the numbers in iterable x."""
    total = 0
    for value in x:
        total += value
    return total


print('count=', count([1, 2]))
# * collects any number of positional arguments (like Java varargs).
def count(*x):
    """Return the sum of all positional arguments."""
    total = 0
    for value in x:
        total += value
    return total


print('count=', count(1, 2, 3, 2.3))
# Renamed from `list`: binding a module variable named `list` shadows the
# builtin and would break any later list(...) call in this module.
nums = [1, 2, 3]
print('list sum=', count(*nums))
# Keyword arguments collected into **kw.
# NOTE(review): this definition is immediately replaced by the next
# `persion` definition below, so it is effectively dead code — confirm
# whether it is kept only as a teaching example.
def persion(name, age, **kw):
    """Return a formatted string of name, age and any extra keyword args."""
    return 'name=%s,age=%d,other=%s' % (name, age, kw)
def persion(name, age, **kw):
    """Print a person's info; <kw> may carry optional 'city'/'school' keys.

    Returns None.  (Removed the original's unused local alias `var = kw`.)
    """
    if 'city' in kw:
        print(kw['city'])
    if 'school' in kw:
        print(kw['school'])
    print('name:', name, 'age:', age, 'other:', kw, '\n')


persion('wuxinxi', 20, city='安徽亳州', school='亳州三中')
# Restrict the keyword names: only city and school are accepted, and they
# must be passed by name (position-independent).  To make one optional,
# give it a default, e.g. city='亳州'.
def persion(name, age, *, city, school):
    """Return a formatted description using keyword-only city/school."""
    return 'name=%s,age=%d,city=%s,school=%s' % (name, age, city, school)


print(persion('wuxinxi', 25, city='亳州', school='亳州第三完全中学'), '\n')
def person(name, age, *args, city, job):
    """Print positional info plus keyword-only city/job; returns None."""
    print(name, age, args, city, job)


print(person('wu', 12, 'jh', city='an', job='sz'))
print('\n', '------------------------分界线-------------------------------', '\n')
# Compute the product of one or more arguments.
def product(x, *s):
    """Return x multiplied by every extra positional argument."""
    result = x
    for factor in s:
        result *= factor
    return result


print('乘积结果=', product(1, 2, 3, 4, 5, 6, 7))
print('\n', '------------------------分界线-------------------------------', '\n')
def fact(n):
    """Return n! computed recursively.

    Base case widened from n == 1 to n <= 1: the original recursed
    forever (RecursionError) for fact(0).  fact(1) still returns 1.
    """
    if n <= 1:
        return 1
    return n * fact(n - 1)


print('递归算法:', fact(10),'\n')
# fact(1000) would blow the call stack (RecursionError).
# Optimisation below: a tail-style recursion that carries the running
# product along, so the return statement is a bare recursive call.
def fact(n):
    """Return n! via the accumulator-passing helper fact_iter."""
    return fact_iter(n, 1)


def fact_iter(n, p):
    """Return p * n * (n-1) * ... * 2, recursing with a running product."""
    return p if n == 1 else fact_iter(n - 1, n * p)


print('优化后递归算法:',fact(100),'\n')
print('\n', '------------------------分界线-------------------------------', '\n')
def move(n, a, b, c):
    """Print the Tower-of-Hanoi moves for n discs from peg a to peg c,
    using peg b as the spare.  Returns None."""
    if n == 1:
        print(a, '--->', c)
        return
    move(n - 1, a, c, b)
    move(1, a, b, c)
    move(n - 1, b, a, c)


print(move(4, 'A', 'B', 'C'))
"""
graph2analysis.py does the graph analysis for graph2 (built by graph2.py).
We compare the diameter of graph2 with the diameter of 20 random graphs.
The random graphs are erdos-renyi graphs where p=edge density of graph2
If a graph is not connected, we calculate the diameter by summing the
diameters of each connected component.
This file can also be used to analyze the control graphs. Just make sure
that you adjust which graph is being loaded.
"""
import praw
import requests
from ratelimit import limits, RateLimitException, sleep_and_retry
from backoff import on_exception, expo
from psaw import PushshiftAPI
import csv
import networkx as nx
import matplotlib.pyplot as plt
import math
def _component_diameter_sum(graph):
    """Return the sum of nx.diameter over each connected component.

    Used when a graph is disconnected, where nx.diameter alone would
    raise.  Extracted because the original duplicated this logic verbatim
    for the random graphs and for the link graph.
    """
    return sum(nx.diameter(graph.subgraph(comp))
               for comp in nx.connected_components(graph))


G = nx.read_adjlist("graph2c.adjlist")
nx.draw_networkx(G, with_labels=False, node_size=10, node_color="green", alpha=0.5)
plt.savefig("graph2c.png")
plt.show()
#print(G.edges)
print(len(G.nodes))
# Edge density: actual edges divided by the maximum possible edge count.
p = len(G.edges)/((len(G.nodes)*(len(G.nodes)-1))/2)
print("p = "+str(p))
print("Diameters of Erdos-Renyi Graphs")
for seed in range(20):
    er = nx.erdos_renyi_graph(len(G.nodes), p, seed = seed)
    if nx.is_connected(er):
        print(nx.diameter(er))
    else:
        print("Erdos-Renyi Graph is not connected. Sum of diameter of connected components:")
        print(_component_diameter_sum(er))
print("--------------------")
print("Diameter of Link Graph")
if nx.is_connected(G):
    print(nx.diameter(G))
else:
    print("Graph is not connected. Sum of diameter of connected components:")
    print(_component_diameter_sum(G))
# Reading train and building the decision tree
train_df = pd.read_csv('sample_train.csv')
# The free-text review column is not a feature; drop it before training.
train_df = train_df.drop(columns=['reviews.text'])
print('Building Tree...')
# NOTE(review): build_tree and the node's traverse() method come from
# code outside this excerpt.
root = build_tree(train_df)
print('Tree was built successfully')
print()
# Evaluating the accuracy by traversing the tree using the train sample:
# each row's prediction is compared against its last column (the label).
print('Processing Train...')
count = 0
output = []
for i, row in train_df.iterrows():
    output.append(root.traverse(row))
    # NOTE(review): iloc[i] assumes the default RangeIndex from read_csv —
    # confirm a custom index is never used.
    if train_df.iloc[i, -1] == output[-1]:
        count = count + 1
print('Accuracy = {:0.2f}%' .format((count / len(train_df.index) * 100)))
print('Train processed successfully')
print()
# Predicting the ratings of the test sample and saving them to result.csv.
test_df = pd.read_csv('sample_test.csv')
print('Processing Test...')
count = 0  # NOTE(review): unused in this section
output = []
for i, row in test_df.iterrows():
    output.append(root.traverse(row))
label = {'rating': output}
output_df = pd.DataFrame(label)
output_df.to_csv('result.csv', index=False)
print('Predictions saved to (result.csv)')
print()
# Interactively predict user-entered cases until the user answers N.
while True:
    print('Try new case? (Y/N)')
    choice = input()
    if choice == 'N' or choice == 'n':
        break
    elif choice == 'Y' or choice == 'y':
        print('Enter new case:')
        # One whitespace-separated integer per feature column.
        inputs = input().split()
        case = {}
        # NOTE(review): 25 is a hard-coded feature count — confirm it
        # matches len(train_df.columns) minus the label column.
        for i in range(0, 25):
            col_name = train_df.columns[i]
            case[col_name] = int(inputs[i])
        case = pd.Series(case)
        print('Prediction: ', root.traverse(case))
        print()
|
def type_finder(l):
    """Return the ints and floats from <l>, each converted to str.

    Uses exact type() checks, so bools (a subclass of int) are excluded.
    """
    return [str(element) for element in l
            if type(element) in (int, float)]


list_items = [1,2,3,4,(1,2,3,4,),{1:3,3:5,},'rohit','mohit',1.1,2.5]
print(type_finder(list_items))
|
# Read a name and print it reversed.
name = input("Enter your name :")
# Fixed typo in the user-facing message: "entred" -> "entered".
print(f"The reverse of entered name is {name[::-1]}")
# Read three comma-separated numbers and print their average.
# (Removed the dead commented-out single-input experiments; fixed the
# user-facing typos "seprated" -> "separated" and "entred" -> "entered".)
num1, num2, num3 = input("Enter three numbers comma separated : ").split(",")
average = (int(num1) + int(num2) + int(num3)) / 3
print(f"the average of entered values are {average}")
def cube_function(numbers):
    """Return a dict mapping each i in 1..numbers to i cubed."""
    return {i: i ** 3 for i in range(1, numbers + 1)}


num = 5
print(cube_function(num))
menu = {'noodle':'500원', 'ham':'200원', 'egg':'100원', 'spaghetti':'900원'}
# Build the "(name1 name2 ... )" listing string from the menu keys.
# Fixed: the original kept a manual counter `i` and incremented it inside a
# `for i in range(...)` loop (a no-op, since the for statement rebinds i),
# and wrapped input() in a redundant str() call.
menuList = list(menu.keys())
menuStr = '('
for name in menuList:
    menuStr += name + ' '
menuStr += ')'
while 1:
    print('안녕하세요 다음의 메뉴 중 원하는 메뉴를 선택하세요.')
    inputMenu = input(menuStr+' ')
    if inputMenu in menu:
        # Known menu item: look up and print its price.
        print(menu.get(inputMenu))
    else:
        print('그런 메뉴는 없습니다.')
import collections
class LRUCache:
    """A least-recently-used cache built on collections.OrderedDict.

    The dict's insertion order doubles as the recency order: the first
    entry is the least recently used, the last is the most recently used.
    """

    def __init__(self, limit=10):
        # Maximum number of entries the cache may hold.
        self.limit = limit
        # key -> value, ordered oldest-first.
        self.cache = collections.OrderedDict()

    def get(self, key):
        """Return the value for <key>, marking the pair most-recently
        used, or None when the key is not cached."""
        try:
            value = self.cache.pop(key)
        except KeyError:
            return None
        # Re-insert so this pair becomes the newest entry.
        self.cache[key] = value
        return value

    def set(self, key, value):
        """Insert or overwrite <key> as the most-recently-used entry,
        evicting the oldest entry when the cache is at capacity.

        The original left this method unfinished: the except branch was a
        bare `self.cache` expression (no eviction) and the pair was never
        stored at all.
        """
        try:
            # Overwriting: drop the stale entry so re-insertion resets order.
            self.cache.pop(key)
        except KeyError:
            # New key: evict the least-recently-used entry if at capacity.
            if len(self.cache) >= self.limit:
                self.cache.popitem(last=False)
        self.cache[key] = value
|
from logical.player import Player
import random
class Game:
    """Per-player game state plus domino-table helper checks.

    NOTE(review): several methods below take (mesa, fh) or treat `self`
    as a list of players rather than a Game instance — they read like
    loose helper functions grouped under the class; confirm the intended
    calling convention before refactoring.
    """
    def __init__(self, name, tiles, score, amount):
        self.player_name = name
        self.player_tiles = tiles
        self.player_score = score
        self.players_amount = amount
    # The game ends when the player runs out of turns (translated from the
    # original Spanish comment: "EL JUEGO ACABA CUANDO EL JUGADOR SE QUEDA
    # SIN TURNOS, TRADUCE A MANO EN EL CODIGO ANTERIOR").
    def player_turns(self):
        # Placeholder: simply returns the instance.
        return self
    def game(self):
        # Print one header line per player, then one line per turn number.
        for player_number in range(0, self.players_amount):
            print ("Jugador", player_number + 1)
        for turns in range(0, self.players_amount):
            print((turns + 1), ':', )
    def check(mesa, fh):
        """Return True when tile <fh> matches either end of table <mesa>.

        NOTE(review): no `self` parameter, and when mesa is empty and
        fh != [6, 6] the function falls through and returns None (falsy)
        rather than False — confirm both are intentional.
        """
        if fh == [6, 6] and len(mesa) == 0:
            return True
        if len(mesa) != 0:
            if mesa[0][0] == fh[0] or mesa[len(mesa) - 1][1] == fh[0] or mesa[0][0] == fh[1] or mesa[len(mesa) - 1][1] == fh[1]:
                return True
            else:
                return False
    def ganarcheck(self):
        """Return False when some player's hand is empty (game won), else True.

        NOTE(review): `self` is indexed like a list of players here, with
        only 2- and 4-player games handled.
        """
        if len(self) == 4:
            if len(self[0].player_tiles) == 0 or len(self[1].player_tiles) == 0 or len(self[2].player_tiles) == 0 or len(self[3].player_tiles) == 0:
                return False
        elif len(self) == 2:
            if len(self[0].player_tiles) == 0 or len(self[1].player_tiles) == 0:
                return False
        return True
    def check2(mesa, fh):
        """Return where tile <fh> can be played on <mesa>: "di" (both
        ends), "d" (right end), "i" (left end), or "" (nowhere / empty)."""
        m = ""
        if len(mesa) != 0:
            if ((mesa[0][0] == fh[0]) or (mesa[0][0] == fh[1])) and ((mesa[len(mesa) - 1][1] == fh[0]) or (mesa[len(mesa) - 1][1] == fh[1])):
                m = "di"
            elif (mesa[len(mesa) - 1][1] == fh[0]) or (mesa[len(mesa) - 1][1] == fh[1]):
                m = "d"
            elif (mesa[0][0] == fh[0]) or (mesa[0][0] == fh[1]):
                m = "i"
        return m
    def empty(self):
        """Clear every player's tile list in place and return the list of players."""
        for a in range(0, len(self)):
            del self[a].player_tiles[:]
        return self
    def randomiz(domino):
        """Pop a random tile from <domino>; return (tile, remaining tiles)."""
        a = random.choice(domino)
        fd = domino.pop(domino.index(a))
        return fd, domino
|
'''
Created on Jan 23, 2015
@author: Kwadwo Yeboah
A first principle implementation of a Priority Queue
'''
from math import floor
class PriorityQueue():
    """A min-heap priority queue implemented from first principles on a list."""

    def __init__(self):
        self.__myHeap = []

    def add(self, obj):
        """Append <obj> and restore the heap property; returns self (fluent)."""
        self.__myHeap.append(obj)
        self.siftUp(len(self) - 1)
        return self

    def peek(self):
        """Return, without removing, the smallest element."""
        return self.__myHeap[0]

    def remove(self):
        """Pop and return the smallest element.

        Raises Exception when the queue is empty.
        """
        if len(self.__myHeap) == 0:
            raise Exception("Cannot remove from empty Priority Queue")
        smallest = self.peek()
        if len(self.__myHeap) == 1:
            self.__myHeap = []
        else:
            # Move the last item to the root, then sift it down.
            self.__myHeap[0] = self.__myHeap.pop()
            self.siftDown(0)
        return smallest

    def siftDown(self, index):
        """Move the item at <index> down until no child is smaller."""
        heap = self.__myHeap
        child = index * 2 + 1
        if child >= len(heap):
            return
        # Prefer the smaller of the two children, if a second one exists.
        if child + 1 < len(heap) and heap[child] > heap[child + 1]:
            child += 1
        if heap[child] < heap[index]:
            heap[index], heap[child] = heap[child], heap[index]
            self.siftDown(child)

    def siftUp(self, index):
        """Move the item at <index> up while it is smaller than its parent."""
        heap = self.__myHeap
        parent = (index - 1) // 2
        if parent >= 0 and heap[index] < heap[parent]:
            heap[index], heap[parent] = heap[parent], heap[index]
            self.siftUp(parent)

    def empty(self):
        """Return True when the queue holds no elements."""
        return len(self) == 0

    def __len__(self):
        return len(self.__myHeap)

    def __str__(self):
        return str(self.__myHeap)
|
class Line:
    """One chat-log line: a date plus [time, name, content] fields."""

    def __init__(self, date, split_line_list):
        super(Line, self).__init__()
        self.line = " "
        self.date = date
        self.time = split_line_list[0]
        self.name = split_line_list[1]
        self.content = split_line_list[2]
        self.split_line_list = split_line_list
        self.word_list = []

    def splitWordsInList(self, count):
        """Append every <count>-character n-gram of the content to
        self.word_list and return that list."""
        if self.determineIfContentAddChangeLine():
            text = self.content + "\n"
        else:
            text = self.content
        # Slide a window of width <count> over the text.
        for start in range(len(text) - count + 1):
            self.word_list.append(text[start:start + count])
        return self.word_list

    def determineIfContentAddChangeLine(self, determine_number=0):
        """Return 1 when <determine_number> == 1, else 0.

        NOTE(review): always invoked above with no argument, so it always
        returns 0 — confirm whether a newline should ever be appended.
        """
        return 1 if determine_number == 1 else 0
def SumNum(p, q):
    """Recursively read integers from stdin, accumulating them into q,
    until a 0 is entered; return the accumulated sum (0 itself excluded)."""
    if p != 0:
        return SumNum(int(input()), q + p)
    return q


p = int(input())
q = 0
print(SumNum(p, q))
|
# Read five integers a, b, c, d, e.  Presumably a x b x c are box
# dimensions and d x e an opening; print YES when some pair of the three
# dimensions fits within d x e (in either orientation) — confirm intent.
a, b, c, d, e = (int(input()) for _ in range(5))
face_pairs = ((b, c), (a, c), (a, b))
fits = any((x <= d and y <= e) or (y <= d and x <= e) for x, y in face_pairs)
print('YES' if fits else 'NO')
|
#!/usr/bin/python3
# -*- coding: iso-8859-2 -*
print("Szukamy liczby sposobw, na jakie mona pokry plansz 4x4 identycznymi klockami 2x1.")
def totalWays(n):
    """Return the tiling count given by the Fibonacci-style recurrence
    ways(1) = 1, ways(2) = 2, ways(n) = ways(n-1) + ways(n-2);
    any n < 1 yields 0."""
    if n < 1:
        return 0
    if n <= 2:
        # ways(1) == 1, ways(2) == 2, i.e. simply n here.
        return n
    # A vertical tile leaves n-1 columns; two horizontal tiles leave n-2.
    return totalWays(n - 1) + totalWays(n - 2)
n = 4
print(totalWays(n))
|
#!/usr/bin/python3
# -*- coding: iso-8859-2 -*
class Node:
    """A node of a binary search tree."""

    def __init__(self, data=None, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

    def __str__(self):
        return str(self.data)

    def insert(self, node):
        """BST-insert <node> below self; duplicate keys are ignored."""
        if self.data < node.data:  # goes into the right subtree
            if self.right:
                self.right.insert(node)
            else:
                self.right = node
        elif self.data > node.data:  # goes into the left subtree
            if self.left:
                self.left.insert(node)
            else:
                self.left = node
        # equal keys: ignore duplicates

    def count(self):
        """Return the number of nodes in this subtree."""
        counter = 1
        if self.left:
            counter += self.left.count()
        if self.right:
            counter += self.right.count()
        return counter


class BinaryTree:
    """A binary search tree with aggregate helpers."""

    def __init__(self):
        self.root = None

    def size(self):
        """Return the number of nodes in the tree.

        Fixed: the original stored `self.size = 0`, which shadowed this
        method (calling tree.size() raised TypeError) and was never
        updated on insert; the count is now derived from the nodes.
        """
        return self.root.count() if self.root else 0

    def isEmpty(self):
        """Return True when the tree has no nodes."""
        return self.root is None

    def insert(self, value):
        """Insert <value>; duplicates are silently ignored."""
        if self.root:
            self.root.insert(Node(value))
        else:
            self.root = Node(value)

    def count_total(self, node):
        """Return the sum of all values in the subtree rooted at <node>."""
        suma = node.data
        if node.left is not None:
            suma += self.count_total(node.left)
        if node.right is not None:
            suma += self.count_total(node.right)
        return suma

    def count_leafes(self, node):
        """Return the number of leaves in the subtree rooted at <node>."""
        if node.left is None and node.right is None:
            return 1
        count = 0
        if node.left is not None:
            count += self.count_leafes(node.left)
        if node.right is not None:
            count += self.count_leafes(node.right)
        return count
# Build three sample trees and print their value totals and leaf counts.
tree = BinaryTree()
for value in (1, 2, 3, 4, 5, 6, 7):
    tree.insert(value)
print(tree.count_total(tree.root))   # 28
print(tree.count_leafes(tree.root))  # 1
tree2 = BinaryTree()
for value in (18, 20, 13, 21):
    tree2.insert(value)
print(tree2.count_total(tree2.root))   # 72
print(tree2.count_leafes(tree2.root))  # 2
tree3 = BinaryTree()
for value in (30, 15, 40, 7, 20, 45):
    tree3.insert(value)
print(tree3.count_total(tree3.root))   # 157
print(tree3.count_leafes(tree3.root))  # 3
# https://www.cs.usfca.edu/~galles/visualization/BST.html
|
#!/usr/bin/python3
# -*- coding: iso-8859-2 -*
def binarne_rek(L, left, right, y):
    """Recursive binary search: return an index of y within the half-open
    range L[left:right] of a sorted list L, or -1 when absent.

    Fixed: the original narrowed the upper bound with `right = dive - 1`
    while callers treat `right` as exclusive (they pass len(L)), so values
    at the left edge of a slice — e.g. the first list element — were
    never found.  The upper bound now stays exclusive (`right = dive`).
    """
    while left < right:
        dive = (left + right) // 2
        if y > L[dive]:
            return binarne_rek(L, dive + 1, right, y)
        elif y < L[dive]:
            # Keep `right` exclusive: the midpoint itself is ruled out.
            return binarne_rek(L, left, dive, y)
        else:
            return dive
    return -1  # value not present
print("12.2")
# Exercise binarne_rek over a sorted list; the inline comments give the
# position each lookup is expected to report.
liczby = [53, 107, 129, 174, 237, 238, 297, 338, 341, 353, 386, 387, 387, 460, 515, 566,580, 630, 653, 656, 659, 703,
          713, 714, 760, 768, 832, 839, 861, 883, 969, 970]
# NOTE(review): when a lookup returns -1, liczby[findPosistion] prints the
# *last* element (index -1) rather than the searched value — the first
# print below can therefore be misleading if the search misses.
findPosistion = binarne_rek(liczby, 0, len(liczby), 53)  # 0
print(liczby[findPosistion], "jest na pozycji ", findPosistion)
print("********")
findPosistion = binarne_rek(liczby, 0, len(liczby), 970)  # 31
print(liczby[findPosistion], "jest na pozycji ", findPosistion)
print("********")
findPosistion = binarne_rek(liczby, 0, len(liczby), 387)  # 11
print(liczby[findPosistion], "jest na pozycji ", findPosistion)
print("********")
findPosistion = binarne_rek(liczby, 0, len(liczby), -12)  # -1
print(-12, "jest na pozycji ", findPosistion)
print("********")
findPosistion = binarne_rek(liczby, 0, len(liczby), 3870)  # -1
print(3870, "jest na pozycji ", findPosistion)
|
#!/usr/bin/python3
"""
A function that queries the Reddit API and returns the number of subscribers
"""
from requests import get
def number_of_subscribers(subreddit):
    """Return the subscriber count of <subreddit>, or 0 on any failure."""
    url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)
    # Custom user-agent identifies the client; redirects are disabled so
    # an invalid subreddit (which redirects) is treated as a failure.
    response = get(url, headers={'user-agent': 'my-app/0.0.1'},
                   allow_redirects=False)
    if response.status_code != 200:
        return 0
    payload = response.json()
    if payload:
        data = payload.get('data')
        if data:
            return data.get('subscribers')
    return 0
|
from collections import Counter as c
import re
# The 32-letter Russian lowercase alphabet (without 'ё') used as the
# cipher domain throughout this script.
alphabet = 'абвгдежзийклмнопрстуфхцчшщъыьэюя'
# Load the ciphertext and normalise it to lowercase Cyrillic letters only.
file1 = open("var3.txt", "r", encoding='utf-8')
var3 = file1.read()
var3 = var3.lower()
# NOTE(review): the character class "[^А-аЯ-я]" is unusual — confirm it
# keeps exactly the intended Cyrillic range.
var3 = re.sub("[^А-аЯ-я]", "", var3)
#punctuations = [".", ",", "!", "?", ";", ":", "-", "1", "2", "3", "4","5", "6", "7","8","9", "0","—","»","«", " ", "’","/n"]
#for k in range(len(punctuations)):
#var3 = var3.replace(punctuations[k], "")
def encode_function(text, key):
    """Vigenère-encode <text> with <key> over the 32-letter alphabet."""
    key_length = len(key)
    encoded = []
    for position, letter in enumerate(text):
        shifted = (alphabet.index(letter)
                   + alphabet.index(key[position % key_length])) % 32
        if shifted == 6:
            # NOTE(review): index 6 ('ж') is mapped to 'е' in the original;
            # decrypt_function has no inverse for this, so encode/decode are
            # not symmetric for this letter — confirm whether it is intended.
            ciphered = 'е'
        else:
            # 1072 is ord('а'); the 32-letter alphabet is contiguous in Unicode.
            ciphered = chr(shifted + 1072)
        encoded.append(ciphered)
    return ''.join(encoded)
def conformity_index(text):
    """Return the index of coincidence of <text>:
    sum over letters of f*(f-1), divided by n*(n-1)."""
    n = len(text)
    counts = c(text)
    total = sum(freq * (freq - 1) for freq in counts.values())
    return total / (n * (n - 1))
def nearest(lst, target):
    """Return the element of <lst> closest in value to <target>."""
    return min(lst, key=lambda value: abs(value - target))
def compare_indexes(text):
    """Print the index of coincidence of <text> encoded under each of the
    five module-level keys, followed by the index of the global var3.

    NOTE(review): relies on the globals key1..key5 and var3, which are
    assigned further down in this file, so this must not be called before
    those assignments run.  It is never called within this excerpt.
    """
    i_list = []  # NOTE(review): never used
    our_i = conformity_index(var3)
    i1 = conformity_index(encode_function(text, key1))
    i2 = conformity_index(encode_function(text, key2))
    i3 = conformity_index(encode_function(text, key3))
    i4 = conformity_index(encode_function(text, key4))
    i14 = conformity_index(encode_function(text, key5))
    print(i1,i2,i3,i4,i14, our_i)
def find_key(text):
    """Estimate the Vigenère key length of <text> and print it.

    For each candidate length (2..31) the text is split into that many
    interleaved columns; the average index of coincidence of the columns
    is computed, and the candidate whose average is nearest 0.055 is
    printed.  NOTE(review): 0.055 is presumably the expected index for
    natural-language text in this alphabet — confirm the constant.
    """
    y = []
    for word_length in range(2,32):
        index_sum = 0
        for i in range(word_length):
            a = []
            # Column i holds every word_length-th character starting at i.
            for j in range(i, len(text), word_length):
                a.append(text[j])
            print(conformity_index(''.join(a)))  # debug output
            index_sum += conformity_index(''.join(a))
            print(a)  # debug output
        y.append(index_sum/word_length)
        print()
    nearest_val = nearest(y, 0.055)
    # Candidate lengths start at 2, hence the +2 offset into y.
    print(y.index(nearest_val)+2)
    print(y)
find_key(var3)
def key_value(text, key_length, most_popular):
    """Recover the key (as a list of characters) by frequency analysis.

    Splits <text> into <key_length> interleaved columns, takes the most
    frequent letter of each column, and shifts it against <most_popular>
    (the letter assumed to be most frequent in the plaintext language)
    modulo the 32-letter alphabet.
    """
    a = []
    k = []
    z = ''
    for i in range(0,key_length):
        z = ''
        # Column i: every key_length-th character starting at offset i.
        for j in range(i, len(text), key_length):
            z +=text[j]
        a.append(z)
    print('asdas')  # NOTE(review): leftover debug print
    print(a)
    for l in range(key_length):
        #print(list(dict(c(a[l]).most_common(1)).keys())[0])
        most_popular_fragment = list(dict(c(a[l]).most_common(1)).keys())[0]
        # Shift difference modulo 32 maps the column's top letter onto the
        # assumed plaintext top letter, yielding one key character.
        x = (alphabet.index(most_popular_fragment) - alphabet.index(most_popular) ) % 32
        # 1072 is ord('а'); the 32-letter alphabet is contiguous in Unicode.
        x = chr(x + 1072)
        k.append(x)
    print(k)
    return k
# Recover a 14-character key assuming 'о' is the most frequent plaintext letter.
keyf = ''.join(key_value(var3, 14, 'о'))
def decrypt_function(text, key):
    """Vigenère-decode <text> with <key> over the 32-letter alphabet.

    Fixed: the key index was hard-coded as `key[i % 14]` (the length of
    one particular key) instead of `key[i % len(key)]`, which silently
    broke decryption for any other key length (or raised IndexError for
    shorter keys).
    """
    decoded_text = []
    key_length = len(key)
    for i in range(len(text)):
        y = (alphabet.index(text[i]) - alphabet.index(key[i % key_length]) + 32) % 32
        # 1072 is ord('а'); the 32-letter alphabet is contiguous in Unicode.
        decoded_text.append(chr(y + 1072))
    return ''.join(decoded_text)
# Decrypt the ciphertext with the recovered key and print the plaintext.
print(decrypt_function(var3,keyf))
# Load a reference plaintext used to generate the encoded test files below.
# NOTE(review): absolute Windows path — this breaks on any other machine.
file = open("C:/Users/user/Desktop/text.txt", "r", encoding='utf-8')
text = file.read()
text = re.sub("[^А-аЯ-я]", "", text)
text = text.lower()
# Test keys of length 2, 3, 4, 5 and 14 (also read by compare_indexes above).
key1 = 'во'
key2 = 'так'
key3 = 'рыба'
key4 = 'робот'
key5 = 'деконтаминация'
#print(encode_function(text, key1))
# NOTE(review): output files are opened without an explicit encoding and
# are never closed — consider `with open(..., 'w', encoding='utf-8')`.
open('text_2.txt', 'w').write(encode_function(text, key1))
open('text_3.txt', 'w').write(encode_function(text, key2))
open('text_4.txt', 'w').write(encode_function(text, key3))
open('text_5.txt', 'w').write(encode_function(text, key4))
open('text_14.txt', 'w').write(encode_function(text, key5))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.