path: string (length 13 to 17)
screenshot_names: list (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 distinct value: "code")
122260046/cell_19
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')

# Scale pixel values to [0, 1] and reshape to 28x28 single-channel images
X = train.drop('label', axis=1).values / 255.0
X = X.reshape(-1, 28, 28, 1)

# One-hot encode the digit labels
Y = to_categorical(train['label'].values, num_classes=10)

# Hold out 20% of the training data for validation
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
print(f'X_train: {X_train.shape}, y_train: {y_train.shape}')
print('-' * 50)
print(f'X_test: {X_test.shape}, y_test: {y_test.shape}')
code
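A note on the split above: train_test_split draws a random 20% holdout, so per-digit class proportions can drift slightly between splits. A minimal sketch of a stratified variant, assuming the X and one-hot Y from the cell above (the random_state is an arbitrary illustrative choice):

from sklearn.model_selection import train_test_split
import numpy as np

# Hypothetical variant: stratify on the integer labels so every digit
# keeps the same share in the train and validation splits.
labels = np.argmax(Y, axis=1)  # recover integer labels from one-hot Y
X_train, X_test, y_train, y_test = train_test_split(
    X, Y, test_size=0.2, stratify=labels, random_state=42)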
122260046/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

# Walk the Kaggle input directory and list every available data file
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122260046/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
test.info()
code
122260046/cell_18
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import to_categorical
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')

X = train.drop('label', axis=1).values / 255.0
X = X.reshape(-1, 28, 28, 1)

# One-hot encode the digit labels; the cell displays the resulting shape
Y = to_categorical(train['label'].values, num_classes=10)
Y.shape
code
122260046/cell_28
[ "image_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')

def select_pic(i):
    # Reshape one flattened test row back into a 28x28 image
    arr = np.array(test[i:i + 1])
    pix = arr.reshape(arr.shape[0], 28, 28)
    img = pix[0]

for i in range(0, 1):
    r = np.random.randint(i, 10000)
    select_pic(r)

X = train.drop('label', axis=1).values / 255.0
X = X.reshape(-1, 28, 28, 1)
Y = to_categorical(train['label'].values, num_classes=10)

A = test.values / 255.0
A = A.reshape(-1, 28, 28, 1)

X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=5, batch_size=200)

# Take the argmax over class probabilities as the predicted digit
predict = [np.argmax(i) for i in model.predict(A)]
len(predict)
code
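The fitted History object above records per-epoch loss and accuracy; a short sketch of how one might visualize it, assuming the history variable from the cell above (the key names follow TF2's metrics=['accuracy'] convention):

import matplotlib.pyplot as plt
import pandas as pd

# Plot training vs. validation curves from the Keras History object.
hist = pd.DataFrame(history.history)
hist[['loss', 'val_loss']].plot(title='Loss per epoch')
hist[['accuracy', 'val_accuracy']].plot(title='Accuracy per epoch')
plt.show()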
122260046/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')

X = train.drop('label', axis=1).values / 255.0
X = X.reshape(-1, 28, 28, 1)
Y = train['label'].values
Y.shape
code
122260046/cell_16
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')

# Normalize and reshape the test images the same way as the training set
A = test.values / 255.0
A = A.reshape(-1, 28, 28, 1)
A.shape
code
122260046/cell_31
[ "text_html_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')

def select_pic(i):
    # Reshape one flattened test row back into a 28x28 image
    arr = np.array(test[i:i + 1])
    pix = arr.reshape(arr.shape[0], 28, 28)
    img = pix[0]

for i in range(0, 1):
    r = np.random.randint(i, 10000)
    select_pic(r)

X = train.drop('label', axis=1).values / 255.0
X = X.reshape(-1, 28, 28, 1)
Y = to_categorical(train['label'].values, num_classes=10)

A = test.values / 255.0
A = A.reshape(-1, 28, 28, 1)

X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=5, batch_size=200)
pd.DataFrame(history.history)

predict = [np.argmax(i) for i in model.predict(A)]

# ImageId is 1-based in the competition's submission format
ids = [i + 1 for i in test.index]
submission = pd.DataFrame(columns=['ImageId', 'Label'])
submission['ImageId'] = ids
submission['Label'] = predict
submission
code
122260046/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')

X = train.drop('label', axis=1).values / 255.0
X = X.reshape(-1, 28, 28, 1)
X.shape
code
122260046/cell_22
[ "text_plain_output_1.png" ]
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
from tensorflow.keras.utils import plot_model

model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

# Render the layer graph top-to-bottom with shapes and activations
plot_model(model, show_shapes=True, show_layer_names=False, dpi=60, show_layer_activations=True, rankdir='TB')
code
74070988/cell_11
[ "text_html_output_1.png" ]
import os
import glob
import gc
import yaml
import math
import warnings
from functools import reduce
from tqdm import tqdm
import pandas as pd
import numpy as np
from finta import TA
from numba import jit
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from joblib import Parallel, delayed

warnings.simplefilter('ignore')
pd.set_option('max_column', 300)

DATA_PATH = '../input/optiver-realized-volatility-prediction'
__DATA_DIRS__ = ['book_train.parquet', 'trade_train.parquet', 'book_test.parquet', 'trade_test.parquet']
FILE_LIST_MAP = {'book_train': glob.glob(os.path.join(DATA_PATH, 'book_train.parquet/*')),
                 'trade_train': glob.glob(os.path.join(DATA_PATH, 'trade_train.parquet/*')),
                 'book_test': glob.glob(os.path.join(DATA_PATH, 'book_test.parquet/*')),
                 'trade_test': glob.glob(os.path.join(DATA_PATH, 'trade_test.parquet/*'))}
ORDER_PRICE = ['bid_price1', 'bid_price2', 'ask_price1', 'ask_price2']
ORDER_VOLUME = ['bid_size1', 'bid_size2', 'ask_size1', 'ask_size2']
# All public indicator callables exposed by finta's TA class
TI_NAMES = [func for func in dir(TA) if callable(getattr(TA, func)) and (not func.startswith('_'))]

def get_wap(df_book):
    """Compute estimated price series.

    Parameters:
        df_book: pd.DataFrame, raw information of book data

    Return:
        wap: pd.DataFrame, estimated price series with time identifiers
    """
    df_book_ = df_book.copy()
    df_book_['wap1'] = (df_book_['bid_price1'] * df_book_['ask_size1'] + df_book_['ask_price1'] * df_book_['bid_size1']) / (df_book_['bid_size1'] + df_book_['ask_size1'])
    wap = df_book_.loc[:, ['time_id', 'seconds_in_bucket', 'wap1']]
    return wap

def get_ohlcv(prices, volumes, scale=10):
    """Return OHLCV of stock price based on the Kline scale (sec).

    Parameters:
        prices: pd.Series, the estimated price series
        volumes: pd.Series, the trading volume series
        scale: int, the scale of the Kline (sec), default=10

    Return:
        OHLCV: pd.DataFrame, four prices and trading volumes of the stock based on the Kline scale
    """
    if 600 % scale != 0:
        raise ValueError('Choose scale divisible by 600 seconds...')
    OHLCV = pd.DataFrame(columns=['open', 'high', 'low', 'close', 'volume'])
    for i in range(0, 600, scale):
        p_window = prices[i:i + scale]
        v_window = volumes[i:i + scale]
        p_stick = {'open': p_window.iloc[0], 'high': np.max(p_window), 'low': np.min(p_window), 'close': p_window.iloc[-1], 'volume': np.sum(v_window)}
        OHLCV = OHLCV.append(p_stick, ignore_index=True)
    OHLCV.insert(0, column='sec', value=[scale * (i + 1) for i in range(0, 600 // scale)])
    return OHLCV

def ffill(df):
    """Forward fill information in order book data, followed by bfill to avoid bug in filler data."""
    df_ = df.copy()
    df_.set_index(['time_id', 'seconds_in_bucket'], inplace=True)
    df_ = df_.reindex(pd.MultiIndex.from_product([df_.index.levels[0], np.arange(0, 600)], names=['time_id', 'seconds_in_bucket']), method='ffill')
    df_ = df_.reindex(pd.MultiIndex.from_product([df_.index.levels[0], np.arange(0, 600)], names=['time_id', 'seconds_in_bucket']), method='bfill')
    df_.reset_index(inplace=True)
    return df_

df_order = pd.read_parquet(os.path.join(DATA_PATH, 'book_train.parquet/stock_id=0/'))
df_trade = pd.read_parquet(os.path.join(DATA_PATH, 'trade_train.parquet/stock_id=0/'))
df_order = df_order[df_order['time_id'] == 5]
df_order = ffill(df_order)
df_trade = df_trade[df_trade['time_id'] == 5]

wap = get_wap(df_order)
df = wap.merge(df_trade.loc[:, ['seconds_in_bucket', 'size']], on=['seconds_in_bucket'], how='outer')
df.fillna(0, inplace=True)
ohlcv = get_ohlcv(df['wap1'], df['size'], scale=10)

fig = make_subplots(rows=2, cols=1, shared_xaxes=True)
fig.add_trace(go.Candlestick(x=ohlcv['sec'], open=ohlcv['open'], high=ohlcv['high'], low=ohlcv['low'], close=ohlcv['close']), row=1, col=1)
fig.add_trace(go.Bar(x=ohlcv['sec'], y=ohlcv['volume']), row=2, col=1)
fig.update(layout_xaxis_rangeslider_visible=False)
fig.show()
code
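get_wap above implements the level-1 weighted average price, wap1 = (bid_price1 * ask_size1 + ask_price1 * bid_size1) / (bid_size1 + ask_size1), which weights each side's price by the opposite side's size. A tiny worked example on made-up numbers, assuming the get_wap defined in the cell above:

import pandas as pd

# Hypothetical one-row book snapshot: bid 1.00 x 300, ask 1.02 x 100.
book = pd.DataFrame({'time_id': [5], 'seconds_in_bucket': [0],
                     'bid_price1': [1.00], 'ask_price1': [1.02],
                     'bid_size1': [300], 'ask_size1': [100]})
wap = get_wap(book)
# (1.00*100 + 1.02*300) / (300 + 100) = 1.015 -> closer to the ask,
# since the larger bid size signals buying pressure toward the ask price.
print(wap['wap1'].iloc[0])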
74070988/cell_15
[ "text_html_output_2.png" ]
import os
import glob
import gc
import yaml
import math
import warnings
from functools import reduce
from tqdm import tqdm
import pandas as pd
import numpy as np
from finta import TA
from numba import jit
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from joblib import Parallel, delayed

warnings.simplefilter('ignore')
pd.set_option('max_column', 300)

DATA_PATH = '../input/optiver-realized-volatility-prediction'
__DATA_DIRS__ = ['book_train.parquet', 'trade_train.parquet', 'book_test.parquet', 'trade_test.parquet']
FILE_LIST_MAP = {'book_train': glob.glob(os.path.join(DATA_PATH, 'book_train.parquet/*')),
                 'trade_train': glob.glob(os.path.join(DATA_PATH, 'trade_train.parquet/*')),
                 'book_test': glob.glob(os.path.join(DATA_PATH, 'book_test.parquet/*')),
                 'trade_test': glob.glob(os.path.join(DATA_PATH, 'trade_test.parquet/*'))}
ORDER_PRICE = ['bid_price1', 'bid_price2', 'ask_price1', 'ask_price2']
ORDER_VOLUME = ['bid_size1', 'bid_size2', 'ask_size1', 'ask_size2']
TI_NAMES = [func for func in dir(TA) if callable(getattr(TA, func)) and (not func.startswith('_'))]

def get_wap(df_book):
    """Compute estimated price series.

    Parameters:
        df_book: pd.DataFrame, raw information of book data

    Return:
        wap: pd.DataFrame, estimated price series with time identifiers
    """
    df_book_ = df_book.copy()
    df_book_['wap1'] = (df_book_['bid_price1'] * df_book_['ask_size1'] + df_book_['ask_price1'] * df_book_['bid_size1']) / (df_book_['bid_size1'] + df_book_['ask_size1'])
    wap = df_book_.loc[:, ['time_id', 'seconds_in_bucket', 'wap1']]
    return wap

def get_ohlcv(prices, volumes, scale=10):
    """Return OHLCV of stock price based on the Kline scale (sec).

    Parameters:
        prices: pd.Series, the estimated price series
        volumes: pd.Series, the trading volume series
        scale: int, the scale of the Kline (sec), default=10

    Return:
        OHLCV: pd.DataFrame, four prices and trading volumes of the stock based on the Kline scale
    """
    if 600 % scale != 0:
        raise ValueError('Choose scale divisible by 600 seconds...')
    OHLCV = pd.DataFrame(columns=['open', 'high', 'low', 'close', 'volume'])
    for i in range(0, 600, scale):
        p_window = prices[i:i + scale]
        v_window = volumes[i:i + scale]
        p_stick = {'open': p_window.iloc[0], 'high': np.max(p_window), 'low': np.min(p_window), 'close': p_window.iloc[-1], 'volume': np.sum(v_window)}
        OHLCV = OHLCV.append(p_stick, ignore_index=True)
    OHLCV.insert(0, column='sec', value=[scale * (i + 1) for i in range(0, 600 // scale)])
    return OHLCV

def ffill(df):
    """Forward fill information in order book data, followed by bfill to avoid bug in filler data."""
    df_ = df.copy()
    df_.set_index(['time_id', 'seconds_in_bucket'], inplace=True)
    df_ = df_.reindex(pd.MultiIndex.from_product([df_.index.levels[0], np.arange(0, 600)], names=['time_id', 'seconds_in_bucket']), method='ffill')
    df_ = df_.reindex(pd.MultiIndex.from_product([df_.index.levels[0], np.arange(0, 600)], names=['time_id', 'seconds_in_bucket']), method='bfill')
    df_.reset_index(inplace=True)
    return df_

df_order = pd.read_parquet(os.path.join(DATA_PATH, 'book_train.parquet/stock_id=0/'))
df_trade = pd.read_parquet(os.path.join(DATA_PATH, 'trade_train.parquet/stock_id=0/'))
df_order = df_order[df_order['time_id'] == 5]
df_order = ffill(df_order)
df_trade = df_trade[df_trade['time_id'] == 5]

wap = get_wap(df_order)
df = wap.merge(df_trade.loc[:, ['seconds_in_bucket', 'size']], on=['seconds_in_bucket'], how='outer')
df.fillna(0, inplace=True)
ohlcv = get_ohlcv(df['wap1'], df['size'], scale=10)

fig = make_subplots(rows=2, cols=1, shared_xaxes=True)
fig.add_trace(go.Candlestick(x=ohlcv['sec'], open=ohlcv['open'], high=ohlcv['high'], low=ohlcv['low'], close=ohlcv['close']), row=1, col=1)
fig.add_trace(go.Bar(x=ohlcv['sec'], y=ohlcv['volume']), row=2, col=1)
fig.update(layout_xaxis_rangeslider_visible=False)
fig.show()

def get_dataset(datatype, n_jobs, scale):
    """Return processed dataset after running parallel dataset generation.

    Parameters:
        datatype: str, dataset type, the choices are as follows: {'train', 'test'}
        n_jobs: int, num of processors to work
        scale: int, the scale of the Kline (sec)

    Return:
        df_proc: pd.DataFrame, dataset containing derived technical indicators
    """
    df_proc = Parallel(n_jobs=n_jobs)(
        delayed(gen_dataset)(book_file, trade_file, scale)
        for book_file, trade_file in tqdm(zip(sorted(FILE_LIST_MAP[f'book_{datatype}']), sorted(FILE_LIST_MAP[f'trade_{datatype}'])))
    )
    df_proc = pd.concat(df_proc, ignore_index=True)
    return df_proc

def gen_dataset(book_file, trade_file, scale):
    """Generate dataset for one stock.

    Parameters:
        book_file: str, file path of the book data
        trade_file: str, file path of the trade data
        scale: int, the scale of the Kline (sec)
    """
    assert book_file.split('=')[1] == trade_file.split('=')[1]
    stock_id = book_file.split('=')[1]
    df_book = pd.read_parquet(book_file)
    df_book = ffill(df_book)
    df_trade = pd.read_parquet(trade_file)
    df = get_wap(df_book)
    df = df.merge(df_trade.loc[:, ['time_id', 'seconds_in_bucket', 'size']], on=['time_id', 'seconds_in_bucket'], how='outer')
    df.fillna(0, inplace=True)
    del df_book, df_trade
    _ = gc.collect()
    tis = get_tis(df, scale)
    stats = {col: ['mean', 'median', 'min', 'max', 'std'] for col in tis.columns if col != 'time_id'}
    df_stats = cal_stats(tis, stats)
    df_stats['row_id'] = df_stats['time_id'].apply(lambda t: f'{stock_id}-{t}')
    df_stats['stock_id'] = int(stock_id)
    return df_stats

def get_tis(df, scale):
    """Compute all technical indicators provided by finta.

    Parameters:
        df: pd.DataFrame, book data merged with trade data
        scale: int, the scale of the Kline (sec)
    """
    tis = pd.DataFrame()
    for time_id, gp in df.groupby('time_id'):
        ohlcv_ = get_ohlcv(gp['wap1'], gp['size'], scale=scale)
        ohlcv_.set_index(pd.DatetimeIndex(ohlcv_['sec']), inplace=True)
        ohlcv_.drop('sec', axis=1, inplace=True)
        for ti_name, ti in TIS.items():
            result = ti(ohlcv_)
            if type(result) == pd.core.series.Series:
                result.name = ti_name
            else:
                result.columns = [f'{ti_name}_{col}' for col in result.columns]
            ohlcv_ = ohlcv_.merge(result, right_index=True, left_index=True)
        ohlcv_['time_id'] = int(time_id)
        tis = pd.concat([tis, ohlcv_], ignore_index=True)
    return tis

def cal_stats(df, ft_stats):
    """Calculate specified stats for given dataframe.

    Parameters:
        df: pd.DataFrame, dataframe containing raw features
        ft_stats: dict[str, list], mapping relationship between features and the stats (e.g., mean, median, min, max, std)

    Return:
        df_stats: pd.DataFrame, dataframe containing derived stats
    """
    df_ = df.groupby(by=['time_id'])
    df_stats = df_.agg(ft_stats)
    df_stats.columns = ['_'.join(sub_str) for sub_str in df_stats.columns]
    df_stats.reset_index(inplace=True)
    return df_stats

TIS = {'ADL': getattr(TA, 'ADL')}
TIS
code
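TIS above is seeded with a single indicator (ADL). A sketch of how the full finta catalogue could be wired in, reusing the ohlcv frame built earlier in the cell; the try/except filter for indicators that need extra arguments is an assumption, not part of the original notebook:

from finta import TA
import pandas as pd

# Hypothetical: extend TIS to every public finta indicator that evaluates
# cleanly on the sample OHLCV frame built above.
sample = ohlcv.set_index(pd.DatetimeIndex(ohlcv['sec'])).drop('sec', axis=1)
TIS = {}
for name in TI_NAMES:
    ti = getattr(TA, name)
    try:
        ti(sample)         # probe on the sample frame
        TIS[name] = ti
    except Exception:
        pass               # indicators requiring extra args are skipped
print(len(TIS), 'indicators usable')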
74070988/cell_10
[ "text_plain_output_1.png" ]
import os
import glob
import gc
import yaml
import math
import warnings
from functools import reduce
from tqdm import tqdm
import pandas as pd
import numpy as np
from finta import TA
from numba import jit
from plotly.subplots import make_subplots
import plotly.graph_objects as go
from joblib import Parallel, delayed

warnings.simplefilter('ignore')
pd.set_option('max_column', 300)

DATA_PATH = '../input/optiver-realized-volatility-prediction'
__DATA_DIRS__ = ['book_train.parquet', 'trade_train.parquet', 'book_test.parquet', 'trade_test.parquet']
FILE_LIST_MAP = {'book_train': glob.glob(os.path.join(DATA_PATH, 'book_train.parquet/*')),
                 'trade_train': glob.glob(os.path.join(DATA_PATH, 'trade_train.parquet/*')),
                 'book_test': glob.glob(os.path.join(DATA_PATH, 'book_test.parquet/*')),
                 'trade_test': glob.glob(os.path.join(DATA_PATH, 'trade_test.parquet/*'))}
ORDER_PRICE = ['bid_price1', 'bid_price2', 'ask_price1', 'ask_price2']
ORDER_VOLUME = ['bid_size1', 'bid_size2', 'ask_size1', 'ask_size2']
TI_NAMES = [func for func in dir(TA) if callable(getattr(TA, func)) and (not func.startswith('_'))]

def ffill(df):
    """Forward fill information in order book data, followed by bfill to avoid bug in filler data."""
    df_ = df.copy()
    df_.set_index(['time_id', 'seconds_in_bucket'], inplace=True)
    df_ = df_.reindex(pd.MultiIndex.from_product([df_.index.levels[0], np.arange(0, 600)], names=['time_id', 'seconds_in_bucket']), method='ffill')
    df_ = df_.reindex(pd.MultiIndex.from_product([df_.index.levels[0], np.arange(0, 600)], names=['time_id', 'seconds_in_bucket']), method='bfill')
    df_.reset_index(inplace=True)
    return df_

df_order = pd.read_parquet(os.path.join(DATA_PATH, 'book_train.parquet/stock_id=0/'))
df_trade = pd.read_parquet(os.path.join(DATA_PATH, 'trade_train.parquet/stock_id=0/'))
df_order = df_order[df_order['time_id'] == 5]
df_order = ffill(df_order)
df_trade = df_trade[df_trade['time_id'] == 5]
df_order.head()
code
74070988/cell_5
[ "text_plain_output_1.png" ]
!pip install finta --no-index --find-links=file:///kaggle/input/fin-ta/finta
code
88086228/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.countplot(x='Survived', hue='Sex', data=train)
code
88086228/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test.shape
code
88086228/cell_9
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.head(2)
code
88086228/cell_25
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.boxplot(x='Survived', y='Fare', data=train)
code
88086228/cell_57
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')

train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)

def train_age(cols):
    # Impute missing ages with the per-class median observed in the train set
    Age = cols[0]
    Pclass = cols[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 37
        if Pclass == 2:
            return 29
        else:
            return 24
    else:
        return Age

def test_age(cols):
    # Impute missing ages with the per-class median observed in the test set
    Age = cols[0]
    Pclass = cols[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 42
        if Pclass == 2:
            return 26.5
        else:
            return 24
    else:
        return Age

# Drop the extreme fare outliers (> 300) found earlier
train = train[train['Fare'] < 300].reset_index(drop=True)

# One-hot encode Sex and Embarked, dropping the first level to avoid collinearity
sex_train = pd.get_dummies(train['Sex'], drop_first=True)
sex_test = pd.get_dummies(test['Sex'], drop_first=True)
embark_train = pd.get_dummies(train['Embarked'], drop_first=True)
embark_test = pd.get_dummies(test['Embarked'], drop_first=True)
train = pd.concat([train, sex_train, embark_train], axis=1)
test = pd.concat([test, sex_test, embark_test], axis=1)
test.head(2)
code
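The train_age/test_age helpers above are defined but never applied in the cells shown; a minimal sketch of the usual df.apply pattern, which is an assumption about how the notebook uses them later:

# Hypothetical application of the per-class median age imputers defined above;
# axis=1 passes each row as a Series, so cols[0] is Age and cols[1] is Pclass.
train['Age'] = train[['Age', 'Pclass']].apply(train_age, axis=1)
test['Age'] = test[['Age', 'Pclass']].apply(test_age, axis=1)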
88086228/cell_56
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')

train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)

def train_age(cols):
    # Impute missing ages with the per-class median observed in the train set
    Age = cols[0]
    Pclass = cols[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 37
        if Pclass == 2:
            return 29
        else:
            return 24
    else:
        return Age

def test_age(cols):
    # Impute missing ages with the per-class median observed in the test set
    Age = cols[0]
    Pclass = cols[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 42
        if Pclass == 2:
            return 26.5
        else:
            return 24
    else:
        return Age

train = train[train['Fare'] < 300].reset_index(drop=True)

sex_train = pd.get_dummies(train['Sex'], drop_first=True)
sex_test = pd.get_dummies(test['Sex'], drop_first=True)
embark_train = pd.get_dummies(train['Embarked'], drop_first=True)
embark_test = pd.get_dummies(test['Embarked'], drop_first=True)
train = pd.concat([train, sex_train, embark_train], axis=1)
test = pd.concat([test, sex_test, embark_test], axis=1)
train.head(2)
code
88086228/cell_34
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)

sns.boxplot(x='Pclass', y='Age', data=train)
print('Median age of each class')
print('Pclass 1:', train['Age'][train['Pclass'] == 1].median())
print('Pclass 2:', train['Age'][train['Pclass'] == 2].median())
print('Pclass 3:', train['Age'][train['Pclass'] == 3].median())
code
88086228/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.countplot(x='Survived', hue='SibSp', data=train)
plt.legend(loc='upper right', title='SibSp')
code
88086228/cell_30
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')

# Percentage of missing values per column
print('TRAIN DATASET')
print(train.isnull().sum() / len(train) * 100)
print('=' * 40)
print('TEST DATASET')
print(test.isnull().sum() / len(test) * 100)
code
88086228/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.countplot(x='Survived', data=train)
code
88086228/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.shape
code
88086228/cell_40
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
print(train['Embarked'].value_counts())
code
88086228/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.boxplot(x='Survived', y='Age', data=train)
code
88086228/cell_50
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train[train['Fare'] > 300]
code
88086228/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.info()
code
88086228/cell_49
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
sns.displot(x='Fare', data=train)
code
88086228/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().transpose()
code
88086228/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test.describe().transpose()
code
88086228/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test.head(2)
code
88086228/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.nunique()
code
88086228/cell_35
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)

sns.boxplot(x='Pclass', y='Age', data=test)
print('Median age of each class')
print('Pclass 1:', test['Age'][test['Pclass'] == 1].median())
print('Pclass 2:', test['Age'][test['Pclass'] == 2].median())
print('Pclass 3:', test['Age'][test['Pclass'] == 3].median())
code
88086228/cell_43
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
test['Fare'].median()
code
88086228/cell_46
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)

# Percentage of missing values per column after dropping Cabin
print('TRAIN DATASET')
print(train.isnull().sum() / len(train) * 100)
print('=' * 40)
print('TEST DATASET')
print(test.isnull().sum() / len(test) * 100)
code
88086228/cell_24
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.countplot(x='Survived', hue='Parch', data=train)
code
88086228/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test.info()
code
88086228/cell_22
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sns.countplot(x='Survived', hue='Pclass', data=train)
code
88086228/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.nunique()
code
1006755/cell_42
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.drop('LOCATION', axis=1, inplace=True)

def find_one(substrs, superstr):
    # Return the first substring contained in superstr, or '' if none match
    for substr in substrs:
        if superstr.find(substr) != -1:
            return substr
    return ''

STREETS = [
    'N PULASKI', 'W PERSHING', 'S WESTERN', 'W JACKSON', 'W MADISON', 'N HUMBOLDT', 'W AUGUSTA',
    'S COTTAGE GROVE', 'S KEDZIE', 'W 71ST', 'W ROOSEVELT', 'W OGDEN', 'W PETERSON', 'W FOSTER',
    'N WESTERN', 'W ADDISON', 'E MORGAN DR', 'S PULASKI', 'S ARCHER', 'S STATE', 'E 95TH',
    'W FULLERTON', 'W GRAND', 'W 127TH', 'W 111TH', 'N CENTRAL AVE', 'W IRVING PARK', 'N LINCOLN',
    'S CENTRAL AVE', 'S VINCENNES', 'W 79TH', 'N ASHLAND', 'N OGDEN', 'W BELMONT AVE',
    'N MILWAUKEE AVE', 'N CLYBOURN AVE', 'N RIDGE AVE', 'N BROADWAY', 'W 51ST ST', 'S JEFFERY',
    'W HIGGINS', 'W LAWRENCE', 'N NARRAGANSETT AVE', 'W CHICAGO AVE', 'S HALSTED', 'S RACINE AVE',
    'W GARFIELD BLVD', 'S INDIANAPOLIS', 'N COLUMBUS DR', 'W 76th ST', 'E 75TH ST', 'W 55TH',
    'W MONTROSE', 'E ILLINOIS ST', 'S EWING AVE', 'W SUPERIOR ST', 'E 95TH ST', 'W CERMAK RD',
    'N CICERO AVE', 'W DIVISION ST', 'W BRYN MAWR AVE', 'N NORTHWEST HWY', 'E 87TH ST', 'E 63RD ST',
    'S MARTIN LUTHER KING', 'S ASHLAND AVE', 'W 83rd ST', 'W 103RD ST', 'W NORTH AVE',
]

address_values = df['ADDRESS'].values
street_values = []
for name_value in address_values:
    street_values.append(find_one(STREETS, name_value))

# One-hot encode the extracted street names with a 'Title_' prefix
one_hot = pd.get_dummies(street_values, 'Title', '_')
df.drop('ADDRESS', axis=1, inplace=True)
df = pd.concat([df, one_hot], axis=1)
df.columns
# Drop the 'Title_' column produced by addresses with no street match
df.drop('Title_', axis=1, inplace=True)
df.columns
code
1006755/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.dtypes
code
1006755/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.shape
code
1006755/cell_25
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.dtypes
code
1006755/cell_34
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.drop('LOCATION', axis=1, inplace=True)
df[df['ADDRESS'] == '2912 W ROOSEVELT']
code
1006755/cell_44
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.drop('LOCATION', axis=1, inplace=True)

def find_one(substrs, superstr):
    # Return the first substring contained in superstr, or '' if none match
    for substr in substrs:
        if superstr.find(substr) != -1:
            return substr
    return ''

STREETS = [
    'N PULASKI', 'W PERSHING', 'S WESTERN', 'W JACKSON', 'W MADISON', 'N HUMBOLDT', 'W AUGUSTA',
    'S COTTAGE GROVE', 'S KEDZIE', 'W 71ST', 'W ROOSEVELT', 'W OGDEN', 'W PETERSON', 'W FOSTER',
    'N WESTERN', 'W ADDISON', 'E MORGAN DR', 'S PULASKI', 'S ARCHER', 'S STATE', 'E 95TH',
    'W FULLERTON', 'W GRAND', 'W 127TH', 'W 111TH', 'N CENTRAL AVE', 'W IRVING PARK', 'N LINCOLN',
    'S CENTRAL AVE', 'S VINCENNES', 'W 79TH', 'N ASHLAND', 'N OGDEN', 'W BELMONT AVE',
    'N MILWAUKEE AVE', 'N CLYBOURN AVE', 'N RIDGE AVE', 'N BROADWAY', 'W 51ST ST', 'S JEFFERY',
    'W HIGGINS', 'W LAWRENCE', 'N NARRAGANSETT AVE', 'W CHICAGO AVE', 'S HALSTED', 'S RACINE AVE',
    'W GARFIELD BLVD', 'S INDIANAPOLIS', 'N COLUMBUS DR', 'W 76th ST', 'E 75TH ST', 'W 55TH',
    'W MONTROSE', 'E ILLINOIS ST', 'S EWING AVE', 'W SUPERIOR ST', 'E 95TH ST', 'W CERMAK RD',
    'N CICERO AVE', 'W DIVISION ST', 'W BRYN MAWR AVE', 'N NORTHWEST HWY', 'E 87TH ST', 'E 63RD ST',
    'S MARTIN LUTHER KING', 'S ASHLAND AVE', 'W 83rd ST', 'W 103RD ST', 'W NORTH AVE',
]

address_values = df['ADDRESS'].values
street_values = []
for name_value in address_values:
    street_values.append(find_one(STREETS, name_value))

one_hot = pd.get_dummies(street_values, 'Title', '_')
df.drop('ADDRESS', axis=1, inplace=True)
df = pd.concat([df, one_hot], axis=1)
df.drop('Title_', axis=1, inplace=True)

# Total violations per day
date_group = df.groupby(['DATE'])
fig, plot = plt.subplots(figsize=[20, 7])
date_group['VIOLATIONS'].sum().sort_index().plot(color='green')
plot.set_title('Violations on different days', fontsize=25)
plot.tick_params(labelsize='large')
plot.set_ylabel('No of violations', fontsize=20)
plot.set_xlabel('Dates', fontsize=20)
code
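The DATE column is grouped and plotted as raw strings above, so the x-axis orders lexicographically rather than chronologically. A sketch of parsing it first; the exact date format in cameras.csv is an assumption left to pd.to_datetime's inference:

import pandas as pd

# Hypothetical: parse DATE so the daily series sorts chronologically
# before grouping and plotting.
df['DATE'] = pd.to_datetime(df['DATE'])
daily = df.groupby('DATE')['VIOLATIONS'].sum().sort_index()
daily.plot(figsize=[20, 7], color='green', title='Violations on different days')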
1006755/cell_20
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.info()
code
1006755/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.columns
code
1006755/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.drop('LOCATION', axis=1, inplace=True)

def find_one(substrs, superstr):
    # Return the first substring contained in superstr, or '' if none match
    for substr in substrs:
        if superstr.find(substr) != -1:
            return substr
    return ''

STREETS = [
    'N PULASKI', 'W PERSHING', 'S WESTERN', 'W JACKSON', 'W MADISON', 'N HUMBOLDT', 'W AUGUSTA',
    'S COTTAGE GROVE', 'S KEDZIE', 'W 71ST', 'W ROOSEVELT', 'W OGDEN', 'W PETERSON', 'W FOSTER',
    'N WESTERN', 'W ADDISON', 'E MORGAN DR', 'S PULASKI', 'S ARCHER', 'S STATE', 'E 95TH',
    'W FULLERTON', 'W GRAND', 'W 127TH', 'W 111TH', 'N CENTRAL AVE', 'W IRVING PARK', 'N LINCOLN',
    'S CENTRAL AVE', 'S VINCENNES', 'W 79TH', 'N ASHLAND', 'N OGDEN', 'W BELMONT AVE',
    'N MILWAUKEE AVE', 'N CLYBOURN AVE', 'N RIDGE AVE', 'N BROADWAY', 'W 51ST ST', 'S JEFFERY',
    'W HIGGINS', 'W LAWRENCE', 'N NARRAGANSETT AVE', 'W CHICAGO AVE', 'S HALSTED', 'S RACINE AVE',
    'W GARFIELD BLVD', 'S INDIANAPOLIS', 'N COLUMBUS DR', 'W 76th ST', 'E 75TH ST', 'W 55TH',
    'W MONTROSE', 'E ILLINOIS ST', 'S EWING AVE', 'W SUPERIOR ST', 'E 95TH ST', 'W CERMAK RD',
    'N CICERO AVE', 'W DIVISION ST', 'W BRYN MAWR AVE', 'N NORTHWEST HWY', 'E 87TH ST', 'E 63RD ST',
    'S MARTIN LUTHER KING', 'S ASHLAND AVE', 'W 83rd ST', 'W 103RD ST', 'W NORTH AVE',
]

address_values = df['ADDRESS'].values
street_values = []
for name_value in address_values:
    street_values.append(find_one(STREETS, name_value))

one_hot = pd.get_dummies(street_values, 'Title', '_')
df.drop('ADDRESS', axis=1, inplace=True)
df = pd.concat([df, one_hot], axis=1)
df[990:1000]
code
1006755/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.columns.values
code
1006755/cell_19
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.describe(include='all')
code
1006755/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.drop('LOCATION', axis=1, inplace=True)
df[5:15]
code
1006755/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.shape
code
1006755/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.head()
code
1006755/cell_16
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.tail()
code
1006755/cell_38
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.drop('LOCATION', axis=1, inplace=True)

def find_one(substrs, superstr):
    # Return the first substring contained in superstr, or '' if none match
    for substr in substrs:
        if superstr.find(substr) != -1:
            return substr
    return ''

STREETS = [
    'N PULASKI', 'W PERSHING', 'S WESTERN', 'W JACKSON', 'W MADISON', 'N HUMBOLDT', 'W AUGUSTA',
    'S COTTAGE GROVE', 'S KEDZIE', 'W 71ST', 'W ROOSEVELT', 'W OGDEN', 'W PETERSON', 'W FOSTER',
    'N WESTERN', 'W ADDISON', 'E MORGAN DR', 'S PULASKI', 'S ARCHER', 'S STATE', 'E 95TH',
    'W FULLERTON', 'W GRAND', 'W 127TH', 'W 111TH', 'N CENTRAL AVE', 'W IRVING PARK', 'N LINCOLN',
    'S CENTRAL AVE', 'S VINCENNES', 'W 79TH', 'N ASHLAND', 'N OGDEN', 'W BELMONT AVE',
    'N MILWAUKEE AVE', 'N CLYBOURN AVE', 'N RIDGE AVE', 'N BROADWAY', 'W 51ST ST', 'S JEFFERY',
    'W HIGGINS', 'W LAWRENCE', 'N NARRAGANSETT AVE', 'W CHICAGO AVE', 'S HALSTED', 'S RACINE AVE',
    'W GARFIELD BLVD', 'S INDIANAPOLIS', 'N COLUMBUS DR', 'W 76th ST', 'E 75TH ST', 'W 55TH',
    'W MONTROSE', 'E ILLINOIS ST', 'S EWING AVE', 'W SUPERIOR ST', 'E 95TH ST', 'W CERMAK RD',
    'N CICERO AVE', 'W DIVISION ST', 'W BRYN MAWR AVE', 'N NORTHWEST HWY', 'E 87TH ST', 'E 63RD ST',
    'S MARTIN LUTHER KING', 'S ASHLAND AVE', 'W 83rd ST', 'W 103RD ST', 'W NORTH AVE',
]

address_values = df['ADDRESS'].values
street_values = []
for name_value in address_values:
    street_values.append(find_one(STREETS, name_value))

one_hot = pd.get_dummies(street_values, 'Title', '_')
df.drop('ADDRESS', axis=1, inplace=True)
df = pd.concat([df, one_hot], axis=1)
df.columns
code
1006755/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df[5:15]
code
1006755/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.drop('LOCATION', axis=1, inplace=True)
df.info()
code
1006755/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df[df['LATITUDE'] == -1]
code
1006755/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
(df['LATITUDE'] == -1).count()
code
1006755/cell_36
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/cameras.csv')
df.replace('NaN', -1, inplace=True)
df.drop('LOCATION', axis=1, inplace=True)
df['ADDRESS'].unique()
code
73095876/cell_6
[ "text_plain_output_1.png" ]
from google.cloud import bigquery

bigquery_client = bigquery.Client(project='wtm-kampala-ds', location='US')
dataset_ref = bigquery_client.dataset('openaq', project='bigquery-public-data')
dataset = bigquery_client.get_dataset(dataset_ref)
[x.table_id for x in bigquery_client.list_tables(dataset)]
code
72098873/cell_21
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))

ax = plt.subplots(1, 1, figsize=(10, 8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%', shadow=True, figsize=(10, 8))
plt.title('Iris Species %')
plt.show()

train, test = train_test_split(iris, test_size=0.25)
train_X = train[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
train_y = train.Species
test_X = test[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
test_y = test.Species

model = svm.SVC()
model.fit(train_X, train_y)
prediction = model.predict(test_X)

model = LogisticRegression()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
print('The accuracy of the Logistic Regression is', metrics.accuracy_score(prediction, test_y))
code
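A single random 75/25 split gives a noisy accuracy estimate on only 150 rows. A small cross-validation sketch over the same four features, offered as a suggested check rather than part of the original notebook (max_iter=200 is an arbitrary choice to ensure convergence):

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# Hypothetical: 5-fold CV over the full feature matrix for a steadier estimate.
features = iris[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
scores = cross_val_score(LogisticRegression(max_iter=200), features, iris.Species, cv=5)
print('CV accuracy: %.3f +/- %.3f' % (scores.mean(), scores.std()))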
72098873/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
iris.shape
code
72098873/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
code
72098873/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

iris = pd.read_csv('../input/iris/Iris.csv')
iris.plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm')
code
72098873/cell_20
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn import svm
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))

ax = plt.subplots(1, 1, figsize=(10, 8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%', shadow=True, figsize=(10, 8))
plt.title('Iris Species %')
plt.show()

train, test = train_test_split(iris, test_size=0.25)
train_X = train[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
train_y = train.Species
test_X = test[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
test_y = test.Species

model = svm.SVC()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
print('The accuracy of the SVM is:', metrics.accuracy_score(prediction, test_y))
code
72098873/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()
code
72098873/cell_2
[ "image_output_1.png" ]
import pandas as pd

iris = pd.read_csv('../input/iris/Iris.csv')
iris.info()
code
72098873/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
iris['Species'].value_counts()
code
72098873/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))

ax = plt.subplots(1, 1, figsize=(10, 8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%', shadow=True, figsize=(10, 8))
plt.title('Iris Species %')
plt.show()

train, test = train_test_split(iris, test_size=0.25)
train_X = train[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
train_y = train.Species
test_X = test[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
test_y = test.Species
test_y.head(5)
code
72098873/cell_1
[ "text_html_output_1.png" ]
import pandas as pd

iris = pd.read_csv('../input/iris/Iris.csv')
iris.head()
code
72098873/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

sns.boxplot(x='Species', y='PetalLengthCm', data=iris)
code
72098873/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))

ax = plt.subplots(1, 1, figsize=(10, 8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%', shadow=True, figsize=(10, 8))
plt.title('Iris Species %')
plt.show()

train, test = train_test_split(iris, test_size=0.25)
train_X = train[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
train_y = train.Species
test_X = test[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
test_y = test.Species
train_X.head(5)
code
72098873/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

sns.pairplot(iris.drop('Id', axis=1), hue='Species', height=3)
code
72098873/cell_15
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

iris = pd.read_csv('../input/iris/Iris.csv')

sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()

fig = iris[iris.Species == 'Iris-setosa'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='orange', label='Setosa')
iris[iris.Species == 'Iris-versicolor'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='blue', label='versicolor', ax=fig)
iris[iris.Species == 'Iris-virginica'].plot.scatter(x='PetalLengthCm', y='PetalWidthCm', color='green', label='virginica', ax=fig)
fig.set_xlabel('Petal Length')
fig.set_ylabel('Petal Width')
fig.set_title('Petal Length VS Width')
fig = plt.gcf()
fig.set_size_inches(10, 6)
plt.show()

iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))

ax = plt.subplots(1, 1, figsize=(10, 8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%', shadow=True, figsize=(10, 8))
plt.title('Iris Species %')
plt.show()

# Pairwise feature correlations
plt.figure(figsize=(7, 4))
sns.heatmap(iris.corr(), annot=True, cmap='cubehelix_r')
plt.show()
code
72098873/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax=plt.subplots(1,1,figsize=(10,8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%',shadow=True,figsize=(10,8))
plt.title("Iris Species %")
plt.show()
iris.shape
train, test = train_test_split(iris, test_size=0.25)
print(train.shape)
print(test.shape)
code
72098873/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
iris['Species'].value_counts()
code
72098873/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
iris.shape
print(iris['Species'].unique())
code
72098873/cell_22
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax=plt.subplots(1,1,figsize=(10,8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%',shadow=True,figsize=(10,8))
plt.title("Iris Species %")
plt.show()
iris.shape
train, test = train_test_split(iris, test_size=0.25)
train_X = train[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
train_y = train.Species
test_X = test[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
test_y = test.Species
model = svm.SVC()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
model = LogisticRegression()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
model = DecisionTreeClassifier()
model.fit(train_X, train_y)
prediction = model.predict(test_X)
print('The accuracy of the Decision Tree is', metrics.accuracy_score(prediction, test_y))
code
72098873/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
fig = iris[iris.Species=='Iris-setosa'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='orange', label='Setosa')
iris[iris.Species=='Iris-versicolor'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='blue', label='versicolor',ax=fig)
iris[iris.Species=='Iris-virginica'].plot.scatter(x='PetalLengthCm',y='PetalWidthCm',color='green', label='virginica', ax=fig)
fig.set_xlabel("Petal Length")
fig.set_ylabel("Petal Width")
fig.set_title(" Petal Length VS Width")
fig=plt.gcf()
fig.set_size_inches(10,6)
plt.show()
iris.drop('Id', axis=1).boxplot(by='Species', figsize=(12, 6))
ax = plt.subplots(1, 1, figsize=(10, 8))
iris['Species'].value_counts().plot.pie(autopct='%1.1f%%', shadow=True, figsize=(10, 8))
plt.title('Iris Species %')
plt.show()
code
72098873/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
iris = pd.read_csv('../input/iris/Iris.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.FacetGrid(iris, hue='Species', height=5).map(plt.scatter, 'SepalLengthCm', 'SepalWidthCm').add_legend()
code
128045865/cell_19
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import tensorflow.keras.backend as K
breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')
train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)
datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
A, B = np.unique(train_generator.labels, return_counts=True)
n = len(train_generator.labels)
cls_weights = {i: (n - j) / n for i, j in zip(A, B)}

def f1_score(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val

def train_best_model(units, learning_rate):
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(units, (3, 3), activation='relu', input_shape=(50, 50, 3)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(units * 2, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(units * 4, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(units * 2, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall(), f1_score])
    history = model.fit(train_generator, validation_data=val_generator, epochs=300, callbacks=tf.keras.callbacks.EarlyStopping(patience=5, min_delta=0.001, monitor='val_loss'), class_weight=cls_weights)
    test_results = model.evaluate(test_generator)
    return (model, history, test_results)

model.save('cnn_model.h5')
with open('cnn_model_history.pkl', 'wb') as file:
    pickle.dump(history.history, file)
with open('cnn_test_results.pkl', 'wb') as file:
    pickle.dump(test_results, file)
total_parameters = model.count_params()
mult_adds_total = 0
for layer in model.layers:
    if isinstance(layer, tf.keras.layers.Conv2D):
        height, width, channels_in = layer.input_shape[1:]
        _, _, channels_out = layer.output_shape[1:]
        kernel_height, kernel_width = layer.kernel_size
        mult_adds = height * width * channels_in * channels_out * kernel_height * kernel_width
        mult_adds_total += mult_adds
print('Total parameters:', total_parameters)
print('Total number of multiply-accumulates:', mult_adds_total)
code
128045865/cell_16
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
import tensorflow as tf
import tensorflow.keras.backend as K
breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')
train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)
datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
A, B = np.unique(train_generator.labels, return_counts=True)
n = len(train_generator.labels)
cls_weights = {i: (n - j) / n for i, j in zip(A, B)}

def f1_score(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val

def train_best_model(units, learning_rate):
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(units, (3, 3), activation='relu', input_shape=(50, 50, 3)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(units * 2, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(units * 4, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(units * 2, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall(), f1_score])
    history = model.fit(train_generator, validation_data=val_generator, epochs=300, callbacks=tf.keras.callbacks.EarlyStopping(patience=5, min_delta=0.001, monitor='val_loss'), class_weight=cls_weights)
    test_results = model.evaluate(test_generator)
    return (model, history, test_results)

model.save('cnn_model.h5')
with open('cnn_model_history.pkl', 'wb') as file:
    pickle.dump(history.history, file)
with open('cnn_test_results.pkl', 'wb') as file:
    pickle.dump(test_results, file)

def plot_the_results(history):
    plt.style.use('seaborn')
    plt.figure(figsize=(10, 5))
    plt.plot(history.epoch, history.history['val_f1_score'], label='Val F1 Score', linewidth=2)
    plt.plot(history.epoch, history.history['f1_score'], label='f1_score', linewidth=2)
    plt.legend()
    plt.title('F1 Score')
    plt.show()
    plt.figure(figsize=(10, 5))
    plt.plot(history.epoch, history.history['val_precision'], label='Val Precision', linewidth=2)
    plt.plot(history.epoch, history.history['precision'], label='Precision', linewidth=2)
    plt.legend()
    plt.title('Precision')
    plt.show()
    plt.figure(figsize=(10, 5))
    plt.plot(history.epoch, history.history['val_recall'], label='Val Recall', linewidth=2)
    plt.plot(history.epoch, history.history['recall'], label='Recall', linewidth=2)
    plt.legend()
    plt.title('Recall')
    plt.show()

plot_the_results(history)
code
128045865/cell_17
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K
breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')
train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)
datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
A, B = np.unique(train_generator.labels, return_counts=True)
n = len(train_generator.labels)
cls_weights = {i: (n - j) / n for i, j in zip(A, B)}

def f1_score(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val

def train_best_model(units, learning_rate):
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(units, (3, 3), activation='relu', input_shape=(50, 50, 3)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(units * 2, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(units * 4, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(units * 2, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall(), f1_score])
    history = model.fit(train_generator, validation_data=val_generator, epochs=300, callbacks=tf.keras.callbacks.EarlyStopping(patience=5, min_delta=0.001, monitor='val_loss'), class_weight=cls_weights)
    test_results = model.evaluate(test_generator)
    return (model, history, test_results)

print('Test Results')
print('\n-------------\n')
print('Test Loss:', format(test_results[0], '.3f'))
print('Test Precision: ', format(test_results[1], '.3f'))
print('Test Recall: ', format(test_results[2], '.3f'))
print('Test F1: ', format(test_results[3], '.3f'))
code
128045865/cell_14
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K
breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')
train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)
datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
A, B = np.unique(train_generator.labels, return_counts=True)
n = len(train_generator.labels)
cls_weights = {i: (n - j) / n for i, j in zip(A, B)}

def f1_score(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val

def train_best_model(units, learning_rate):
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(units, (3, 3), activation='relu', input_shape=(50, 50, 3)))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(units * 2, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(units * 4, (3, 3), activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(units * 2, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate), loss='binary_crossentropy', metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall(), f1_score])
    history = model.fit(train_generator, validation_data=val_generator, epochs=300, callbacks=tf.keras.callbacks.EarlyStopping(patience=5, min_delta=0.001, monitor='val_loss'), class_weight=cls_weights)
    test_results = model.evaluate(test_generator)
    return (model, history, test_results)

best_settings = {'learning_rate': 0.001, 'units': 64}
model, history, test_results = train_best_model(**best_settings)
code
128045865/cell_5
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import glob
import pandas as pd
breast_img = glob.glob('/kaggle/input/breast-histopathology-images/IDC_regular_ps50_idx5/**/*.png', recursive=True)
data = pd.read_csv('/kaggle/input/selected-images/selected_images.csv')
train_data, val_data = train_test_split(data, test_size=0.3, random_state=42)
val_data, test_data = train_test_split(val_data, test_size=0.5, random_state=42)
datagen = ImageDataGenerator(rescale=1.0 / 255)
target_size = (50, 50)
batch_size = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
val_generator = datagen.flow_from_dataframe(dataframe=val_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
test_generator = datagen.flow_from_dataframe(dataframe=test_data, x_col='path', y_col='label', target_size=target_size, batch_size=batch_size, class_mode='raw')
code
2017383/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.info()
code
2017383/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data['Title'] = all_data['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0]
stat_min = 10
title_names = all_data['Title'].value_counts() < stat_min
all_data['Title'] = all_data['Title'].apply(lambda x: 'Misc' if title_names.loc[x] == True else x)
print(all_data['Title'].value_counts())
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data.info()
code
2017383/cell_11
[ "text_plain_output_1.png" ]
from sklearn import feature_selection
from sklearn import model_selection
from sklearn import tree
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data['Title'] = all_data['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0]
stat_min = 10
title_names = all_data['Title'].value_counts() < stat_min
all_data['Title'] = all_data['Title'].apply(lambda x: 'Misc' if title_names.loc[x] == True else x)
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data = pd.get_dummies(all_data)
cv_split = model_selection.ShuffleSplit(n_splits=10, test_size=0.3, train_size=0.6, random_state=0)
train_cleared = all_data[:train.shape[0]]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train_cleared, train.Survived, random_state=0, test_size=0.1)
X_val = all_data[train.shape[0]:]
dtree = tree.DecisionTreeClassifier(random_state=0)
base_results = model_selection.cross_validate(dtree, train_cleared, train.Survived, cv=cv_split, return_train_score=True)
dtree.fit(train_cleared, train.Survived)
param_grid = {'criterion': ['gini', 'entropy'], 'max_depth': [2, 4, 6, 8, 10, None], 'random_state': [0]}
tune_model = model_selection.GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param_grid, scoring='roc_auc', cv=cv_split, return_train_score=True)
tune_model.fit(train_cleared, train.Survived)
print('BEFORE DT RFE Training Shape Old: ', train_cleared.shape)
print('BEFORE DT RFE Training Columns Old: ', train_cleared.columns.values)
print('BEFORE DT RFE Training w/bin score mean: {:.2f}'.format(base_results['train_score'].mean() * 100))
print('BEFORE DT RFE Test w/bin score mean: {:.2f}'.format(base_results['test_score'].mean() * 100))
print('BEFORE DT RFE Test w/bin score 3*std: +/- {:.2f}'.format(base_results['test_score'].std() * 100 * 3))
print('-' * 10)
dtree_rfe = feature_selection.RFECV(dtree, step=1, scoring='accuracy', cv=cv_split)
dtree_rfe.fit(train_cleared, train.Survived)
X_rfe = train_cleared.columns.values[dtree_rfe.get_support()]
rfe_results = model_selection.cross_validate(dtree, train_cleared[X_rfe], train.Survived, cv=cv_split)
print('AFTER DT RFE Training Shape New: ', train_cleared[X_rfe].shape)
print('AFTER DT RFE Training Columns New: ', X_rfe)
print('AFTER DT RFE Training w/bin score mean: {:.2f}'.format(rfe_results['train_score'].mean() * 100))
print('AFTER DT RFE Test w/bin score mean: {:.2f}'.format(rfe_results['test_score'].mean() * 100))
print('AFTER DT RFE Test w/bin score 3*std: +/- {:.2f}'.format(rfe_results['test_score'].std() * 100 * 3))
print('-' * 10)
rfe_tune_model = model_selection.GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param_grid, scoring='roc_auc', cv=cv_split)
rfe_tune_model.fit(train_cleared[X_rfe], train.Survived)
print('AFTER DT RFE Tuned Parameters: ', rfe_tune_model.best_params_)
print('AFTER DT RFE Tuned Training w/bin score mean: {:.2f}'.format(rfe_tune_model.cv_results_['mean_train_score'][tune_model.best_index_] * 100))
print('AFTER DT RFE Tuned Test w/bin score mean: {:.2f}'.format(rfe_tune_model.cv_results_['mean_test_score'][tune_model.best_index_] * 100))
print('AFTER DT RFE Tuned Test w/bin score 3*std: +/- {:.2f}'.format(rfe_tune_model.cv_results_['std_test_score'][tune_model.best_index_] * 100 * 3))
print('-' * 10)
code
2017383/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data['Title'] = all_data['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0]
stat_min = 10
title_names = all_data['Title'].value_counts() < stat_min
all_data['Title'] = all_data['Title'].apply(lambda x: 'Misc' if title_names.loc[x] == True else x)
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data = pd.get_dummies(all_data)
all_data.head()
code
2017383/cell_10
[ "text_plain_output_1.png" ]
from sklearn import model_selection
from sklearn import tree
from sklearn.model_selection import train_test_split
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data['Title'] = all_data['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0]
stat_min = 10
title_names = all_data['Title'].value_counts() < stat_min
all_data['Title'] = all_data['Title'].apply(lambda x: 'Misc' if title_names.loc[x] == True else x)
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data = pd.get_dummies(all_data)
cv_split = model_selection.ShuffleSplit(n_splits=10, test_size=0.3, train_size=0.6, random_state=0)
train_cleared = all_data[:train.shape[0]]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train_cleared, train.Survived, random_state=0, test_size=0.1)
X_val = all_data[train.shape[0]:]
dtree = tree.DecisionTreeClassifier(random_state=0)
base_results = model_selection.cross_validate(dtree, train_cleared, train.Survived, cv=cv_split, return_train_score=True)
dtree.fit(train_cleared, train.Survived)
print('BEFORE DT Parameters: ', dtree.get_params())
print('BEFORE DT Training w/bin score mean: {:.2f}'.format(base_results['train_score'].mean() * 100))
print('BEFORE DT Test w/bin score mean: {:.2f}'.format(base_results['test_score'].mean() * 100))
print('BEFORE DT Test w/bin score 3*std: +/- {:.2f}'.format(base_results['test_score'].std() * 100 * 3))
print('-' * 10)
param_grid = {'criterion': ['gini', 'entropy'], 'max_depth': [2, 4, 6, 8, 10, None], 'random_state': [0]}
tune_model = model_selection.GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param_grid, scoring='roc_auc', cv=cv_split, return_train_score=True)
tune_model.fit(train_cleared, train.Survived)
print('AFTER DT Parameters: ', tune_model.best_params_)
print('AFTER DT Training w/bin score mean: {:.2f}'.format(tune_model.cv_results_['mean_train_score'][tune_model.best_index_] * 100))
print('AFTER DT Test w/bin score mean: {:.2f}'.format(tune_model.cv_results_['mean_test_score'][tune_model.best_index_] * 100))
print('AFTER DT Test w/bin score 3*std: +/- {:.2f}'.format(tune_model.cv_results_['std_test_score'][tune_model.best_index_] * 100 * 3))
print('-' * 10)
code
2017383/cell_12
[ "text_plain_output_1.png" ]
from sklearn import feature_selection
from sklearn import model_selection
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data['Title'] = all_data['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0]
stat_min = 10
title_names = all_data['Title'].value_counts() < stat_min
all_data['Title'] = all_data['Title'].apply(lambda x: 'Misc' if title_names.loc[x] == True else x)
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data = pd.get_dummies(all_data)
cv_split = model_selection.ShuffleSplit(n_splits=10, test_size=0.3, train_size=0.6, random_state=0)
train_cleared = all_data[:train.shape[0]]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(train_cleared, train.Survived, random_state=0, test_size=0.1)
X_val = all_data[train.shape[0]:]
dtree = tree.DecisionTreeClassifier(random_state=0)
base_results = model_selection.cross_validate(dtree, train_cleared, train.Survived, cv=cv_split, return_train_score=True)
dtree.fit(train_cleared, train.Survived)
param_grid = {'criterion': ['gini', 'entropy'], 'max_depth': [2, 4, 6, 8, 10, None], 'random_state': [0]}
tune_model = model_selection.GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param_grid, scoring='roc_auc', cv=cv_split, return_train_score=True)
tune_model.fit(train_cleared, train.Survived)
dtree_rfe = feature_selection.RFECV(dtree, step=1, scoring='accuracy', cv=cv_split)
dtree_rfe.fit(train_cleared, train.Survived)
X_rfe = train_cleared.columns.values[dtree_rfe.get_support()]
rfe_results = model_selection.cross_validate(dtree, train_cleared[X_rfe], train.Survived, cv=cv_split)
rfe_tune_model = model_selection.GridSearchCV(tree.DecisionTreeClassifier(), param_grid=param_grid, scoring='roc_auc', cv=cv_split)
rfe_tune_model.fit(train_cleared[X_rfe], train.Survived)
model = DecisionTreeClassifier(random_state=0, max_depth=5)
model.fit(X_train, y_train)
print('Train score: {:.3f}'.format(model.score(X_train, y_train)))
print('Test score: {:.3f}'.format(model.score(X_test, y_test)))
decision_tree_predicts_base = model.predict(X_val)
decision_tree_predicts_tuned_param = tune_model.predict(X_val)
decision_tree_predicts_tuned_param_rfe = rfe_tune_model.predict(X_val[X_rfe])
code
2017383/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data.info()
code
33096987/cell_42
[ "image_output_1.png" ]
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-whitegrid')
from collections import Counter
import warnings
warnings.filterwarnings('ignore')
import os
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def barplot(variable):
    """
    input : variable example: "Sex"
    output : barplot & value count
    """
    var = train_df[variable]
    varValue = var.value_counts()
    plt.xticks(varValue.index, varValue.index.values)

def plothist(variable):
    pass

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
list1 = ['SibSp', 'Age', 'Fare', 'Parch', 'Survived']
sns.heatmap(train_df[list1].corr(), annot=True, fmt='.2f')
code
33096987/cell_21
[ "text_plain_output_5.png", "text_plain_output_4.png", "image_output_5.png", "text_plain_output_6.png", "text_plain_output_3.png", "image_output_4.png", "image_output_6.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns
train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
33096987/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns
train_df.info()
code
33096987/cell_34
[ "text_html_output_1.png" ]
from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns

def detect_outliers(df, features):
    outlier_indices = []
    for c in features:
        q1 = np.percentile(df[c], 25)
        q3 = np.percentile(df[c], 75)
        IQR = q3 - q1
        outlier_step = IQR * 1.5
        outlier_list_col = df[(df[c] < q1 - outlier_step) | (df[c] > q3 + outlier_step)].index
        outlier_indices.extend(outlier_list_col)
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list((i for i, v in outlier_indices.items() if v > 2))
    return multiple_outliers

train_df.loc[detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare'])]
train_df = train_df.drop(detect_outliers(train_df, ['Age', 'Parch', 'SibSp', 'Fare']), axis=0).reset_index(drop=True)
train_df_len = len(train_df)
train_df = pd.concat([train_df, test_df], axis=0).reset_index(drop=True)
train_df.columns[train_df.isnull().any()]
train_df.isnull().sum()
train_df[train_df['Embarked'].isnull()]
code
33096987/cell_23
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/titanic/train.csv')
test_df = pd.read_csv('/kaggle/input/titanic/test.csv')
test_PassengerId = test_df['PassengerId']
train_df.columns
train_df[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code