| path (string, 13-17 chars) | screenshot_names (list, 1-873 items) | code (string, 0-40.4k chars) | cell_type (string, 1 class) |
|---|---|---|---|
130023373/cell_16
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd #dataframe manipulation
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
train['Label'].value_counts()
|
code
|
130023373/cell_3
|
[
"image_output_2.png",
"image_output_1.png"
] |
!pip install pip -U -q
!pip install fastdup -q
|
code
|
130023373/cell_27
|
[
"text_plain_output_1.png"
] |
from PIL import Image, ImageDraw, ImageEnhance  # for reading and editing images
from matplotlib import pyplot as plt  # needed for the plotting calls below (missing from this cell's imports)
from tqdm import tqdm_notebook
import cv2  # for reading images with OpenCV
import os
import pandas as pd  # dataframe manipulation
import seaborn as sns  # for visualization
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
_=sns.countplot(x=train['Label'],order=train['Label'].value_counts().index);
_=plt.title("Target Label Distribution",fontsize=18)
def img_read(path, im, new_size=False):
    img = Image.open(os.path.join(path, im))
    if new_size:
        img = img.resize((224, 224))
    return img
path="/kaggle/input/machinehack-watermark-challenge/train"
fig=plt.figure(figsize=(8, 15))
for i,label in enumerate(['Watermark','No Watermark']):
_=plt.subplot(1,2,i+1)
img=img_read(path,train[train['Label']==label]['Image'].head(1).values[0])
plt.imshow(img)
plt.axis('off')
plt.title(f"{'With Watermark' if label=='Watermark' else 'No Watermark'}")
def rgb_dist_plot(img,ax):
start=0
end=256
for _,color in enumerate(['Red','Green','Blue']):
_=sns.kdeplot(img.histogram()[start:end],label=color,color=color)
_=plt.legend();
start+=256
end+=256
for label in ['Watermark','No Watermark']:
fig, axs = plt.subplots(1, 2 ,figsize=(15,5))
img_id=train[train['Label']==label].head(1)['Image'].values[0]
img_file =Image.open(f"/kaggle/input/machinehack-watermark-challenge/train/{img_id}")
axs[0].imshow(img_file)
axs[0].axis('off')
axs[0].set_title(img_id,fontsize=18)
_=rgb_dist_plot(img_file,ax=axs[1])
axs[1].set_title("RGB Color Distribution For "+img_id,fontsize=18)
def basic_image_info(df, path):
image_name = []
img_mode = []
img_height = []
img_width = []
img_contrast = []
for file in tqdm_notebook(df['Image']):
image_name.append(file)
img = Image.open(f'{os.path.join(path, file)}')
        grey_img = cv2.imread(os.path.join(path, file), cv2.IMREAD_GRAYSCALE)  # IMREAD_GRAYSCALE (a read flag), not the COLOR_BGR2GRAY conversion code, loads the image as grayscale
img_mode.append(img.mode)
img_width.append(img.width)
img_height.append(img.height)
img_contrast.append(grey_img.std())
return pd.DataFrame({'image_name': image_name, 'img_mode': img_mode, 'img_contrast': img_contrast, 'img_width': img_width, 'img_height': img_height})
train_image_basic_info=basic_image_info(train,
"/kaggle/input/machinehack-watermark-challenge/train")
train_image_basic_info.head()
|
code
|
130023373/cell_5
|
[
"image_output_1.png"
] |
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageEnhance
import skimage.color
import skimage.util
import imagehash
import cv2
import os
import re
import itertools
import distance
import time
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm_notebook
|
code
|
106208987/cell_4
|
[
"text_plain_output_1.png"
] |
from matplotlib import image
im = image.imread('../input/rice-image-dataset/Rice_Image_Dataset/Arborio/Arborio (10012).jpg')
im.shape
|
code
|
106208987/cell_2
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
os.listdir('../input/rice-image-dataset/Rice_Image_Dataset')
|
code
|
106208987/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
106208987/cell_7
|
[
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] |
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
from PIL import Image  # needed for Image.open below (missing from this cell's imports)
os.listdir('../input/rice-image-dataset/Rice_Image_Dataset')
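# Load the rice dataset: iterate the class folders, skip the citation .txt entry
# (its name ends in 't'), and keep up to 1000 images per class, each resized to
# 28x28 and converted to single-channel grayscale ('L').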
def load(impath):
imgs = []
labels = []
l1 = os.listdir(impath)
for i in l1:
if i[-1] == 't':
continue
l2 = os.listdir(impath + '/' + i)
count = 0
for j in l2:
img = Image.open(impath + '/' + i + '/' + j)
img = img.resize(size=(28, 28))
img = img.convert('L')
imgs.append(np.array(img))
labels.append(i)
count += 1
if count >= 1000:
break
return (np.array(imgs), labels)
arr, l = load('../input/rice-image-dataset/Rice_Image_Dataset')
l
|
code
|
106208987/cell_5
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from matplotlib import image
import matplotlib.pyplot as plt
im = image.imread('../input/rice-image-dataset/Rice_Image_Dataset/Arborio/Arborio (10012).jpg')
im.shape
plt.imshow(im)
|
code
|
122244126/cell_13
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
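# Competition metric: symmetric mean absolute percentage error (SMAPE), in percent.
# Pairs where both the true and predicted values are zero contribute zero error.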
def smape(y_true, y_pred, agg=True):
smap = np.zeros(len(y_true))
num = np.abs(y_true - y_pred)
dem = (np.abs(y_true) + np.abs(y_pred)) / 2
pos_ind = (y_true != 0) | (y_pred != 0)
smap[pos_ind] = num[pos_ind] / dem[pos_ind]
if agg == True:
return 100 * np.mean(smap)
else:
return 100 * smap
BASE = '../input/godaddy-microbusiness-density-forecasting/'
VERSION = 29
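# ACS table S0101 column S0101_C01_026E is used as the population denominator below;
# it is assumed here to be the estimate of residents aged 18 and over.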
COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E']
df2017 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2017.S0101-Data.csv', usecols=COLS)
df2017 = df2017.iloc[1:]
df2017['S0101_C01_026E'] = df2017['S0101_C01_026E'].astype('int')
df2018 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2018.S0101-Data.csv', usecols=COLS)
df2018 = df2018.iloc[1:]
df2018['S0101_C01_026E'] = df2018['S0101_C01_026E'].astype('int')
df2019 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2019.S0101-Data.csv', usecols=COLS)
df2019 = df2019.iloc[1:]
df2019['S0101_C01_026E'] = df2019['S0101_C01_026E'].astype('int')
df2020 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS)
df2020 = df2020.iloc[1:]
df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int')
df2021 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS)
df2021 = df2021.iloc[1:]
df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int')
df2017['cfips'] = df2017.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2018['cfips'] = df2018.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2019['cfips'] = df2019.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
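# Shift each ACS vintage forward by two years, assuming the competition's
# microbusiness_density for calendar year Y is based on the ACS 5-year
# population released two years earlier.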
df2017['year'] = 2017 + 2
df2018['year'] = 2018 + 2
df2019['year'] = 2019 + 2
df2020['year'] = 2020 + 2
df2021['year'] = 2021 + 2
K = pd.concat((df2017, df2018, df2019, df2020, df2021))
del K['GEO_ID'], K['NAME']
K = K.rename(columns={'S0101_C01_026E': 'K'})
K
train = pd.read_csv(BASE + 'train.csv')
reaveal_test = pd.read_csv(BASE + 'revealed_test.csv')
train = pd.concat([train, reaveal_test]).sort_values(by=['cfips', 'first_day_of_month']).reset_index(drop=True)
test = pd.read_csv(BASE + 'test.csv')
drop_index = (test.first_day_of_month == '2022-11-01') | (test.first_day_of_month == '2022-12-01')
test = test.loc[~drop_index, :]
sub = pd.read_csv(BASE + 'sample_submission.csv')
train['istest'] = 0
test['istest'] = 1
raw = pd.concat((train, test)).sort_values(['cfips', 'row_id']).reset_index(drop=True)
coords = pd.read_csv('/kaggle/input/usa-counties-coordinates/cfips_location.csv')
raw = raw.merge(coords.drop('name', axis=1), on='cfips')
raw['first_day_of_month'] = pd.to_datetime(raw['first_day_of_month'])
raw['year'] = raw['first_day_of_month'].dt.year
raw['month'] = raw['first_day_of_month'].dt.month
raw['county'] = raw.groupby('cfips')['county'].ffill()
raw['state'] = raw.groupby('cfips')['state'].ffill()
raw['dcount'] = raw.groupby(['cfips'])['row_id'].cumcount()
raw['county_i'] = (raw['county'] + raw['state']).factorize()[0]
raw['state_i'] = raw['state'].factorize()[0]
raw = raw.merge(K, on=['cfips', 'year'], how='left')
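# Sanity check: recompute the density as 100 * active / population (K); the cell_8
# variant of this code prints the mean absolute difference from microbusiness_density.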
raw['tmp'] = (100 * raw['active'] / raw['K']).round(decimals=4)
ind = raw['microbusiness_density'].notnull()
del raw['tmp']
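# Per-county lag features: lag{t} holds the value of 'active' t months earlier,
# forward-filled so the test-period rows inherit the last observed value.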
for t in range(0, 7):
raw[f'lag{t}'] = raw.groupby('cfips')['active'].shift(t)
raw[f'lag{t}'] = raw.groupby('cfips')[f'lag{t}'].ffill()
ind = raw.active.notnull() & (raw.dcount >= 29)
for lag in range(1, 7):
print(lag, smape(raw.loc[ind, 'active'], raw.loc[ind, f'lag{lag}']))
|
code
|
122244126/cell_9
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true, y_pred, agg=True):
smap = np.zeros(len(y_true))
num = np.abs(y_true - y_pred)
dem = (np.abs(y_true) + np.abs(y_pred)) / 2
pos_ind = (y_true != 0) | (y_pred != 0)
smap[pos_ind] = num[pos_ind] / dem[pos_ind]
if agg == True:
return 100 * np.mean(smap)
else:
return 100 * smap
BASE = '../input/godaddy-microbusiness-density-forecasting/'
VERSION = 29
COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E']
df2017 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2017.S0101-Data.csv', usecols=COLS)
df2017 = df2017.iloc[1:]
df2017['S0101_C01_026E'] = df2017['S0101_C01_026E'].astype('int')
df2018 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2018.S0101-Data.csv', usecols=COLS)
df2018 = df2018.iloc[1:]
df2018['S0101_C01_026E'] = df2018['S0101_C01_026E'].astype('int')
df2019 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2019.S0101-Data.csv', usecols=COLS)
df2019 = df2019.iloc[1:]
df2019['S0101_C01_026E'] = df2019['S0101_C01_026E'].astype('int')
df2020 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS)
df2020 = df2020.iloc[1:]
df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int')
df2021 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS)
df2021 = df2021.iloc[1:]
df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int')
df2017['cfips'] = df2017.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2018['cfips'] = df2018.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2019['cfips'] = df2019.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2017['year'] = 2017 + 2
df2018['year'] = 2018 + 2
df2019['year'] = 2019 + 2
df2020['year'] = 2020 + 2
df2021['year'] = 2021 + 2
K = pd.concat((df2017, df2018, df2019, df2020, df2021))
del K['GEO_ID'], K['NAME']
K = K.rename(columns={'S0101_C01_026E': 'K'})
K
train = pd.read_csv(BASE + 'train.csv')
reaveal_test = pd.read_csv(BASE + 'revealed_test.csv')
train = pd.concat([train, reaveal_test]).sort_values(by=['cfips', 'first_day_of_month']).reset_index(drop=True)
test = pd.read_csv(BASE + 'test.csv')
drop_index = (test.first_day_of_month == '2022-11-01') | (test.first_day_of_month == '2022-12-01')
test = test.loc[~drop_index, :]
sub = pd.read_csv(BASE + 'sample_submission.csv')
train['istest'] = 0
test['istest'] = 1
raw = pd.concat((train, test)).sort_values(['cfips', 'row_id']).reset_index(drop=True)
coords = pd.read_csv('/kaggle/input/usa-counties-coordinates/cfips_location.csv')
raw = raw.merge(coords.drop('name', axis=1), on='cfips')
raw['first_day_of_month'] = pd.to_datetime(raw['first_day_of_month'])
raw['year'] = raw['first_day_of_month'].dt.year
raw['month'] = raw['first_day_of_month'].dt.month
raw['county'] = raw.groupby('cfips')['county'].ffill()
raw['state'] = raw.groupby('cfips')['state'].ffill()
raw['dcount'] = raw.groupby(['cfips'])['row_id'].cumcount()
raw['county_i'] = (raw['county'] + raw['state']).factorize()[0]
raw['state_i'] = raw['state'].factorize()[0]
raw = raw.merge(K, on=['cfips', 'year'], how='left')
raw['tmp'] = (100 * raw['active'] / raw['K']).round(decimals=4)
ind = raw['microbusiness_density'].notnull()
del raw['tmp']
raw.tail(10)
|
code
|
122244126/cell_6
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true, y_pred, agg=True):
smap = np.zeros(len(y_true))
num = np.abs(y_true - y_pred)
dem = (np.abs(y_true) + np.abs(y_pred)) / 2
pos_ind = (y_true != 0) | (y_pred != 0)
smap[pos_ind] = num[pos_ind] / dem[pos_ind]
if agg == True:
return 100 * np.mean(smap)
else:
return 100 * smap
BASE = '../input/godaddy-microbusiness-density-forecasting/'
VERSION = 29
COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E']
df2017 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2017.S0101-Data.csv', usecols=COLS)
df2017 = df2017.iloc[1:]
df2017['S0101_C01_026E'] = df2017['S0101_C01_026E'].astype('int')
df2018 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2018.S0101-Data.csv', usecols=COLS)
df2018 = df2018.iloc[1:]
df2018['S0101_C01_026E'] = df2018['S0101_C01_026E'].astype('int')
df2019 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2019.S0101-Data.csv', usecols=COLS)
df2019 = df2019.iloc[1:]
df2019['S0101_C01_026E'] = df2019['S0101_C01_026E'].astype('int')
df2020 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS)
df2020 = df2020.iloc[1:]
df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int')
df2021 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS)
df2021 = df2021.iloc[1:]
df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int')
df2017['cfips'] = df2017.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2018['cfips'] = df2018.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2019['cfips'] = df2019.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2017['year'] = 2017 + 2
df2018['year'] = 2018 + 2
df2019['year'] = 2019 + 2
df2020['year'] = 2020 + 2
df2021['year'] = 2021 + 2
K = pd.concat((df2017, df2018, df2019, df2020, df2021))
del K['GEO_ID'], K['NAME']
K = K.rename(columns={'S0101_C01_026E': 'K'})
K
train = pd.read_csv(BASE + 'train.csv')
reaveal_test = pd.read_csv(BASE + 'revealed_test.csv')
train = pd.concat([train, reaveal_test]).sort_values(by=['cfips', 'first_day_of_month']).reset_index(drop=True)
test = pd.read_csv(BASE + 'test.csv')
drop_index = (test.first_day_of_month == '2022-11-01') | (test.first_day_of_month == '2022-12-01')
test = test.loc[~drop_index, :]
sub = pd.read_csv(BASE + 'sample_submission.csv')
train['istest'] = 0
test['istest'] = 1
raw = pd.concat((train, test)).sort_values(['cfips', 'row_id']).reset_index(drop=True)
coords = pd.read_csv('/kaggle/input/usa-counties-coordinates/cfips_location.csv')
raw = raw.merge(coords.drop('name', axis=1), on='cfips')
raw['first_day_of_month'] = pd.to_datetime(raw['first_day_of_month'])
raw['year'] = raw['first_day_of_month'].dt.year
raw['month'] = raw['first_day_of_month'].dt.month
raw['county'] = raw.groupby('cfips')['county'].ffill()
raw['state'] = raw.groupby('cfips')['state'].ffill()
raw['dcount'] = raw.groupby(['cfips'])['row_id'].cumcount()
raw['county_i'] = (raw['county'] + raw['state']).factorize()[0]
raw['state_i'] = raw['state'].factorize()[0]
raw = raw.merge(K, on=['cfips', 'year'], how='left')
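# Flag counties by population trend: 'up' is the year-over-year relative change in the
# population denominator K, and utrend/dtrend mark changes above +0.05% or below -0.05%.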
K = K.sort_values(['cfips', 'year']).reset_index(drop=True)
K['lag'] = K.groupby('cfips')['K'].shift(1)
K['up'] = (K['K'] - K['lag']) / K['K']
K['up'] = K['up'].fillna(0)
K['utrend'] = 1 * (K['up'] > 0.0005)
K['dtrend'] = 1 * (K['up'] < -0.0005)
K
dtu = K.groupby('cfips')[['utrend', 'dtrend']].sum().reset_index()
dtu['grp'] = 1
dtu.loc[dtu['utrend'] == 4, 'grp'] = 2
dtu.loc[dtu['dtrend'] == 4, 'grp'] = 0
raw['grp'] = raw['cfips'].map(dtu.set_index('cfips')['grp'].to_dict())
raw['utrend'] = 1 * (raw['grp'] == 2).astype('float32')
raw['atrend'] = 1 * (raw['grp'] == 1).astype('float32')
raw['ntrend'] = 1 * (raw['grp'] == 0).astype('float32')
raw['grp'].value_counts()
|
code
|
122244126/cell_2
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true, y_pred, agg=True):
smap = np.zeros(len(y_true))
num = np.abs(y_true - y_pred)
dem = (np.abs(y_true) + np.abs(y_pred)) / 2
pos_ind = (y_true != 0) | (y_pred != 0)
smap[pos_ind] = num[pos_ind] / dem[pos_ind]
if agg == True:
return 100 * np.mean(smap)
else:
return 100 * smap
BASE = '../input/godaddy-microbusiness-density-forecasting/'
VERSION = 29
COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E']
df2017 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2017.S0101-Data.csv', usecols=COLS)
df2017 = df2017.iloc[1:]
df2017['S0101_C01_026E'] = df2017['S0101_C01_026E'].astype('int')
print(df2017.shape)
df2018 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2018.S0101-Data.csv', usecols=COLS)
df2018 = df2018.iloc[1:]
df2018['S0101_C01_026E'] = df2018['S0101_C01_026E'].astype('int')
print(df2018.shape)
df2019 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2019.S0101-Data.csv', usecols=COLS)
df2019 = df2019.iloc[1:]
df2019['S0101_C01_026E'] = df2019['S0101_C01_026E'].astype('int')
print(df2019.shape)
df2020 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS)
df2020 = df2020.iloc[1:]
df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int')
print(df2020.shape)
df2021 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS)
df2021 = df2021.iloc[1:]
df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int')
print(df2021.shape)
df2017['cfips'] = df2017.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2018['cfips'] = df2018.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2019['cfips'] = df2019.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2017['year'] = 2017 + 2
df2018['year'] = 2018 + 2
df2019['year'] = 2019 + 2
df2020['year'] = 2020 + 2
df2021['year'] = 2021 + 2
K = pd.concat((df2017, df2018, df2019, df2020, df2021))
del K['GEO_ID'], K['NAME']
K = K.rename(columns={'S0101_C01_026E': 'K'})
K
|
code
|
122244126/cell_11
|
[
"text_html_output_1.png",
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true, y_pred, agg=True):
smap = np.zeros(len(y_true))
num = np.abs(y_true - y_pred)
dem = (np.abs(y_true) + np.abs(y_pred)) / 2
pos_ind = (y_true != 0) | (y_pred != 0)
smap[pos_ind] = num[pos_ind] / dem[pos_ind]
if agg == True:
return 100 * np.mean(smap)
else:
return 100 * smap
BASE = '../input/godaddy-microbusiness-density-forecasting/'
VERSION = 29
COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E']
df2017 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2017.S0101-Data.csv', usecols=COLS)
df2017 = df2017.iloc[1:]
df2017['S0101_C01_026E'] = df2017['S0101_C01_026E'].astype('int')
df2018 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2018.S0101-Data.csv', usecols=COLS)
df2018 = df2018.iloc[1:]
df2018['S0101_C01_026E'] = df2018['S0101_C01_026E'].astype('int')
df2019 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2019.S0101-Data.csv', usecols=COLS)
df2019 = df2019.iloc[1:]
df2019['S0101_C01_026E'] = df2019['S0101_C01_026E'].astype('int')
df2020 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS)
df2020 = df2020.iloc[1:]
df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int')
df2021 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS)
df2021 = df2021.iloc[1:]
df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int')
df2017['cfips'] = df2017.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2018['cfips'] = df2018.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2019['cfips'] = df2019.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2017['year'] = 2017 + 2
df2018['year'] = 2018 + 2
df2019['year'] = 2019 + 2
df2020['year'] = 2020 + 2
df2021['year'] = 2021 + 2
K = pd.concat((df2017, df2018, df2019, df2020, df2021))
del K['GEO_ID'], K['NAME']
K = K.rename(columns={'S0101_C01_026E': 'K'})
K
train = pd.read_csv(BASE + 'train.csv')
reaveal_test = pd.read_csv(BASE + 'revealed_test.csv')
train = pd.concat([train, reaveal_test]).sort_values(by=['cfips', 'first_day_of_month']).reset_index(drop=True)
test = pd.read_csv(BASE + 'test.csv')
drop_index = (test.first_day_of_month == '2022-11-01') | (test.first_day_of_month == '2022-12-01')
test = test.loc[~drop_index, :]
sub = pd.read_csv(BASE + 'sample_submission.csv')
train['istest'] = 0
test['istest'] = 1
raw = pd.concat((train, test)).sort_values(['cfips', 'row_id']).reset_index(drop=True)
coords = pd.read_csv('/kaggle/input/usa-counties-coordinates/cfips_location.csv')
raw = raw.merge(coords.drop('name', axis=1), on='cfips')
raw['first_day_of_month'] = pd.to_datetime(raw['first_day_of_month'])
raw['year'] = raw['first_day_of_month'].dt.year
raw['month'] = raw['first_day_of_month'].dt.month
raw['county'] = raw.groupby('cfips')['county'].ffill()
raw['state'] = raw.groupby('cfips')['state'].ffill()
raw['dcount'] = raw.groupby(['cfips'])['row_id'].cumcount()
raw['county_i'] = (raw['county'] + raw['state']).factorize()[0]
raw['state_i'] = raw['state'].factorize()[0]
raw = raw.merge(K, on=['cfips', 'year'], how='left')
raw['tmp'] = (100 * raw['active'] / raw['K']).round(decimals=4)
ind = raw['microbusiness_density'].notnull()
del raw['tmp']
for t in range(0, 7):
raw[f'lag{t}'] = raw.groupby('cfips')['active'].shift(t)
raw[f'lag{t}'] = raw.groupby('cfips')[f'lag{t}'].ffill()
raw.tail(10)
|
code
|
122244126/cell_1
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
print(pd.__version__)
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true, y_pred, agg=True):
smap = np.zeros(len(y_true))
num = np.abs(y_true - y_pred)
dem = (np.abs(y_true) + np.abs(y_pred)) / 2
pos_ind = (y_true != 0) | (y_pred != 0)
smap[pos_ind] = num[pos_ind] / dem[pos_ind]
if agg == True:
return 100 * np.mean(smap)
else:
return 100 * smap
BASE = '../input/godaddy-microbusiness-density-forecasting/'
VERSION = 29
|
code
|
122244126/cell_8
|
[
"text_plain_output_1.png"
] |
import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true, y_pred, agg=True):
smap = np.zeros(len(y_true))
num = np.abs(y_true - y_pred)
dem = (np.abs(y_true) + np.abs(y_pred)) / 2
pos_ind = (y_true != 0) | (y_pred != 0)
smap[pos_ind] = num[pos_ind] / dem[pos_ind]
if agg == True:
return 100 * np.mean(smap)
else:
return 100 * smap
BASE = '../input/godaddy-microbusiness-density-forecasting/'
VERSION = 29
COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E']
df2017 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2017.S0101-Data.csv', usecols=COLS)
df2017 = df2017.iloc[1:]
df2017['S0101_C01_026E'] = df2017['S0101_C01_026E'].astype('int')
df2018 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2018.S0101-Data.csv', usecols=COLS)
df2018 = df2018.iloc[1:]
df2018['S0101_C01_026E'] = df2018['S0101_C01_026E'].astype('int')
df2019 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2019.S0101-Data.csv', usecols=COLS)
df2019 = df2019.iloc[1:]
df2019['S0101_C01_026E'] = df2019['S0101_C01_026E'].astype('int')
df2020 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS)
df2020 = df2020.iloc[1:]
df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int')
df2021 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS)
df2021 = df2021.iloc[1:]
df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int')
df2017['cfips'] = df2017.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2018['cfips'] = df2018.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2019['cfips'] = df2019.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2017['year'] = 2017 + 2
df2018['year'] = 2018 + 2
df2019['year'] = 2019 + 2
df2020['year'] = 2020 + 2
df2021['year'] = 2021 + 2
K = pd.concat((df2017, df2018, df2019, df2020, df2021))
del K['GEO_ID'], K['NAME']
K = K.rename(columns={'S0101_C01_026E': 'K'})
K
train = pd.read_csv(BASE + 'train.csv')
reaveal_test = pd.read_csv(BASE + 'revealed_test.csv')
train = pd.concat([train, reaveal_test]).sort_values(by=['cfips', 'first_day_of_month']).reset_index(drop=True)
test = pd.read_csv(BASE + 'test.csv')
drop_index = (test.first_day_of_month == '2022-11-01') | (test.first_day_of_month == '2022-12-01')
test = test.loc[~drop_index, :]
sub = pd.read_csv(BASE + 'sample_submission.csv')
train['istest'] = 0
test['istest'] = 1
raw = pd.concat((train, test)).sort_values(['cfips', 'row_id']).reset_index(drop=True)
coords = pd.read_csv('/kaggle/input/usa-counties-coordinates/cfips_location.csv')
raw = raw.merge(coords.drop('name', axis=1), on='cfips')
raw['first_day_of_month'] = pd.to_datetime(raw['first_day_of_month'])
raw['year'] = raw['first_day_of_month'].dt.year
raw['month'] = raw['first_day_of_month'].dt.month
raw['county'] = raw.groupby('cfips')['county'].ffill()
raw['state'] = raw.groupby('cfips')['state'].ffill()
raw['dcount'] = raw.groupby(['cfips'])['row_id'].cumcount()
raw['county_i'] = (raw['county'] + raw['state']).factorize()[0]
raw['state_i'] = raw['state'].factorize()[0]
raw = raw.merge(K, on=['cfips', 'year'], how='left')
raw['tmp'] = (100 * raw['active'] / raw['K']).round(decimals=4)
ind = raw['microbusiness_density'].notnull()
print((raw.loc[ind, 'microbusiness_density'] - raw.loc[ind, 'tmp']).abs().mean())
del raw['tmp']
|
code
|
122244126/cell_15
|
[
"text_html_output_1.png"
] |
BACK = 36
features = ['utrend', 'atrend', 'ntrend', 'lng', 'lat', 'rate0', 'rate1', 'rate2', 'rate3', 'rate4', 'rate_sum', 'last_rate1', 'last_rate2', 'last_rate3', 'last_rate4']
CLIPS = {1: 0.00225, 2: 0.005, 3: 0.011, 4: 0.015, 5: 0.024, 6: 0.032}
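# Walk-forward SVR forecast, one model per horizon LEAD (1-6 months ahead):
# BACK limits training to the most recent 36 months, the target is the clipped
# relative growth of 'active' LEAD months ahead, and CLIPS caps (and floors at 1)
# the predicted growth multiplier applied to the lagged value for each horizon.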
for LEAD in range(1, 7):
print(f'Forecast month ahead {LEAD}...')
raw[f'pred{LEAD}'] = raw[f'lag{LEAD}'].copy()
raw[f'k{LEAD}'] = 0.0
for fold in range(29, 40 + LEAD + 1):
ind = raw.dcount <= fold - LEAD
tmp = raw.loc[ind].copy().reset_index(drop=True)
tmp['target'] = tmp.groupby('cfips')['active'].shift(-LEAD)
tmp['target'] = tmp['target'] / tmp['active'] - 1
tmp['target'] = tmp['target'].clip(-1.0, 1.0)
tmp[f'last_active'] = tmp.groupby('cfips')['active'].transform('last')
for t in range(5):
tmp[f'lag{t}'] = tmp.groupby('cfips')['active'].shift(t)
tmp[f'lag{t}'] = tmp.groupby('cfips')[f'lag{t}'].ffill()
tmp[f'rate{t}'] = tmp[f'active'] / tmp[f'lag{t}'] - 1
tmp[f'rate{t}'] = tmp[f'rate{t}'].clip(-1.0, 1.0)
tmp[f'last_rate{t}'] = (tmp[f'last_active'] - tmp[f'lag{t}']) / tmp[f'last_active']
tmp[f'last_rate{t}'] = tmp[f'last_rate{t}'].clip(-1.0, 1.0)
ind = tmp.dcount >= 5
tmp.loc[ind, f'rate{t}'] = tmp.loc[ind, f'rate{t}'].fillna(0)
tmp.loc[ind, f'last_rate{t}'] = tmp.loc[ind, f'last_rate{t}'].fillna(0)
tmp[f'rate_sum'] = (4 * tmp['rate0'] + 3 * tmp['rate1'] + 2 * tmp['rate2'] + 1 * tmp['rate3']) / 10
model = skSVR(C=0.5, kernel='rbf', degree=3, gamma='scale', coef0=0.0, tol=1e-06, epsilon=0.1, cache_size=200, verbose=False, max_iter=-1)
ind = tmp.target.notnull() & (tmp.active != 0) & (tmp.dcount >= fold - LEAD - BACK) & (tmp[features].isnull().sum(1) == 0) & (tmp[f'lag{LEAD}'] > 0)
model.fit(tmp.loc[ind, features], tmp.loc[ind, 'target'])
ind = tmp.dcount == fold - LEAD
tmp.loc[ind, f'k{LEAD}'] = model.predict(tmp.loc[ind, features])
ypred = tmp.loc[ind, [f'k{LEAD}', 'cfips']].set_index('cfips')[f'k{LEAD}'].to_dict()
del model
gc.collect()
ind = raw.dcount == fold
raw.loc[ind, f'k{LEAD}'] = raw.loc[ind, 'cfips'].map(ypred).fillna(0)
raw[f'pred{LEAD}'] = (raw[f'lag{LEAD}'] * (raw[f'k{LEAD}'].fillna(0) + 1).clip(1, 1 + CLIPS[LEAD])).round()
ind = raw.dcount == fold
smape0 = smape(raw.loc[ind, 'active'], raw.loc[ind, f'lag{LEAD}'])
smape1 = smape(raw.loc[ind, 'active'], raw.loc[ind, f'pred{LEAD}'])
print(f'month {fold}, smape gain: {smape0 - smape1:0.4f}')
raw[f'pred{LEAD}'] = raw[f'lag{LEAD}'] * (raw[f'k{LEAD}'].fillna(0) + 1).clip(1, 1 + CLIPS[LEAD])
raw[f'pred{LEAD}'] = raw[f'pred{LEAD}'].round()
ind = (raw.dcount >= 29) & (raw.dcount <= 40)
smape0 = smape(raw.loc[ind, 'active'], raw.loc[ind, f'lag{LEAD}'])
smape1 = smape(raw.loc[ind, 'active'], raw.loc[ind, f'pred{LEAD}'])
print(f'Forecast {LEAD}, total smape gain: {smape0 - smape1:0.4f}')
print()
|
code
|
122244126/cell_3
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true, y_pred, agg=True):
smap = np.zeros(len(y_true))
num = np.abs(y_true - y_pred)
dem = (np.abs(y_true) + np.abs(y_pred)) / 2
pos_ind = (y_true != 0) | (y_pred != 0)
smap[pos_ind] = num[pos_ind] / dem[pos_ind]
if agg == True:
return 100 * np.mean(smap)
else:
return 100 * smap
BASE = '../input/godaddy-microbusiness-density-forecasting/'
VERSION = 29
COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E']
df2017 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2017.S0101-Data.csv', usecols=COLS)
df2017 = df2017.iloc[1:]
df2017['S0101_C01_026E'] = df2017['S0101_C01_026E'].astype('int')
df2018 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2018.S0101-Data.csv', usecols=COLS)
df2018 = df2018.iloc[1:]
df2018['S0101_C01_026E'] = df2018['S0101_C01_026E'].astype('int')
df2019 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2019.S0101-Data.csv', usecols=COLS)
df2019 = df2019.iloc[1:]
df2019['S0101_C01_026E'] = df2019['S0101_C01_026E'].astype('int')
df2020 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS)
df2020 = df2020.iloc[1:]
df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int')
df2021 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS)
df2021 = df2021.iloc[1:]
df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int')
df2017['cfips'] = df2017.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2018['cfips'] = df2018.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2019['cfips'] = df2019.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2017['year'] = 2017 + 2
df2018['year'] = 2018 + 2
df2019['year'] = 2019 + 2
df2020['year'] = 2020 + 2
df2021['year'] = 2021 + 2
K = pd.concat((df2017, df2018, df2019, df2020, df2021))
del K['GEO_ID'], K['NAME']
K = K.rename(columns={'S0101_C01_026E': 'K'})
K
train = pd.read_csv(BASE + 'train.csv')
reaveal_test = pd.read_csv(BASE + 'revealed_test.csv')
train = pd.concat([train, reaveal_test]).sort_values(by=['cfips', 'first_day_of_month']).reset_index(drop=True)
test = pd.read_csv(BASE + 'test.csv')
drop_index = (test.first_day_of_month == '2022-11-01') | (test.first_day_of_month == '2022-12-01')
test = test.loc[~drop_index, :]
sub = pd.read_csv(BASE + 'sample_submission.csv')
print(train.shape, test.shape, sub.shape)
train['istest'] = 0
test['istest'] = 1
raw = pd.concat((train, test)).sort_values(['cfips', 'row_id']).reset_index(drop=True)
coords = pd.read_csv('/kaggle/input/usa-counties-coordinates/cfips_location.csv')
raw = raw.merge(coords.drop('name', axis=1), on='cfips')
raw['first_day_of_month'] = pd.to_datetime(raw['first_day_of_month'])
raw['year'] = raw['first_day_of_month'].dt.year
raw['month'] = raw['first_day_of_month'].dt.month
raw['county'] = raw.groupby('cfips')['county'].ffill()
raw['state'] = raw.groupby('cfips')['state'].ffill()
raw['dcount'] = raw.groupby(['cfips'])['row_id'].cumcount()
raw['county_i'] = (raw['county'] + raw['state']).factorize()[0]
raw['state_i'] = raw['state'].factorize()[0]
raw = raw.merge(K, on=['cfips', 'year'], how='left')
raw.tail(20)
|
code
|
122244126/cell_5
|
[
"text_html_output_1.png"
] |
import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true, y_pred, agg=True):
smap = np.zeros(len(y_true))
num = np.abs(y_true - y_pred)
dem = (np.abs(y_true) + np.abs(y_pred)) / 2
pos_ind = (y_true != 0) | (y_pred != 0)
smap[pos_ind] = num[pos_ind] / dem[pos_ind]
if agg == True:
return 100 * np.mean(smap)
else:
return 100 * smap
BASE = '../input/godaddy-microbusiness-density-forecasting/'
VERSION = 29
COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E']
df2017 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2017.S0101-Data.csv', usecols=COLS)
df2017 = df2017.iloc[1:]
df2017['S0101_C01_026E'] = df2017['S0101_C01_026E'].astype('int')
df2018 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2018.S0101-Data.csv', usecols=COLS)
df2018 = df2018.iloc[1:]
df2018['S0101_C01_026E'] = df2018['S0101_C01_026E'].astype('int')
df2019 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2019.S0101-Data.csv', usecols=COLS)
df2019 = df2019.iloc[1:]
df2019['S0101_C01_026E'] = df2019['S0101_C01_026E'].astype('int')
df2020 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS)
df2020 = df2020.iloc[1:]
df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int')
df2021 = pd.read_csv('../input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS)
df2021 = df2021.iloc[1:]
df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int')
df2017['cfips'] = df2017.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2018['cfips'] = df2018.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2019['cfips'] = df2019.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
df2017['year'] = 2017 + 2
df2018['year'] = 2018 + 2
df2019['year'] = 2019 + 2
df2020['year'] = 2020 + 2
df2021['year'] = 2021 + 2
K = pd.concat((df2017, df2018, df2019, df2020, df2021))
del K['GEO_ID'], K['NAME']
K = K.rename(columns={'S0101_C01_026E': 'K'})
K
K = K.sort_values(['cfips', 'year']).reset_index(drop=True)
K['lag'] = K.groupby('cfips')['K'].shift(1)
K['up'] = (K['K'] - K['lag']) / K['K']
K['up'] = K['up'].fillna(0)
K['utrend'] = 1 * (K['up'] > 0.0005)
K['dtrend'] = 1 * (K['up'] < -0.0005)
K
|
code
|
34120381/cell_2
|
[
"text_plain_output_1.png"
] |
import os
import os
for dirname, _, filenames in os.walk('/kaggle/'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
34120381/cell_1
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from gensim.models import Word2Vec
from keras.models import Sequential
from keras.callbacks import EarlyStopping
from keras import optimizers
from keras.layers import Embedding, Conv1D, MaxPooling1D, Dense
from keras.layers import Dense, Activation, Dropout, Flatten, Input
from keras import regularizers
|
code
|
34120381/cell_3
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd
datasets_dir = ''
vnrows = None
datasets_dir = '/kaggle/input/fakenews-preprocessed-dataset/'
df = pd.read_csv(datasets_dir + 'fakenews_preprocessed_35k.csv', nrows=vnrows, encoding='utf-8')
|
code
|
105213956/cell_21
|
[
"text_plain_output_1.png"
] |
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
import pandas as pd # For data handling
import seaborn as sns
import re
import pandas as pd
from time import time
from collections import defaultdict
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from gensim.models import Word2Vec, Phrases
from nltk.tokenize import word_tokenize
import nltk
import seaborn as sns
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import gensim
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
import spacy
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
from nltk.stem.snowball import SnowballStemmer
def stemm_text(text):
stemmer = SnowballStemmer('english')
return ' '.join([stemmer.stem(w) for w in text.split(' ')])
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
lemmatizer = WordNetLemmatizer()
return ' '.join([lemmatizer.lemmatize(w) for w in text.split(' ')])
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
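# Tokenize the summaries, pad/truncate them to max_length, load the 100-d GloVe
# vectors, and return (per-token GloVe sequences, the GloVe lookup dict, the
# tokenizer's word_index).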
def Sentence2Vec(T, embedding_dim=100, max_length=300):
glove_path = '../input/glove6b100dtxt/glove.6B.100d.txt'
path = glove_path
tokenizer = Tokenizer()
text = T
tokenizer.fit_on_texts(text)
word_index = tokenizer.word_index
vocab_size = 5000
trunc_type = 'post'
oov_tok = '<OOV>'
padding_type = 'post'
text_sequence = tokenizer.texts_to_sequences(text)
text_sequence = pad_sequences(text_sequence, maxlen=max_length, truncating=trunc_type, padding=padding_type)
size_of_vocabulary = len(tokenizer.word_index) + 1
embeddings_index = dict()
f = open(path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = np.zeros((size_of_vocabulary, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
text_shape = text_sequence.shape
X_train = np.empty((text_shape[0], text_shape[1], embedding_matrix.shape[1]))
for i in range(text_sequence.shape[0]):
for j in range(text_sequence.shape[1]):
X_train[i, j, :] = embedding_matrix[text_sequence[i][j]]
return (X_train, embeddings_index, word_index)
X_train, embeddings_index, word_index = Sentence2Vec(T1)  # assumed missing call: builds the GloVe sequences, embedding lookup and vocabulary used below
L = list(word_index.keys())
emb_matrix = np.zeros((len(L) + 1, 100))
for word, index in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
emb_matrix[index, :] = embedding_vector
emb_matrix.shape
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=123)
z = tsne.fit_transform(emb_matrix)
df3 = pd.DataFrame()
df3['comp-1'] = z[:, 0]
df3['comp-2'] = z[:, 1]
plt.figure(figsize=(12, 6))
sns.scatterplot(x='comp-1', y='comp-2', data=df3).set(title='Glove T-SNE projection')
|
code
|
105213956/cell_9
|
[
"image_output_1.png"
] |
from nltk.corpus import stopwords
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
|
code
|
105213956/cell_25
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
T1 = T.apply(word_tokenize)
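# Word2Vec hyperparameters: 100-dimensional vectors, ignore words seen fewer than
# 20 times, 4 worker threads, a context window of 20 words, and downsampling of
# very frequent words at a threshold of 0.001.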
num_features = 100
min_word_count = 20
num_workers = 4
context = 20
downsampling = 0.001
from gensim.models import word2vec
print('Training model....')
model = word2vec.Word2Vec(workers=num_workers, vector_size=num_features, min_count=min_word_count, window=context, sample=downsampling)
model.build_vocab(T1, progress_per=1000)
model.train(T1, total_examples=model.corpus_count, epochs=model.epochs)
|
code
|
105213956/cell_30
|
[
"text_plain_output_1.png"
] |
from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
T1 = T.apply(word_tokenize)
num_features = 100
min_word_count = 20
num_workers = 4
context = 20
downsampling = 0.001
from gensim.models import word2vec
model = word2vec.Word2Vec(workers=num_workers, vector_size=num_features, min_count=min_word_count, window=context, sample=downsampling)
model.build_vocab(T1, progress_per=1000)
model.train(T1, total_examples=model.corpus_count, epochs=model.epochs)
labels = model.wv.index_to_key
model.wv['ukraine']
X = model.wv[model.wv.index_to_key]
X.shape
|
code
|
105213956/cell_33
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
import pandas as pd # For data handling
import seaborn as sns
import re
import pandas as pd
from time import time
from collections import defaultdict
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from gensim.models import Word2Vec, Phrases
from nltk.tokenize import word_tokenize
import nltk
import seaborn as sns
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import gensim
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
import spacy
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
from nltk.stem.snowball import SnowballStemmer
def stemm_text(text):
stemmer = SnowballStemmer('english')
return ' '.join([stemmer.stem(w) for w in text.split(' ')])
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
lemmatizer = WordNetLemmatizer()
return ' '.join([lemmatizer.lemmatize(w) for w in text.split(' ')])
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
def Sentence2Vec(T, embedding_dim=100, max_length=300):
glove_path = '../input/glove6b100dtxt/glove.6B.100d.txt'
path = glove_path
tokenizer = Tokenizer()
text = T
tokenizer.fit_on_texts(text)
word_index = tokenizer.word_index
vocab_size = 5000
trunc_type = 'post'
oov_tok = '<OOV>'
padding_type = 'post'
text_sequence = tokenizer.texts_to_sequences(text)
text_sequence = pad_sequences(text_sequence, maxlen=max_length, truncating=trunc_type, padding=padding_type)
size_of_vocabulary = len(tokenizer.word_index) + 1
embeddings_index = dict()
f = open(path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = np.zeros((size_of_vocabulary, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
text_shape = text_sequence.shape
X_train = np.empty((text_shape[0], text_shape[1], embedding_matrix.shape[1]))
for i in range(text_sequence.shape[0]):
for j in range(text_sequence.shape[1]):
X_train[i, j, :] = embedding_matrix[text_sequence[i][j]]
return (X_train, embeddings_index, word_index)
X_train, embeddings_index, word_index = Sentence2Vec(T1)  # assumed missing call: builds the GloVe sequences, embedding lookup and vocabulary used below
L = list(word_index.keys())
emb_matrix = np.zeros((len(L) + 1, 100))
for word, index in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
emb_matrix[index, :] = embedding_vector
emb_matrix.shape
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=123)
z = tsne.fit_transform(emb_matrix)
df3 = pd.DataFrame()
df3['comp-1'] = z[:, 0]
df3['comp-2'] = z[:, 1]
T1 = T.apply(word_tokenize)
num_features = 100
min_word_count = 20
num_workers = 4
context = 20
downsampling = 0.001
from gensim.models import word2vec
model = word2vec.Word2Vec(workers=num_workers, vector_size=num_features, min_count=min_word_count, window=context, sample=downsampling)
model.build_vocab(T1, progress_per=1000)
model.train(T1, total_examples=model.corpus_count, epochs=model.epochs)
labels = model.wv.index_to_key
model.wv['ukraine']
X = model.wv[model.wv.index_to_key]
X.shape
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=123)
z = tsne.fit_transform(X)
z.shape
df3 = pd.DataFrame()
df3['comp-1'] = z[:, 0]
df3['comp-2'] = z[:, 1]
x = z[:, 0]
y = z[:, 1]
plt.figure(figsize=(12, 6))
sns.scatterplot(x='comp-1', y='comp-2', data=df3).set(title='Word2Vec T-SNE projection')
|
code
|
105213956/cell_20
|
[
"text_plain_output_1.png"
] |
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
import pandas as pd # For data handling
import re
import pandas as pd
from time import time
from collections import defaultdict
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from gensim.models import Word2Vec, Phrases
from nltk.tokenize import word_tokenize
import nltk
import seaborn as sns
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import gensim
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
import spacy
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
from nltk.stem.snowball import SnowballStemmer
def stemm_text(text):
stemmer = SnowballStemmer('english')
return ' '.join([stemmer.stem(w) for w in text.split(' ')])
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
lemmatizer = WordNetLemmatizer()
return ' '.join([lemmatizer.lemmatize(w) for w in text.split(' ')])
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
def Sentence2Vec(T, embedding_dim=100, max_length=300):
glove_path = '../input/glove6b100dtxt/glove.6B.100d.txt'
path = glove_path
tokenizer = Tokenizer()
text = T
tokenizer.fit_on_texts(text)
word_index = tokenizer.word_index
vocab_size = 5000
trunc_type = 'post'
oov_tok = '<OOV>'
padding_type = 'post'
text_sequence = tokenizer.texts_to_sequences(text)
text_sequence = pad_sequences(text_sequence, maxlen=max_length, truncating=trunc_type, padding=padding_type)
size_of_vocabulary = len(tokenizer.word_index) + 1
embeddings_index = dict()
f = open(path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = np.zeros((size_of_vocabulary, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
text_shape = text_sequence.shape
X_train = np.empty((text_shape[0], text_shape[1], embedding_matrix.shape[1]))
for i in range(text_sequence.shape[0]):
for j in range(text_sequence.shape[1]):
X_train[i, j, :] = embedding_matrix[text_sequence[i][j]]
return (X_train, embeddings_index, word_index)
X_train, embeddings_index, word_index = Sentence2Vec(T1)  # assumed missing call: builds the GloVe sequences, embedding lookup and vocabulary used below
L = list(word_index.keys())
emb_matrix = np.zeros((len(L) + 1, 100))
for word, index in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
emb_matrix[index, :] = embedding_vector
emb_matrix.shape
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=123)
z = tsne.fit_transform(emb_matrix)
|
code
|
105213956/cell_29
|
[
"text_plain_output_1.png"
] |
from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
T1 = T.apply(word_tokenize)
num_features = 100
min_word_count = 20
num_workers = 4
context = 20
downsampling = 0.001
from gensim.models import word2vec
model = word2vec.Word2Vec(workers=num_workers, vector_size=num_features, min_count=min_word_count, window=context, sample=downsampling)
model.build_vocab(T1, progress_per=1000)
model.train(T1, total_examples=model.corpus_count, epochs=model.epochs)
labels = model.wv.index_to_key
model.wv['ukraine']
X = model.wv[model.wv.index_to_key]
X
|
code
|
105213956/cell_11
|
[
"text_plain_output_1.png"
] |
from nltk.corpus import stopwords
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
type(T)
|
code
|
105213956/cell_1
|
[
"text_plain_output_1.png"
] |
import nltk
import re
import pandas as pd
from time import time
from collections import defaultdict
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from gensim.models import Word2Vec, Phrases
from nltk.tokenize import word_tokenize
import nltk
import seaborn as sns
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import gensim
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
import spacy
|
code
|
105213956/cell_18
|
[
"text_plain_output_1.png"
] |
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
import pandas as pd # For data handling
import re
import pandas as pd
from time import time
from collections import defaultdict
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from gensim.models import Word2Vec, Phrases
from nltk.tokenize import word_tokenize
import nltk
import seaborn as sns
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import gensim
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
import spacy
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
from nltk.stem.snowball import SnowballStemmer
def stemm_text(text):
stemmer = SnowballStemmer('english')
return ' '.join([stemmer.stem(w) for w in text.split(' ')])
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
lemmatizer = WordNetLemmatizer()
return ' '.join([lemmatizer.lemmatize(w) for w in text.split(' ')])
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
def Sentence2Vec(T, embedding_dim=100, max_length=300):
glove_path = '../input/glove6b100dtxt/glove.6B.100d.txt'
path = glove_path
tokenizer = Tokenizer()
text = T
tokenizer.fit_on_texts(text)
word_index = tokenizer.word_index
vocab_size = 5000
trunc_type = 'post'
oov_tok = '<OOV>'
padding_type = 'post'
text_sequence = tokenizer.texts_to_sequences(text)
text_sequence = pad_sequences(text_sequence, maxlen=max_length, truncating=trunc_type, padding=padding_type)
size_of_vocabulary = len(tokenizer.word_index) + 1
embeddings_index = dict()
f = open(path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = np.zeros((size_of_vocabulary, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
text_shape = text_sequence.shape
X_train = np.empty((text_shape[0], text_shape[1], embedding_matrix.shape[1]))
for i in range(text_sequence.shape[0]):
for j in range(text_sequence.shape[1]):
X_train[i, j, :] = embedding_matrix[text_sequence[i][j]]
return (X_train, embeddings_index, word_index)
X_train, embeddings_index, word_index = Sentence2Vec(T)  # required: word_index and embeddings_index are used below
L = list(word_index.keys())
emb_matrix = np.zeros((len(L) + 1, 100))
for word, index in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
emb_matrix[index, :] = embedding_vector
emb_matrix.shape
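# Minimal usage sketch: rows of emb_matrix are indexed by the Keras tokenizer ids
# stored in word_index, so a single GloVe vector can be looked up directly.
# 'war' is assumed to occur in this corpus; the guard keeps the lookup safe otherwise.
idx = word_index.get('war')
if idx is not None:
    print(emb_matrix[idx][:10])  # first 10 of the 100 GloVe dimensions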
|
code
|
105213956/cell_32
|
[
"text_plain_output_1.png"
] |
from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
import pandas as pd # For data handling
import re
import pandas as pd
from time import time
from collections import defaultdict
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from gensim.models import Word2Vec, Phrases
from nltk.tokenize import word_tokenize
import nltk
import seaborn as sns
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import gensim
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
import spacy
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
from nltk.stem.snowball import SnowballStemmer
def stemm_text(text):
stemmer = SnowballStemmer('english')
return ' '.join([stemmer.stem(w) for w in text.split(' ')])
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
lemmatizer = WordNetLemmatizer()
return ' '.join([lemmatizer.lemmatize(w) for w in text.split(' ')])
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
def Sentence2Vec(T, embedding_dim=100, max_length=300):
glove_path = '../input/glove6b100dtxt/glove.6B.100d.txt'
path = glove_path
tokenizer = Tokenizer()
text = T
tokenizer.fit_on_texts(text)
word_index = tokenizer.word_index
vocab_size = 5000
trunc_type = 'post'
oov_tok = '<OOV>'
padding_type = 'post'
text_sequence = tokenizer.texts_to_sequences(text)
text_sequence = pad_sequences(text_sequence, maxlen=max_length, truncating=trunc_type, padding=padding_type)
size_of_vocabulary = len(tokenizer.word_index) + 1
embeddings_index = dict()
f = open(path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = np.zeros((size_of_vocabulary, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
text_shape = text_sequence.shape
X_train = np.empty((text_shape[0], text_shape[1], embedding_matrix.shape[1]))
for i in range(text_sequence.shape[0]):
for j in range(text_sequence.shape[1]):
X_train[i, j, :] = embedding_matrix[text_sequence[i][j]]
return (X_train, embeddings_index, word_index)
X_train, embeddings_index, word_index = Sentence2Vec(T)  # required: word_index and embeddings_index are used below
L = list(word_index.keys())
emb_matrix = np.zeros((len(L) + 1, 100))
for word, index in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
emb_matrix[index, :] = embedding_vector
emb_matrix.shape
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=123)
z = tsne.fit_transform(emb_matrix)
T1 = T.apply(word_tokenize)
num_features = 100
min_word_count = 20
num_workers = 4
context = 20
downsampling = 0.001
from gensim.models import word2vec
model = word2vec.Word2Vec(workers=num_workers, vector_size=num_features, min_count=min_word_count, window=context, sample=downsampling)
model.build_vocab(T1, progress_per=1000)
model.train(T1, total_examples=model.corpus_count, epochs=model.epochs)
labels = model.wv.index_to_key
model.wv['ukraine']
X = model.wv[model.wv.index_to_key]
X.shape
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=123)
z = tsne.fit_transform(X)
z.shape
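# Minimal visual check (illustrative sketch): scatter the 2-D t-SNE projection and
# annotate the 30 most frequent words. labels and z are row-aligned because X was
# built from model.wv.index_to_key in that order; the 30-word cutoff is arbitrary.
plt.figure(figsize=(10, 8))
plt.scatter(z[:, 0], z[:, 1], s=5, alpha=0.5)
for word, (xc, yc) in zip(labels[:30], z[:30]):
    plt.annotate(word, (xc, yc), fontsize=8)
plt.title('t-SNE projection of Word2Vec vectors')
plt.show()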
|
code
|
105213956/cell_15
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
import pandas as pd # For data handling
import re
import pandas as pd
from time import time
from collections import defaultdict
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from gensim.models import Word2Vec, Phrases
from nltk.tokenize import word_tokenize
import nltk
import seaborn as sns
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import gensim
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
import spacy
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
from nltk.stem.snowball import SnowballStemmer
def stemm_text(text):
stemmer = SnowballStemmer('english')
return ' '.join([stemmer.stem(w) for w in text.split(' ')])
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
lemmatizer = WordNetLemmatizer()
return ' '.join([lemmatizer.lemmatize(w) for w in text.split(' ')])
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
def Sentence2Vec(T, embedding_dim=100, max_length=300):
glove_path = '../input/glove6b100dtxt/glove.6B.100d.txt'
path = glove_path
tokenizer = Tokenizer()
text = T
tokenizer.fit_on_texts(text)
word_index = tokenizer.word_index
vocab_size = 5000
trunc_type = 'post'
oov_tok = '<OOV>'
padding_type = 'post'
text_sequence = tokenizer.texts_to_sequences(text)
text_sequence = pad_sequences(text_sequence, maxlen=max_length, truncating=trunc_type, padding=padding_type)
size_of_vocabulary = len(tokenizer.word_index) + 1
embeddings_index = dict()
f = open(path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = np.zeros((size_of_vocabulary, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
text_shape = text_sequence.shape
X_train = np.empty((text_shape[0], text_shape[1], embedding_matrix.shape[1]))
for i in range(text_sequence.shape[0]):
for j in range(text_sequence.shape[1]):
X_train[i, j, :] = embedding_matrix[text_sequence[i][j]]
return (X_train, embeddings_index, word_index)
X_train, embeddings_index, word_index = Sentence2Vec(T)
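# Quick shape check: X_train is (n_documents, max_length=300, embedding_dim=100),
# i.e. one GloVe vector per (possibly padded) token position.
print(X_train.shape, len(word_index), len(embeddings_index))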
|
code
|
105213956/cell_3
|
[
"text_plain_output_2.png",
"text_plain_output_1.png"
] |
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.head()
|
code
|
105213956/cell_31
|
[
"text_plain_output_1.png"
] |
from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
import pandas as pd # For data handling
import re
import pandas as pd
from time import time
from collections import defaultdict
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from gensim.models import Word2Vec, Phrases
from nltk.tokenize import word_tokenize
import nltk
import seaborn as sns
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import gensim
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('omw-1.4')
import spacy
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
from nltk.stem.snowball import SnowballStemmer
def stemm_text(text):
stemmer = SnowballStemmer('english')
return ' '.join([stemmer.stem(w) for w in text.split(' ')])
w_tokenizer = nltk.tokenize.WhitespaceTokenizer()
lemmatizer = nltk.stem.WordNetLemmatizer()
def lemmatize_text(text):
lemmatizer = WordNetLemmatizer()
return ' '.join([lemmatizer.lemmatize(w) for w in text.split(' ')])
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
def Sentence2Vec(T, embedding_dim=100, max_length=300):
glove_path = '../input/glove6b100dtxt/glove.6B.100d.txt'
path = glove_path
tokenizer = Tokenizer()
text = T
tokenizer.fit_on_texts(text)
word_index = tokenizer.word_index
vocab_size = 5000
trunc_type = 'post'
oov_tok = '<OOV>'
padding_type = 'post'
text_sequence = tokenizer.texts_to_sequences(text)
text_sequence = pad_sequences(text_sequence, maxlen=max_length, truncating=trunc_type, padding=padding_type)
size_of_vocabulary = len(tokenizer.word_index) + 1
embeddings_index = dict()
f = open(path)
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
embedding_matrix = np.zeros((size_of_vocabulary, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
text_shape = text_sequence.shape
X_train = np.empty((text_shape[0], text_shape[1], embedding_matrix.shape[1]))
for i in range(text_sequence.shape[0]):
for j in range(text_sequence.shape[1]):
X_train[i, j, :] = embedding_matrix[text_sequence[i][j]]
return (X_train, embeddings_index, word_index)
X_train, embeddings_index, word_index = Sentence2Vec(T)  # required: word_index and embeddings_index are used below
L = list(word_index.keys())
emb_matrix = np.zeros((len(L) + 1, 100))
for word, index in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
emb_matrix[index, :] = embedding_vector
emb_matrix.shape
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=123)
z = tsne.fit_transform(emb_matrix)
T1 = T.apply(word_tokenize)
num_features = 100
min_word_count = 20
num_workers = 4
context = 20
downsampling = 0.001
from gensim.models import word2vec
model = word2vec.Word2Vec(workers=num_workers, vector_size=num_features, min_count=min_word_count, window=context, sample=downsampling)
model.build_vocab(T1, progress_per=1000)
model.train(T1, total_examples=model.corpus_count, epochs=model.epochs)
labels = model.wv.index_to_key
model.wv['ukraine']
X = model.wv[model.wv.index_to_key]
X.shape
tsne = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=123)
z = tsne.fit_transform(X)
|
code
|
105213956/cell_24
|
[
"text_plain_output_1.png"
] |
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
T1 = T.apply(word_tokenize)
T1
|
code
|
105213956/cell_10
|
[
"text_html_output_1.png"
] |
from nltk.corpus import stopwords
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T
|
code
|
105213956/cell_27
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]', '').str.replace('\n', ' ').str.lower()
stop = stopwords.words('english')
T = T.apply(lambda x: ' '.join((x for x in x.split() if not x.isdigit())))
T = T.apply(lambda words: ' '.join((word.lower() for word in words.split() if word not in stop)))
T1 = T.values.tolist()
T1 = T.apply(word_tokenize)
num_features = 100
min_word_count = 20
num_workers = 4
context = 20
downsampling = 0.001
from gensim.models import word2vec
model = word2vec.Word2Vec(workers=num_workers, vector_size=num_features, min_count=min_word_count, window=context, sample=downsampling)
model.build_vocab(T1, progress_per=1000)
model.train(T1, total_examples=model.corpus_count, epochs=model.epochs)
labels = model.wv.index_to_key
model.wv['ukraine']
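# Minimal sketch: cosine similarity between two vocabulary words. 'ukraine' is
# used above, so it is in the vocabulary; 'russia' is assumed to clear the
# min_count=20 cutoff on this war-news corpus.
print(model.wv.similarity('russia', 'ukraine'))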
|
code
|
105213956/cell_5
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
|
code
|
17144682/cell_21
|
[
"text_html_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
noise_complaints_data['City'].unique()
|
code
|
17144682/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_desc = noise_complaints_data['Descriptor'].value_counts()
comp_desc
|
code
|
17144682/cell_9
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.info()
|
code
|
17144682/cell_34
|
[
"text_plain_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
print(noise_complaints_data['Longitude'].max())
print(noise_complaints_data['Longitude'].min())
|
code
|
17144682/cell_23
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_city = noise_complaints_data['City'].value_counts()
comp_city_df = comp_city.to_frame().reset_index()
comp_city_df.rename(columns={'index': 'City Name'}, inplace=True)
comp_city_df.rename(columns={'City': 'Count'}, inplace=True)
comp_city_df.head(10)
|
code
|
17144682/cell_44
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
noise_complaints_data['Day of Week'].head()
|
code
|
17144682/cell_6
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
|
code
|
17144682/cell_39
|
[
"text_plain_output_1.png"
] |
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import matplotlib.pyplot as plt #data plotting
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 10)
from pylab import rcParams
rcParams['figure.figsize'] = (6, 10)
noise_complaints_manhattan = noise_complaints_data[noise_complaints_data['Borough'] == 'MANHATTAN']
noise_complaints_brooklyn = noise_complaints_data[noise_complaints_data['Borough'] == 'BROOKLYN']
noise_complaints_queens = noise_complaints_data[noise_complaints_data['Borough'] == 'QUEENS']
plt.plot(noise_complaints_queens['Longitude'], noise_complaints_queens['Latitude'], '.', markersize=0.5)
|
code
|
17144682/cell_26
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
noise_complaints_data['Borough'].unique()
|
code
|
17144682/cell_48
|
[
"text_plain_output_1.png"
] |
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_desc = noise_complaints_data['Descriptor'].value_counts()
comp_desc
comp_desc_df = comp_desc.to_frame().reset_index()
comp_desc_df.rename(columns={'index': 'Description'}, inplace=True)
comp_desc_df.rename(columns={'Descriptor': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 3)
comp_city = noise_complaints_data['City'].value_counts()
comp_city_df = comp_city.to_frame().reset_index()
comp_city_df.rename(columns={'index': 'City Name'}, inplace=True)
comp_city_df.rename(columns={'City': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 5)
sns.countplot(x=noise_complaints_data['Day of Week'])
|
code
|
17144682/cell_41
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
noise_complaints_data['Created Date'].iloc[0]
|
code
|
17144682/cell_11
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
|
code
|
17144682/cell_7
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.head()
|
code
|
17144682/cell_18
|
[
"text_plain_output_1.png"
] |
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_desc = noise_complaints_data['Descriptor'].value_counts()
comp_desc
comp_desc_df = comp_desc.to_frame().reset_index()
comp_desc_df.rename(columns={'index': 'Description'}, inplace=True)
comp_desc_df.rename(columns={'Descriptor': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 3)
sns.countplot(y='Complaint Type', data=noise_complaints_data, order=noise_complaints_data['Complaint Type'].value_counts().index).set_title('NYC Noise Complaint Types 2018')
|
code
|
17144682/cell_28
|
[
"text_html_output_1.png"
] |
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_desc = noise_complaints_data['Descriptor'].value_counts()
comp_desc
comp_desc_df = comp_desc.to_frame().reset_index()
comp_desc_df.rename(columns={'index': 'Description'}, inplace=True)
comp_desc_df.rename(columns={'Descriptor': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 3)
comp_city = noise_complaints_data['City'].value_counts()
comp_city_df = comp_city.to_frame().reset_index()
comp_city_df.rename(columns={'index': 'City Name'}, inplace=True)
comp_city_df.rename(columns={'City': 'Count'}, inplace=True)
sns.countplot(y='Borough', data=noise_complaints_data, order=noise_complaints_data['Borough'].value_counts().index).set_title('NYC Noise Complaints by Borough 2018')
|
code
|
17144682/cell_15
|
[
"text_plain_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_desc = noise_complaints_data['Descriptor'].value_counts()
comp_desc
comp_desc_df = comp_desc.to_frame().reset_index()
comp_desc_df.rename(columns={'index': 'Description'}, inplace=True)
comp_desc_df.rename(columns={'Descriptor': 'Count'}, inplace=True)
comp_desc_df.head(10)
|
code
|
17144682/cell_16
|
[
"text_plain_output_1.png"
] |
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_desc = noise_complaints_data['Descriptor'].value_counts()
comp_desc
comp_desc_df = comp_desc.to_frame().reset_index()
comp_desc_df.rename(columns={'index': 'Description'}, inplace=True)
comp_desc_df.rename(columns={'Descriptor': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 3)
sns.barplot(x='Count', y='Description', data=comp_desc_df.head(10)).set_title('Top 10 NYC Noise Compaint Descriptors 2018')
|
code
|
17144682/cell_38
|
[
"text_plain_output_1.png"
] |
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import matplotlib.pyplot as plt #data plotting
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 10)
from pylab import rcParams
rcParams['figure.figsize'] = (6, 10)
noise_complaints_manhattan = noise_complaints_data[noise_complaints_data['Borough'] == 'MANHATTAN']
noise_complaints_brooklyn = noise_complaints_data[noise_complaints_data['Borough'] == 'BROOKLYN']
plt.plot(noise_complaints_brooklyn['Longitude'], noise_complaints_brooklyn['Latitude'], '.', markersize=0.5)
|
code
|
17144682/cell_47
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_desc = noise_complaints_data['Descriptor'].value_counts()
comp_desc
comp_desc_df = comp_desc.to_frame().reset_index()
comp_desc_df.rename(columns={'index': 'Description'}, inplace=True)
comp_desc_df.rename(columns={'Descriptor': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 3)
comp_city = noise_complaints_data['City'].value_counts()
comp_city_df = comp_city.to_frame().reset_index()
comp_city_df.rename(columns={'index': 'City Name'}, inplace=True)
comp_city_df.rename(columns={'City': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 5)
sns.countplot(x=noise_complaints_data['Month'])
|
code
|
17144682/cell_17
|
[
"text_html_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_type = noise_complaints_data['Complaint Type'].value_counts()
comp_type
|
code
|
17144682/cell_35
|
[
"text_plain_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
print(noise_complaints_data['Latitude'].max())
print(noise_complaints_data['Latitude'].min())
|
code
|
17144682/cell_31
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
noise_complaints_data['Location Type'].value_counts()
|
code
|
17144682/cell_46
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_desc = noise_complaints_data['Descriptor'].value_counts()
comp_desc
comp_desc_df = comp_desc.to_frame().reset_index()
comp_desc_df.rename(columns={'index': 'Description'}, inplace=True)
comp_desc_df.rename(columns={'Descriptor': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 3)
comp_city = noise_complaints_data['City'].value_counts()
comp_city_df = comp_city.to_frame().reset_index()
comp_city_df.rename(columns={'index': 'City Name'}, inplace=True)
comp_city_df.rename(columns={'City': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 5)
sns.countplot(x=noise_complaints_data['Hour'])
|
code
|
17144682/cell_24
|
[
"text_plain_output_1.png"
] |
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
comp_desc = noise_complaints_data['Descriptor'].value_counts()
comp_desc
comp_desc_df = comp_desc.to_frame().reset_index()
comp_desc_df.rename(columns={'index': 'Description'}, inplace=True)
comp_desc_df.rename(columns={'Descriptor': 'Count'}, inplace=True)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 3)
comp_city = noise_complaints_data['City'].value_counts()
comp_city_df = comp_city.to_frame().reset_index()
comp_city_df.rename(columns={'index': 'City Name'}, inplace=True)
comp_city_df.rename(columns={'City': 'Count'}, inplace=True)
sns.barplot(x='Count', y='City Name', data=comp_city_df.head(10)).set_title('Top 10 NYC Noise Compaints by City 2018')
|
code
|
17144682/cell_27
|
[
"text_plain_output_1.png"
] |
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
noise_complaints_data['Borough'].value_counts()
|
code
|
17144682/cell_37
|
[
"text_plain_output_1.png"
] |
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import matplotlib.pyplot as plt #data plotting
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 10)
from pylab import rcParams
rcParams['figure.figsize'] = (6, 10)
noise_complaints_manhattan = noise_complaints_data[noise_complaints_data['Borough'] == 'MANHATTAN']
plt.plot(noise_complaints_manhattan['Longitude'], noise_complaints_manhattan['Latitude'], '.', markersize=0.3)
|
code
|
17144682/cell_5
|
[
"text_plain_output_1.png"
] |
from plotly.offline import download_plotlyjs,init_notebook_mode,plot,iplot
import cufflinks as cf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from plotly import __version__
import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
init_notebook_mode(connected=True)
cf.go_offline()
|
code
|
17144682/cell_36
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
from pylab import rcParams
from pylab import rcParams
import matplotlib.pyplot as plt #data plotting
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'Park Facility Name', 'Park Borough', 'Vehicle Type', 'Taxi Company Borough', 'Taxi Pick Up Location', 'Bridge Highway Name', 'Bridge Highway Direction', 'Road Ramp', 'Bridge Highway Segment'], axis=1)
from pylab import rcParams
rcParams['figure.figsize'] = (10, 10)
plt.plot(noise_complaints_data['Longitude'], noise_complaints_data['Latitude'], '.', markersize=0.2)
|
code
|
129019356/cell_2
|
[
"text_plain_output_1.png"
] |
!pip install pycaret
|
code
|
129019356/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
129019356/cell_3
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('max_columns', None)
pd.set_option('max_rows', 90)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
from sklearn.neighbors import KNeighborsRegressor
import scipy.stats
from sklearn.preprocessing import StandardScaler
from pycaret.regression import setup, compare_models
from sklearn.model_selection import KFold, cross_val_score
from catboost import CatBoostRegressor
from sklearn.linear_model import BayesianRidge, HuberRegressor, Ridge, OrthogonalMatchingPursuit
from lightgbm import LGBMRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
import optuna
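# Minimal sketch of how these imports are usually combined with PyCaret
# (train_df and 'target' are placeholder names, not defined in this cell):
# exp = setup(data=train_df, target='target', session_id=42)
# best_model = compare_models()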
|
code
|
89139521/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['times_gap'] = index_series - index_series.shift(1)
index_series.sort_values(by='times_gap', ascending=False).head(5)
df['direction'].value_counts()
|
code
|
89139521/cell_4
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.head()
|
code
|
89139521/cell_19
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['times_gap'] = index_series - index_series.shift(1)
index_series.sort_values(by='times_gap', ascending=False).head(5)
df['hour'] = df.time.dt.hour
df['minute'] = df.time.dt.minute
median = df.groupby(['hour', 'minute', 'x', 'y', 'direction']).median()
median
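# The grouped medians can double as a lookup table: resetting the index turns the
# group keys back into ordinary columns, ready to merge onto any frame that shares
# hour/minute/x/y/direction.
median_lookup = median.reset_index()
median_lookup.head()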
|
code
|
89139521/cell_1
|
[
"text_plain_output_1.png"
] |
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
|
code
|
89139521/cell_7
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['times_gap'] = index_series - index_series.shift(1)
index_series.sort_values(by='times_gap', ascending=False).head(5)
|
code
|
89139521/cell_8
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['times_gap'] = index_series - index_series.shift(1)
index_series.sort_values(by='times_gap', ascending=False).head(5)
sns.lineplot(data=index_series, y='times_gap', x='time')
|
code
|
89139521/cell_15
|
[
"text_plain_output_1.png",
"image_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['times_gap'] = index_series - index_series.shift(1)
index_series.sort_values(by='times_gap', ascending=False).head(5)
df.head()
|
code
|
89139521/cell_17
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['times_gap'] = index_series - index_series.shift(1)
index_series.sort_values(by='times_gap', ascending=False).head(5)
df['hour'] = df.time.dt.hour
df['minute'] = df.time.dt.minute
df
|
code
|
89139521/cell_24
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['times_gap'] = index_series - index_series.shift(1)
index_series.sort_values(by='times_gap', ascending=False).head(5)
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv', parse_dates=['time'])
test['hour'] = test.time.dt.hour
test['minute'] = test.time.dt.minute
pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/sample_submission.csv')
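# Minimal sketch of a median-lookup baseline (left commented out because the
# `median` groupby frame is built in an earlier cell; 'congestion' is the
# competition target column):
# sub = test.merge(median.reset_index(), on=['hour', 'minute', 'x', 'y', 'direction'], how='left')
# sub[['row_id', 'congestion']].to_csv('submission.csv', index=False)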
|
code
|
89139521/cell_10
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['times_gap'] = index_series - index_series.shift(1)
index_series.sort_values(by='times_gap', ascending=False).head(5)
print(df['x'].unique())
print(df['y'].unique())
|
code
|
89139521/cell_12
|
[
"text_plain_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['times_gap'] = index_series - index_series.shift(1)
index_series.sort_values(by='times_gap', ascending=False).head(5)
sns.scatterplot(data=df, x='x', y='y')
|
code
|
89139521/cell_5
|
[
"text_html_output_1.png"
] |
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
|
code
|
88080357/cell_19
|
[
"text_plain_output_1.png"
] |
! wget -O ngannou.gif https://raw.githubusercontent.com/Justsecret123/Human-pose-estimation/main/Test%20gifs/Ngannou_takedown.gif
|
code
|
88080357/cell_24
|
[
"text_plain_output_1.png"
] |
from IPython.display import HTML, display
from matplotlib.collections import LineCollection
import cv2
import imageio
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
KEYPOINT_DICT = {'nose': 0, 'left_eye': 1, 'right_eye': 2, 'left_ear': 3, 'right_ear': 4, 'left_shoulder': 5, 'right_shoulder': 6, 'left_elbow': 7, 'right_elbow': 8, 'left_wrist': 9, 'right_wrist': 10, 'left_hip': 11, 'right_hip': 12, 'left_knee': 13, 'right_knee': 14, 'left_ankle': 15, 'right_ankle': 16}
KEYPOINT_EDGE_INDS_TO_COLOR = {(0, 1): 'm', (0, 2): 'c', (1, 3): 'm', (2, 4): 'c', (0, 5): 'm', (0, 6): 'c', (5, 7): 'm', (7, 9): 'm', (6, 8): 'c', (8, 10): 'c', (5, 6): 'y', (5, 11): 'm', (6, 12): 'c', (11, 12): 'y', (11, 13): 'm', (13, 15): 'm', (12, 14): 'c', (14, 16): 'c'}
def _keypoints_and_edges_for_display(
keypoints_with_scores,
height,
width,
keypoint_threshold=0.11,
):
"""Returns high confidence keypoints and edges for visualization.
Args:
keypoints_with_scores: A numpy array with shape [1, 1, 17, 3] representing
the keypoint coordinates and scores returned from the MoveNet model.
height: height of the image in pixels.
width: width of the image in pixels.
keypoint_threshold: minimum confidence score for a keypoint to be
visualized.
Returns:
A (keypoints_xy, edges_xy, edge_colors) containing:
* the coordinates of all keypoints of all detected entities;
* the coordinates of all skeleton edges of all detected entities;
* the colors in which the edges should be plotted.
"""
keypoints_all = []
keypoint_edges_all = []
edge_colors = []
(num_instances, _, _, _) = keypoints_with_scores.shape
for idx in range(num_instances):
kpts_x = keypoints_with_scores[0, idx, :, 1]
kpts_y = keypoints_with_scores[0, idx, :, 0]
kpts_scores = keypoints_with_scores[0, idx, :, 2]
kpts_absolute_xy = np.stack([width * np.array(kpts_x), height
* np.array(kpts_y)], axis=-1)
kpts_above_thresh_absolute = kpts_absolute_xy[kpts_scores
> keypoint_threshold, :]
keypoints_all.append(kpts_above_thresh_absolute)
for (edge_pair, color) in KEYPOINT_EDGE_INDS_TO_COLOR.items():
if kpts_scores[edge_pair[0]] > keypoint_threshold \
and kpts_scores[edge_pair[1]] > keypoint_threshold:
x_start = kpts_absolute_xy[edge_pair[0], 0]
y_start = kpts_absolute_xy[edge_pair[0], 1]
x_end = kpts_absolute_xy[edge_pair[1], 0]
y_end = kpts_absolute_xy[edge_pair[1], 1]
line_seg = np.array([[x_start, y_start], [x_end,
y_end]])
keypoint_edges_all.append(line_seg)
edge_colors.append(color)
if keypoints_all:
keypoints_xy = np.concatenate(keypoints_all, axis=0)
else:
keypoints_xy = np.zeros((0, 17, 2))
if keypoint_edges_all:
edges_xy = np.stack(keypoint_edges_all, axis=0)
else:
edges_xy = np.zeros((0, 2, 2))
return (keypoints_xy, edges_xy, edge_colors)
def draw_prediction_on_image(
image,
keypoints_with_scores,
crop_region=None,
close_figure=False,
output_image_height=None,
):
"""Draws the keypoint predictions on image.
Args:
image: A numpy array with shape [height, width, channel] representing the
pixel values of the input image.
keypoints_with_scores: A numpy array with shape [1, 1, 17, 3] representing
the keypoint coordinates and scores returned from the MoveNet model.
crop_region: A dictionary that defines the coordinates of the bounding box
of the crop region in normalized coordinates (see the init_crop_region
function below for more detail). If provided, this function will also
draw the bounding box on the image.
output_image_height: An integer indicating the height of the output image.
Note that the image aspect ratio will be the same as the input image.
Returns:
A numpy array with shape [out_height, out_width, channel] representing the
image overlaid with keypoint predictions.
"""
(height, width, channel) = image.shape
aspect_ratio = float(width) / height
(fig, ax) = plt.subplots(figsize=(12 * aspect_ratio, 12))
# To remove the huge white borders
fig.tight_layout(pad=0)
ax.margins(0)
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.axis('off')
im = ax.imshow(image)
line_segments = LineCollection([], linewidths=4, linestyle='solid')
ax.add_collection(line_segments)
# Turn off tick labels
scat = ax.scatter([], [], s=60, color='#FF1493', zorder=3)
(keypoint_locs, keypoint_edges, edge_colors) = \
_keypoints_and_edges_for_display(keypoints_with_scores, height,
width)
line_segments.set_segments(keypoint_edges)
line_segments.set_color(edge_colors)
if keypoint_edges.shape[0]:
line_segments.set_segments(keypoint_edges)
line_segments.set_color(edge_colors)
if keypoint_locs.shape[0]:
scat.set_offsets(keypoint_locs)
if crop_region is not None:
xmin = max(crop_region['x_min'] * width, 0.0)
ymin = max(crop_region['y_min'] * height, 0.0)
rec_width = min(crop_region['x_max'], 0.99) * width - xmin
rec_height = min(crop_region['y_max'], 0.99) * height - ymin
rect = patches.Rectangle(
(xmin, ymin),
rec_width,
rec_height,
linewidth=1,
edgecolor='b',
facecolor='none',
)
ax.add_patch(rect)
fig.canvas.draw()
image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(),
dtype=np.uint8)
image_from_plot = \
image_from_plot.reshape(fig.canvas.get_width_height()[::-1]
+ (3, ))
plt.close(fig)
if output_image_height is not None:
output_image_width = int(output_image_height / height * width)
image_from_plot = cv2.resize(image_from_plot,
dsize=(output_image_width, output_image_height),
interpolation=cv2.INTER_CUBIC)
return image_from_plot
def to_gif(images, fps):
    """Converts image sequence (4D numpy array) to gif."""
    # Note: `embed` is not imported in this cell; in the TF Hub pose tutorials it
    # usually comes from `from tensorflow_docs.vis import embed` (an assumption --
    # adjust to whatever embedding helper is available in the session).
    imageio.mimsave('./animation.gif', images, fps=fps)
    return embed.embed_file('./animation.gif')
def progress(value, max=100):
return HTML("""
<progress
value='{value}'
max='{max}',
style='width: 100%'
>
{value}
</progress>
""".format(value=value,
max=max))
MIN_CROP_KEYPOINT_SCORE = 0.2
def init_crop_region(image_height, image_width):
"""Defines the default crop region.
The function provides the initial crop region (pads the full image from both
sides to make it a square image) when the algorithm cannot reliably determine
the crop region from the previous frame.
"""
if image_width > image_height:
box_height = image_width / image_height
box_width = 1.0
y_min = (image_height / 2 - image_width / 2) / image_height
x_min = 0.0
else:
box_height = 1.0
box_width = image_height / image_width
y_min = 0.0
x_min = (image_width / 2 - image_height / 2) / image_width
return {'y_min': y_min, 'x_min': x_min, 'y_max': y_min + box_height, 'x_max': x_min + box_width, 'height': box_height, 'width': box_width}
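# Worked example of the default crop: for a 720x1280 (H x W) frame the region is
# padded vertically so it becomes square in pixels, i.e. box_height = 1280/720
# ~ 1.778 and y_min = (360 - 640)/720 ~ -0.389, so y spans [-0.389, 1.389]
# while x spans [0, 1].
# print(init_crop_region(720, 1280))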
def torso_visible(keypoints):
"""Checks whether there are enough torso keypoints.
This function checks whether the model is confident at predicting one of the
shoulders/hips which is required to determine a good crop region.
"""
return (keypoints[0, 0, KEYPOINT_DICT['left_hip'], 2] > MIN_CROP_KEYPOINT_SCORE or keypoints[0, 0, KEYPOINT_DICT['right_hip'], 2] > MIN_CROP_KEYPOINT_SCORE) and (keypoints[0, 0, KEYPOINT_DICT['left_shoulder'], 2] > MIN_CROP_KEYPOINT_SCORE or keypoints[0, 0, KEYPOINT_DICT['right_shoulder'], 2] > MIN_CROP_KEYPOINT_SCORE)
def determine_torso_and_body_range(keypoints, target_keypoints, center_y, center_x):
"""Calculates the maximum distance from each keypoints to the center location.
The function returns the maximum distances from the two sets of keypoints:
full 17 keypoints and 4 torso keypoints. The returned information will be
used to determine the crop size. See determineCropRegion for more detail.
"""
torso_joints = ['left_shoulder', 'right_shoulder', 'left_hip', 'right_hip']
max_torso_yrange = 0.0
max_torso_xrange = 0.0
for joint in torso_joints:
dist_y = abs(center_y - target_keypoints[joint][0])
dist_x = abs(center_x - target_keypoints[joint][1])
if dist_y > max_torso_yrange:
max_torso_yrange = dist_y
if dist_x > max_torso_xrange:
max_torso_xrange = dist_x
max_body_yrange = 0.0
max_body_xrange = 0.0
for joint in KEYPOINT_DICT.keys():
if keypoints[0, 0, KEYPOINT_DICT[joint], 2] < MIN_CROP_KEYPOINT_SCORE:
continue
dist_y = abs(center_y - target_keypoints[joint][0])
dist_x = abs(center_x - target_keypoints[joint][1])
if dist_y > max_body_yrange:
max_body_yrange = dist_y
if dist_x > max_body_xrange:
max_body_xrange = dist_x
return [max_torso_yrange, max_torso_xrange, max_body_yrange, max_body_xrange]
def determine_crop_region(keypoints, image_height, image_width):
"""Determines the region to crop the image for the model to run inference on.
The algorithm uses the detected joints from the previous frame to estimate
the square region that encloses the full body of the target person and
centers at the midpoint of two hip joints. The crop size is determined by
the distances between each joints and the center point.
When the model is not confident with the four torso joint predictions, the
function returns a default crop which is the full image padded to square.
"""
target_keypoints = {}
for joint in KEYPOINT_DICT.keys():
target_keypoints[joint] = [keypoints[0, 0, KEYPOINT_DICT[joint], 0] * image_height, keypoints[0, 0, KEYPOINT_DICT[joint], 1] * image_width]
if torso_visible(keypoints):
center_y = (target_keypoints['left_hip'][0] + target_keypoints['right_hip'][0]) / 2
center_x = (target_keypoints['left_hip'][1] + target_keypoints['right_hip'][1]) / 2
max_torso_yrange, max_torso_xrange, max_body_yrange, max_body_xrange = determine_torso_and_body_range(keypoints, target_keypoints, center_y, center_x)
crop_length_half = np.amax([max_torso_xrange * 1.9, max_torso_yrange * 1.9, max_body_yrange * 1.2, max_body_xrange * 1.2])
tmp = np.array([center_x, image_width - center_x, center_y, image_height - center_y])
crop_length_half = np.amin([crop_length_half, np.amax(tmp)])
crop_corner = [center_y - crop_length_half, center_x - crop_length_half]
if crop_length_half > max(image_width, image_height) / 2:
return init_crop_region(image_height, image_width)
else:
crop_length = crop_length_half * 2
return {'y_min': crop_corner[0] / image_height, 'x_min': crop_corner[1] / image_width, 'y_max': (crop_corner[0] + crop_length) / image_height, 'x_max': (crop_corner[1] + crop_length) / image_width, 'height': (crop_corner[0] + crop_length) / image_height - crop_corner[0] / image_height, 'width': (crop_corner[1] + crop_length) / image_width - crop_corner[1] / image_width}
else:
return init_crop_region(image_height, image_width)
def crop_and_resize(image, crop_region, crop_size):
"""Crops and resize the image to prepare for the model input."""
boxes = [[crop_region['y_min'], crop_region['x_min'], crop_region['y_max'], crop_region['x_max']]]
output_image = tf.image.crop_and_resize(image, box_indices=[0], boxes=boxes, crop_size=crop_size)
return tf.cast(output_image, tf.int32)
image_path = './ngannou.gif'
raw_image = tf.io.read_file(image_path)
decoded_image = tf.image.decode_gif(raw_image)
image = tf.cast(decoded_image, dtype=tf.int32)
num_frames, image_height, image_width, _ = image.shape
crop_region = init_crop_region(image_height, image_width)
shape = image.shape
print(f'\n - The gif is divided in {shape[0]} frames\n - Image size : ({shape[1]},{shape[2]})\n - Channels : {shape[3]}\n - Data type : {image.dtype}')
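# Minimal sanity-check sketch of the helpers defined above: crop the first gif frame
# with the initial crop region. The 256x256 crop size is an assumption matching
# MoveNet's usual input resolution, not a value set anywhere in this cell.
sample_crop = crop_and_resize(tf.expand_dims(image[0], axis=0), crop_region, crop_size=[256, 256])
print(f'cropped sample frame: shape={sample_crop.shape}, dtype={sample_crop.dtype}')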
|
code
|
88080357/cell_14
|
[
"application_vnd.jupyter.stderr_output_1.png"
] |
import tensorflow_hub as hub
model = hub.load('https://tfhub.dev/google/movenet/multipose/lightning/1')
movenet = model.signatures['serving_default']
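# Minimal smoke-test sketch of the loaded signature. The 256x256 input size and the
# [1, 6, 56] output layout are assumptions based on the multipose lightning model card;
# the 'output_0' key matches how the signature is called elsewhere in this notebook.
import tensorflow as tf
dummy_input = tf.zeros((1, 256, 256, 3), dtype=tf.int32)  # spatial dims are assumed to be multiples of 32
dummy_output = movenet(dummy_input)['output_0']
print('multipose output shape:', dummy_output.shape)  # up to 6 people x (17 keypoints * 3 + 5 box values)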
|
code
|
88080357/cell_27
|
[
"application_vnd.jupyter.stderr_output_2.png",
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] |
from IPython.display import HTML, display
from matplotlib.collections import LineCollection
import cv2
import imageio
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
KEYPOINT_DICT = {'nose': 0, 'left_eye': 1, 'right_eye': 2, 'left_ear': 3, 'right_ear': 4, 'left_shoulder': 5, 'right_shoulder': 6, 'left_elbow': 7, 'right_elbow': 8, 'left_wrist': 9, 'right_wrist': 10, 'left_hip': 11, 'right_hip': 12, 'left_knee': 13, 'right_knee': 14, 'left_ankle': 15, 'right_ankle': 16}
KEYPOINT_EDGE_INDS_TO_COLOR = {(0, 1): 'm', (0, 2): 'c', (1, 3): 'm', (2, 4): 'c', (0, 5): 'm', (0, 6): 'c', (5, 7): 'm', (7, 9): 'm', (6, 8): 'c', (8, 10): 'c', (5, 6): 'y', (5, 11): 'm', (6, 12): 'c', (11, 12): 'y', (11, 13): 'm', (13, 15): 'm', (12, 14): 'c', (14, 16): 'c'}
def _keypoints_and_edges_for_display(
keypoints_with_scores,
height,
width,
keypoint_threshold=0.11,
):
"""Returns high confidence keypoints and edges for visualization.
Args:
keypoints_with_scores: A numpy array with shape [1, 1, 17, 3] representing
the keypoint coordinates and scores returned from the MoveNet model.
height: height of the image in pixels.
width: width of the image in pixels.
keypoint_threshold: minimum confidence score for a keypoint to be
visualized.
Returns:
A (keypoints_xy, edges_xy, edge_colors) containing:
* the coordinates of all keypoints of all detected entities;
* the coordinates of all skeleton edges of all detected entities;
* the colors in which the edges should be plotted.
"""
keypoints_all = []
keypoint_edges_all = []
edge_colors = []
(num_instances, _, _, _) = keypoints_with_scores.shape
for idx in range(num_instances):
kpts_x = keypoints_with_scores[0, idx, :, 1]
kpts_y = keypoints_with_scores[0, idx, :, 0]
kpts_scores = keypoints_with_scores[0, idx, :, 2]
kpts_absolute_xy = np.stack([width * np.array(kpts_x), height
* np.array(kpts_y)], axis=-1)
kpts_above_thresh_absolute = kpts_absolute_xy[kpts_scores
> keypoint_threshold, :]
keypoints_all.append(kpts_above_thresh_absolute)
for (edge_pair, color) in KEYPOINT_EDGE_INDS_TO_COLOR.items():
if kpts_scores[edge_pair[0]] > keypoint_threshold \
and kpts_scores[edge_pair[1]] > keypoint_threshold:
x_start = kpts_absolute_xy[edge_pair[0], 0]
y_start = kpts_absolute_xy[edge_pair[0], 1]
x_end = kpts_absolute_xy[edge_pair[1], 0]
y_end = kpts_absolute_xy[edge_pair[1], 1]
line_seg = np.array([[x_start, y_start], [x_end,
y_end]])
keypoint_edges_all.append(line_seg)
edge_colors.append(color)
if keypoints_all:
keypoints_xy = np.concatenate(keypoints_all, axis=0)
else:
keypoints_xy = np.zeros((0, 17, 2))
if keypoint_edges_all:
edges_xy = np.stack(keypoint_edges_all, axis=0)
else:
edges_xy = np.zeros((0, 2, 2))
return (keypoints_xy, edges_xy, edge_colors)
def draw_prediction_on_image(
image,
keypoints_with_scores,
crop_region=None,
close_figure=False,
output_image_height=None,
):
"""Draws the keypoint predictions on image.
Args:
image: A numpy array with shape [height, width, channel] representing the
pixel values of the input image.
keypoints_with_scores: A numpy array with shape [1, 1, 17, 3] representing
the keypoint coordinates and scores returned from the MoveNet model.
crop_region: A dictionary that defines the coordinates of the bounding box
of the crop region in normalized coordinates (see the init_crop_region
function below for more detail). If provided, this function will also
draw the bounding box on the image.
output_image_height: An integer indicating the height of the output image.
Note that the image aspect ratio will be the same as the input image.
Returns:
A numpy array with shape [out_height, out_width, channel] representing the
image overlaid with keypoint predictions.
"""
(height, width, channel) = image.shape
aspect_ratio = float(width) / height
(fig, ax) = plt.subplots(figsize=(12 * aspect_ratio, 12))
# To remove the huge white borders
fig.tight_layout(pad=0)
ax.margins(0)
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.axis('off')
im = ax.imshow(image)
line_segments = LineCollection([], linewidths=4, linestyle='solid')
ax.add_collection(line_segments)
# Turn off tick labels
scat = ax.scatter([], [], s=60, color='#FF1493', zorder=3)
(keypoint_locs, keypoint_edges, edge_colors) = \
_keypoints_and_edges_for_display(keypoints_with_scores, height,
width)
line_segments.set_segments(keypoint_edges)
line_segments.set_color(edge_colors)
if keypoint_edges.shape[0]:
line_segments.set_segments(keypoint_edges)
line_segments.set_color(edge_colors)
if keypoint_locs.shape[0]:
scat.set_offsets(keypoint_locs)
if crop_region is not None:
xmin = max(crop_region['x_min'] * width, 0.0)
ymin = max(crop_region['y_min'] * height, 0.0)
rec_width = min(crop_region['x_max'], 0.99) * width - xmin
rec_height = min(crop_region['y_max'], 0.99) * height - ymin
rect = patches.Rectangle(
(xmin, ymin),
rec_width,
rec_height,
linewidth=1,
edgecolor='b',
facecolor='none',
)
ax.add_patch(rect)
fig.canvas.draw()
image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(),
dtype=np.uint8)
image_from_plot = \
image_from_plot.reshape(fig.canvas.get_width_height()[::-1]
+ (3, ))
plt.close(fig)
if output_image_height is not None:
output_image_width = int(output_image_height / height * width)
image_from_plot = cv2.resize(image_from_plot,
dsize=(output_image_width, output_image_height),
interpolation=cv2.INTER_CUBIC)
return image_from_plot
def to_gif(images, fps):
"""Converts image sequence (4D numpy array) to gif."""
imageio.mimsave('./animation.gif', images, fps=fps)
return embed.embed_file('./animation.gif')
def progress(value, max=100):
return HTML("""
<progress
value='{value}'
max='{max}'
style='width: 100%'
>
{value}
</progress>
""".format(value=value,
max=max))
MIN_CROP_KEYPOINT_SCORE = 0.2
def init_crop_region(image_height, image_width):
"""Defines the default crop region.
The function provides the initial crop region (pads the full image from both
sides to make it a square image) when the algorithm cannot reliably determine
the crop region from the previous frame.
"""
if image_width > image_height:
box_height = image_width / image_height
box_width = 1.0
y_min = (image_height / 2 - image_width / 2) / image_height
x_min = 0.0
else:
box_height = 1.0
box_width = image_height / image_width
y_min = 0.0
x_min = (image_width / 2 - image_height / 2) / image_width
return {'y_min': y_min, 'x_min': x_min, 'y_max': y_min + box_height, 'x_max': x_min + box_width, 'height': box_height, 'width': box_width}
def torso_visible(keypoints):
"""Checks whether there are enough torso keypoints.
This function checks whether the model is confident at predicting one of the
shoulders/hips which is required to determine a good crop region.
"""
return (keypoints[0, 0, KEYPOINT_DICT['left_hip'], 2] > MIN_CROP_KEYPOINT_SCORE or keypoints[0, 0, KEYPOINT_DICT['right_hip'], 2] > MIN_CROP_KEYPOINT_SCORE) and (keypoints[0, 0, KEYPOINT_DICT['left_shoulder'], 2] > MIN_CROP_KEYPOINT_SCORE or keypoints[0, 0, KEYPOINT_DICT['right_shoulder'], 2] > MIN_CROP_KEYPOINT_SCORE)
def determine_torso_and_body_range(keypoints, target_keypoints, center_y, center_x):
"""Calculates the maximum distance from each keypoints to the center location.
The function returns the maximum distances from the two sets of keypoints:
full 17 keypoints and 4 torso keypoints. The returned information will be
used to determine the crop size. See determineCropRegion for more detail.
"""
torso_joints = ['left_shoulder', 'right_shoulder', 'left_hip', 'right_hip']
max_torso_yrange = 0.0
max_torso_xrange = 0.0
for joint in torso_joints:
dist_y = abs(center_y - target_keypoints[joint][0])
dist_x = abs(center_x - target_keypoints[joint][1])
if dist_y > max_torso_yrange:
max_torso_yrange = dist_y
if dist_x > max_torso_xrange:
max_torso_xrange = dist_x
max_body_yrange = 0.0
max_body_xrange = 0.0
for joint in KEYPOINT_DICT.keys():
if keypoints[0, 0, KEYPOINT_DICT[joint], 2] < MIN_CROP_KEYPOINT_SCORE:
continue
dist_y = abs(center_y - target_keypoints[joint][0])
dist_x = abs(center_x - target_keypoints[joint][1])
if dist_y > max_body_yrange:
max_body_yrange = dist_y
if dist_x > max_body_xrange:
max_body_xrange = dist_x
return [max_torso_yrange, max_torso_xrange, max_body_yrange, max_body_xrange]
def determine_crop_region(keypoints, image_height, image_width):
"""Determines the region to crop the image for the model to run inference on.
The algorithm uses the detected joints from the previous frame to estimate
the square region that encloses the full body of the target person and
centers at the midpoint of two hip joints. The crop size is determined by
the distances between each joints and the center point.
When the model is not confident with the four torso joint predictions, the
function returns a default crop which is the full image padded to square.
"""
target_keypoints = {}
for joint in KEYPOINT_DICT.keys():
target_keypoints[joint] = [keypoints[0, 0, KEYPOINT_DICT[joint], 0] * image_height, keypoints[0, 0, KEYPOINT_DICT[joint], 1] * image_width]
if torso_visible(keypoints):
center_y = (target_keypoints['left_hip'][0] + target_keypoints['right_hip'][0]) / 2
center_x = (target_keypoints['left_hip'][1] + target_keypoints['right_hip'][1]) / 2
max_torso_yrange, max_torso_xrange, max_body_yrange, max_body_xrange = determine_torso_and_body_range(keypoints, target_keypoints, center_y, center_x)
crop_length_half = np.amax([max_torso_xrange * 1.9, max_torso_yrange * 1.9, max_body_yrange * 1.2, max_body_xrange * 1.2])
tmp = np.array([center_x, image_width - center_x, center_y, image_height - center_y])
crop_length_half = np.amin([crop_length_half, np.amax(tmp)])
crop_corner = [center_y - crop_length_half, center_x - crop_length_half]
if crop_length_half > max(image_width, image_height) / 2:
return init_crop_region(image_height, image_width)
else:
crop_length = crop_length_half * 2
return {'y_min': crop_corner[0] / image_height, 'x_min': crop_corner[1] / image_width, 'y_max': (crop_corner[0] + crop_length) / image_height, 'x_max': (crop_corner[1] + crop_length) / image_width, 'height': (crop_corner[0] + crop_length) / image_height - crop_corner[0] / image_height, 'width': (crop_corner[1] + crop_length) / image_width - crop_corner[1] / image_width}
else:
return init_crop_region(image_height, image_width)
def crop_and_resize(image, crop_region, crop_size):
"""Crops and resize the image to prepare for the model input."""
boxes = [[crop_region['y_min'], crop_region['x_min'], crop_region['y_max'], crop_region['x_max']]]
output_image = tf.image.crop_and_resize(image, box_indices=[0], boxes=boxes, crop_size=crop_size)
return tf.cast(output_image, tf.int32)
model = hub.load('https://tfhub.dev/google/movenet/multipose/lightning/1')
movenet = model.signatures['serving_default']
input_size = 256
image_path = './ngannou.gif'
raw_image = tf.io.read_file(image_path)
decoded_image = tf.image.decode_gif(raw_image)
image = tf.cast(decoded_image, dtype=tf.int32)
num_frames, image_height, image_width, _ = image.shape
crop_region = init_crop_region(image_height, image_width)
shape = image.shape
def run_inference(movenet, image, crop_region, crop_size):
"""Runs model inference on the cropped region.
The function runs the model inference on the cropped region and updates the
model output to the original image coordinate system.
"""
image_height, image_width, _ = image.shape
input_image = crop_and_resize(tf.expand_dims(image, axis=0), crop_region, crop_size=crop_size)
output = movenet(input_image)['output_0']
keypoints_with_scores = output.numpy()
max_instances = 6
for idx in range(17):
keypoints_with_scores[0, 0, idx, 0] = (crop_region['y_min'] * image_height + crop_region['height'] * image_height * keypoints_with_scores[0, 0, idx, 0]) / image_height
keypoints_with_scores[0, 0, idx, 1] = (crop_region['x_min'] * image_width + crop_region['width'] * image_width * keypoints_with_scores[0, 0, idx, 1]) / image_width
return keypoints_with_scores
output_images = []
bar = display(progress(0, num_frames - 1), display_id=True)
for frame_idx in range(num_frames):
keypoints_with_scores = run_inference(movenet, image[frame_idx, :, :, :], crop_region, crop_size=[input_size, input_size])
output_images.append(draw_prediction_on_image(image[frame_idx, :, :, :].numpy().astype(np.int32), keypoints_with_scores, crop_region=None, close_figure=True, output_image_height=300))
crop_region = determine_crop_region(keypoints_with_scores, image_height, image_width)
bar.update(progress(frame_idx, num_frames - 1))
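# Optional follow-up sketch: write the annotated frames back out as a gif with imageio,
# which is already imported above. The 10 fps value is an assumption, since the source
# gif's frame rate is not read anywhere in this cell.
annotated = np.stack(output_images, axis=0)
imageio.mimsave('./pose_overlay.gif', annotated, fps=10)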
|
code
|
17141241/cell_21
|
[
"text_html_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train['Embarked'] = train['Embarked'].fillna('S')
test['Fare'] = test['Fare'].fillna(test['Fare'].median())
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train = train.join(pd.get_dummies(train['Sex'], prefix='sex'))
test = test.join(pd.get_dummies(test['Sex'], prefix='sex'))
train = train.join(pd.get_dummies(train['Embarked'], prefix='emberk'))
test = test.join(pd.get_dummies(test['Embarked'], prefix='emberk'))
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test.head()
|
code
|
17141241/cell_13
|
[
"text_html_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
train['Ticket'].value_counts()
|
code
|
17141241/cell_9
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
train.head()
|
code
|
17141241/cell_25
|
[
"text_html_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train['Embarked'] = train['Embarked'].fillna('S')
test['Fare'] = test['Fare'].fillna(test['Fare'].median())
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train = train.join(pd.get_dummies(train['Sex'], prefix='sex'))
test = test.join(pd.get_dummies(test['Sex'], prefix='sex'))
train = train.join(pd.get_dummies(train['Embarked'], prefix='emberk'))
test = test.join(pd.get_dummies(test['Embarked'], prefix='emberk'))
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
X_train = train.drop('Survived', axis=1)
y_train = train['Survived'].values
y_train
|
code
|
17141241/cell_20
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train['Embarked'] = train['Embarked'].fillna('S')
test['Fare'] = test['Fare'].fillna(test['Fare'].median())
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train = train.join(pd.get_dummies(train['Sex'], prefix='sex'))
test = test.join(pd.get_dummies(test['Sex'], prefix='sex'))
train = train.join(pd.get_dummies(train['Embarked'], prefix='emberk'))
test = test.join(pd.get_dummies(test['Embarked'], prefix='emberk'))
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
train.head()
|
code
|
17141241/cell_6
|
[
"text_html_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.info()
|
code
|
17141241/cell_11
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
train['Cabin'].value_counts()
|
code
|
17141241/cell_7
|
[
"text_html_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
|
code
|
17141241/cell_18
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train['Embarked'] = train['Embarked'].fillna('S')
test['Fare'] = test['Fare'].fillna(test['Fare'].median())
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train = train.join(pd.get_dummies(train['Sex'], prefix='sex'))
test = test.join(pd.get_dummies(test['Sex'], prefix='sex'))
train = train.join(pd.get_dummies(train['Embarked'], prefix='emberk'))
test = test.join(pd.get_dummies(test['Embarked'], prefix='emberk'))
train.head()
|
code
|
17141241/cell_8
|
[
"text_html_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.isnull().sum()
|
code
|
17141241/cell_15
|
[
"text_html_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train['Embarked'] = train['Embarked'].fillna('S')
test['Fare'] = test['Fare'].fillna(test['Fare'].median())
train.head()
|
code
|
17141241/cell_16
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train['Embarked'] = train['Embarked'].fillna('S')
test['Fare'] = test['Fare'].fillna(test['Fare'].median())
print(f"[train]Nmaeのユニークな要素: {train['Name'].nunique()}")
print(f"[test]Nmaeのユニークな要素: {test['Name'].nunique()}")
print(f"[train]Sexのユニークな要素: {train['Sex'].nunique()}")
print(f"[test]Sexのユニークな要素: {test['Sex'].nunique()}")
print(f"[train]Ticketのユニークな要素: {train['Ticket'].nunique()}")
print(f"[test]Ticketのユニークな要素: {test['Ticket'].nunique()}")
print(f"[train]Embarkedのユニークな要素: {train['Embarked'].nunique()}")
print(f"[test]Embarkedのユニークな要素: {test['Embarked'].nunique()}")
|
code
|
17141241/cell_24
|
[
"text_plain_output_1.png"
] |
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin', axis=1, inplace=True)
train['Embarked'] = train['Embarked'].fillna('S')
test['Fare'] = test['Fare'].fillna(test['Fare'].median())
train.drop('Name', axis=1, inplace=True)
test.drop('Name', axis=1, inplace=True)
train.drop('Ticket', axis=1, inplace=True)
test.drop('Ticket', axis=1, inplace=True)
train = train.join(pd.get_dummies(train['Sex'], prefix='sex'))
test = test.join(pd.get_dummies(test['Sex'], prefix='sex'))
train = train.join(pd.get_dummies(train['Embarked'], prefix='emberk'))
test = test.join(pd.get_dummies(test['Embarked'], prefix='emberk'))
train.drop(['Sex', 'Embarked'], axis=1, inplace=True)
test.drop(['Sex', 'Embarked'], axis=1, inplace=True)
X_train = train.drop('Survived', axis=1)
y_train = train['Survived'].values
X_train.head()
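# A possible next step, sketched here as an assumption rather than part of the
# preprocessing above: fit a simple baseline on the engineered features. The model
# choice and parameters below are illustrative only.
from sklearn.ensemble import RandomForestClassifier
baseline_clf = RandomForestClassifier(n_estimators=100, random_state=0)
baseline_clf.fit(X_train, y_train)
print('baseline training accuracy:', round(baseline_clf.score(X_train, y_train), 3))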
|
code
|