| path (string, 13-17 chars) | screenshot_names (list, 1-873 items) | code (string, 0-40.4k chars) | cell_type (string, 1 class) |
|---|---|---|---|
72089413/cell_30 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm import tqdm
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
import random
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
... | code |
72089413/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import json_lines
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += [item]
data0[0][0] | code |
72089413/cell_26 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
import random
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
... | code |
72089413/cell_11 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += ... | code |
72089413/cell_19 | [
"text_html_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += ... | code |
72089413/cell_1 | [
"text_plain_output_1.png"
] | !pip install json_lines | code |
72089413/cell_7 | [
"text_plain_output_1.png"
] | import json_lines
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += [item]
data0[0][0].keys() | code |
72089413/cell_18 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += ... | code |
72089413/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_squared_error
import lightgbm as lgbm
import numpy as np
import lightgbm as lgbm
from sklearn.metrics import mean_squared_error
def fit_lgbm(X, y, cv, params: dict=None, verbose: int=50):
if params is None:
params = {}
models = []
oof_pred = np.zeros_like(y, dtype... | code |
72089413/cell_15 | [
"text_html_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += ... | code |
72089413/cell_3 | [
"text_html_output_1.png"
] | import tensorflow as tf
import tensorflow as tf
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Device:', tpu.master())
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
... | code |
72089413/cell_35 | [
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm import tqdm
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
import random
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
... | code |
72089413/cell_14 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += ... | code |
72089413/cell_22 | [
"text_html_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += ... | code |
72089413/cell_10 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
import json_lines
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += [item]
users0 = json_normalize(data0[0][0])
users0 | code |
72089413/cell_37 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm import tqdm
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000... | code |
72089413/cell_12 | [
"text_plain_output_1.png"
] | from pandas.io.json import json_normalize
from tqdm.notebook import tqdm
import json_lines
import pandas as pd
data0 = []
with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f:
for i, item in enumerate(json_lines.reader(f)):
if i < 10000:
data0 += ... | code |
2015167/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import hamming_loss
import pandas as pd
import numpy as np
import pandas as pd
df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].... | code |
2015167/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df.head() | code |
2015167/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import fbeta_score
import pandas as pd
import numpy as np
import pandas as pd
df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].v... | code |
2015167/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import pandas as pd
import numpy as np
import pandas as pd
df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -... | code |
2015167/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, precision_recall_curve
import pandas as pd
import numpy as np
import pandas as pd
df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1... | code |
2015167/cell_3 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
import pandas as pd
df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:].values.astype('int')
from sklearn.linear_m... | code |
2015167/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, precision_recall_curve
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import pandas as pd
df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, [... | code |
2015167/cell_10 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].va... | code |
2015167/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
import pandas as pd
df = pd.read_csv('../input/seattleWeather_1948-2017.csv')
df = df.dropna()
X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values
y = df.iloc[:-1, -1:... | code |
17144473/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from bokeh.io import output_file,show,output_notebook,push_notebook
from bokeh.models import ColumnDataSource,HoverTool,CategoricalColorMapper
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/scmp2k19.csv')
df.loc[:, ['district', 'mandal', 'location']].sample(7, rand... | code |
17144473/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/scmp2k19.csv')
df.info() | code |
17144473/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from bokeh.io import output_file,show,output_notebook,push_notebook
from bokeh.layouts import row,column,gridplot,widgetbox
p1 = figure()
p1.circle(x='district', y='Rangareddy', source=source, color='red')
p2 = figure()
p2.circle(x='district', y='Warangal', source=source, color='black')
p3 = figure()
p3.circle(x='dis... | code |
17144473/cell_1 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from bokeh.io import output_file,show,output_notebook,push_notebook
import os
import numpy as np
import pandas as pd
import seaborn as sns
from ipywidgets import interact
from bokeh.io import output_file, show, output_notebook, push_notebook
from bokeh.plotting import *
from bokeh.models import ColumnDataSource, Hove... | code |
17144473/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from bokeh.io import output_file,show,output_notebook,push_notebook
from bokeh.layouts import row,column,gridplot,widgetbox
# Row and column
p1 = figure()
p1.circle(x = "district",y= "Rangareddy",source = source,color="red")
p2 = figure()
p2.circle(x = "district",y= "Warangal",source = source,color="black")
p3 = figu... | code |
17144473/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/scmp2k19.csv')
df.loc[:, ['district', 'mandal', 'location']].sample(7, random_state=1) | code |
18124991/cell_4 | [
"text_plain_output_1.png"
] | from torch import nn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torchvision
from torch import nn
from fastai.vision import *
import torchvision
df = pd.read_csv('../input/train.csv')
path = '../input'
device = torch.device('cuda:0' if torch.cuda... | code |
18124991/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from torch import nn
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torchvision
from torch import nn
from fastai.vision import *
import torchvision
df = pd.read_csv('../input/train.csv')
path = '../input'
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = t... | code |
18124991/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18124991/cell_3 | [
"text_html_output_4.png",
"text_plain_output_4.png",
"text_html_output_2.png",
"text_plain_output_3.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_html_output_3.png"
] | from torch import nn
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torchvision
from torch import nn
from fastai.vision import *
import torchvision
df = pd.read_csv('../input/train.csv')
path = '../input'
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = t... | code |
333270/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.cross_validation import train_test_split
import numpy as np
import xgboost as xgb
ids = test['id']
test = test.drop(['id'], axis=1)
y = train['Demanda_uni_equil']
X = train[test.columns.values]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729)
test_preds = np.z... | code |
333270/cell_6 | [
"text_plain_output_84.png",
"text_plain_output_56.png",
"text_plain_output_35.png",
"text_plain_output_43.png",
"text_plain_output_78.png",
"text_plain_output_37.png",
"text_plain_output_90.png",
"text_plain_output_79.png",
"text_plain_output_5.png",
"text_plain_output_75.png",
"text_plain_outpu... | nrows = 5000000
dtype = {'Semana': np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8, 'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint16}
train_filename = '../input/train.csv'
print('Loading Train... nrows : {0}'.format(nrows))
train.head() | code |
333270/cell_11 | [
"text_html_output_1.png"
] | from sklearn.cross_validation import train_test_split
import math
import numpy as np
import pandas as pd
import xgboost as xgb
def evalerror(preds, dtrain):
labels = dtrain.get_label()
assert len(preds) == len(labels)
labels = labels.tolist()
preds = preds.tolist()
terms_to_sum = [(math.log(lab... | code |
333270/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.cross_validation import train_test_split
print('Training_Shape:', train.shape)
ids = test['id']
test = test.drop(['id'], axis=1)
y = train['Demanda_uni_equil']
X = train[test.columns.values]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729)
print('Division_Set_Sha... | code |
333270/cell_16 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"text_plain_output_6.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from ml_metrics import rmsle
from sklearn.cross_validation import train_test_split
import math
import numpy as np
import pandas as pd
import xgboost as xgb
def evalerror(preds, dtrain):
labels = dtrain.get_label()
assert len(preds) == len(labels)
labels = labels.tolist()
preds = preds.tolist()
... | code |
333270/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | num_rounds = 100 | code |
333270/cell_12 | [
"text_html_output_1.png"
] | from sklearn.cross_validation import train_test_split
import math
import numpy as np
import pandas as pd
import xgboost as xgb
def evalerror(preds, dtrain):
labels = dtrain.get_label()
assert len(preds) == len(labels)
labels = labels.tolist()
preds = preds.tolist()
terms_to_sum = [(math.log(lab... | code |
333270/cell_5 | [
"text_plain_output_5.png",
"text_plain_output_15.png",
"text_plain_output_9.png",
"text_plain_output_20.png",
"text_plain_output_4.png",
"text_plain_output_13.png",
"text_plain_output_14.png",
"text_plain_output_27.png",
"text_plain_output_10.png",
"text_plain_output_6.png",
"text_plain_output_2... | print('Loading Test...')
dtype_test = {'id': np.uint16, 'Semana': np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8, 'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16}
test.head() | code |
73069993/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
from sklearn.model_selection import train_test_split
X_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
X_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
X_test.isnull().sum() | code |
73069993/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
from sklearn.model_selection import train_test_split
X_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
X_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
X_train.head() | code |
73069993/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73069993/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
from sklearn.model_selection import train_test_split
X_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
X_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
X_train.isnull().sum()
y = X... | code |
73069993/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
from sklearn.model_selection import train_test_split
X_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
X_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
X_train.isnull().sum() | code |
1008301/cell_4 | [
"text_plain_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = pd.read_csv(base + 'Labels.csv', usecols=['Label', 'FileName'])
labels['IsBlue'] = labels.Label.str.contains('blue')
labels['Num'] = labels.Label.str.split(' ').str[1].astype(int)
files = [i for i in sorted(os.listdir(base)) if... | code |
1008301/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import os
import numpy as np
import pandas as pd
import seaborn as sns
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
base = '../input/MultiSpectralImages/' | code |
1008301/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = pd.read_csv(base + 'Labels.csv', usecols=['Label', 'FileName'])
labels['IsBlue'] = labels.Label.str.contains('blue')
labels['Num'] = labels.Label.str.split(' ').str[1].astype(int)
labels.head() | code |
1008301/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
labels = pd.read_csv(base + 'Labels.csv', usecols=['Label', 'FileName'])
labels['IsBlue'] = labels.Label.str.contains('blue')
labels['Num'] = labels.Label.str.split(' ').str[1].astype(int)
files = [i for i in sorted(os.listdir(base)) if... | code |
73093411/cell_21 | [
"text_plain_output_1.png"
] | from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data... | code |
73093411/cell_13 | [
"text_plain_output_1.png"
] | print(X_train.shape, y_train.shape)
print(X_val.shape, y_val.shape) | code |
73093411/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
data['Recommended IND'] | code |
73093411/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape | code |
73093411/cell_20 | [
"text_plain_output_1.png"
] | from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data... | code |
73093411/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
data['Review Text'].str.split().apply(lambda x: len(x)).describ... | code |
73093411/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.head() | code |
73093411/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
X = data['Review Text'].values
X | code |
73093411/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
73093411/cell_18 | [
"text_plain_output_1.png"
] | from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data... | code |
73093411/cell_8 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import tensorflow as tf
tf.__version__ | code |
73093411/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape | code |
73093411/cell_17 | [
"text_plain_output_1.png"
] | from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data... | code |
73093411/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
tf.__version__
labels = tf.keras.util... | code |
73093411/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0)
data.shape
data = data[~data['Review Text'].isnull()]
data.shape
data['Recommended IND'].isnull().sum() | code |
130022433/cell_9 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from scipy import stats
from scipy.stats import skew, boxcox_normmax, norm
import matplotlib.gridspec as gridspec
from matplot... | code |
130022433/cell_6 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from scipy import stats
from scipy.stats import skew, boxcox_normmax, norm
import matplotlib.gridspec as gridspec
from matplot... | code |
130022433/cell_2 | [
"text_plain_output_1.png"
] | !pip install --upgrade scikit-learn
# Did this to use latest regressors from sklearn... | code |
130022433/cell_7 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from scipy import stats
from scipy.stats import skew, boxcox_normmax, norm
import matplotlib.gridspec as gridspec
from matplot... | code |
130022433/cell_8 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from scipy import stats
from scipy.stats import skew, boxcox_normmax, norm
import matplotlib.gridspec as gridspec
from matplot... | code |
130022433/cell_5 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from scipy import stats
from scipy.stats import skew, boxcox_normmax, norm
import matplotlib.gridspec as gridspec
from matplot... | code |
72071082/cell_21 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['n... | code |
72071082/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['n... | code |
72071082/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('... | code |
72071082/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.head() | code |
72071082/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import os
for dirname, _, fil... | code |
72071082/cell_28 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('... | code |
72071082/cell_8 | [
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('... | code |
72071082/cell_3 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum() | code |
72071082/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['n... | code |
72071082/cell_14 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('... | code |
72071082/cell_10 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('... | code |
72071082/cell_27 | [
"image_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('... | code |
72071082/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
print('Number of ? in columns are')
for col in data.columns:
if len(data[data[col] == '?']) > 0:
print(col, 'has ->', len(data[data[col] ==... | code |
32068084/cell_42 | [
"text_plain_output_1.png"
] | from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestCl... | code |
32068084/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shap... | code |
32068084/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
train_df_final.shape
X = train_df_final.drop('label', axis=1)
y = train_df_final['label']
X.isnull().values.any() | code |
32068084/cell_25 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_fin... | code |
32068084/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape | code |
32068084/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-ch... | code |
32068084/cell_30 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import De... | code |
32068084/cell_33 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_scor... | code |
32068084/cell_20 | [
"text_plain_output_1.png"
] | from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shap... | code |
32068084/cell_40 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestCl... | code |
32068084/cell_39 | [
"text_plain_output_1.png"
] | from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestCl... | code |
32068084/cell_41 | [
"text_html_output_1.png"
] | from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestCl... | code |
32068084/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler as ss
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import R... | code |