| path (string, lengths 13-17) | screenshot_names (list, lengths 1-873) | code (string, lengths 0-40.4k) | cell_type (string, 1 class) |
|---|---|---|---|
18159957/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
labels_house = ['yes', 'no', 'unknown']
sizes_house = [2175, 1839, 105]
colors_house = ['#ff6666', '#ffcc99', '#ffb3e6']
labels_loan = ['yes', 'no', 'unknown']
sizes_loan = [665, 3349, 105]
colors_loan = ['#c2c2f0', '#ffb3e6', '#66b3ff']
labels_contact = ['cellular', 'telephone']
sizes_... | code |
16122877/cell_21 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display... | code |
16122877/cell_4 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd
df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
len(songs) | code |
16122877/cell_20 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import pandas as pd
df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display.max_colwidth = 5000
songs = df.groupby('track_titl... | code |
16122877/cell_19 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import pandas as pd
df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display.max_colwidth = 5000
songs = df.groupby('track_titl... | code |
16122877/cell_1 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
df.head() | code |
16122877/cell_16 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import pandas as pd
df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display.max_colwidth = 5000
songs = df.groupby('track_titl... | code |
16122877/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x), 'year': 'mean'}).reset_index()
songs.head() | code |
16122877/cell_17 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import pandas as pd
df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
pd.options.display.max_colwidth = 5000
songs = df.groupby('track_titl... | code |
16122877/cell_14 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from sklearn.decomposition import NMF
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import pandas as pd
df = pd.read_csv('../input/taylor_swift_lyrics.csv', encoding='latin-1')
songs = df.groupby('track_title').agg({'lyric': lambda x: ' '.join(x),... | code |
327861/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
f, axarr = plt.subplots(10, 10)
for row in range(10):
for column in range(10):
entry = train_data[train_data['label']==column].iloc[row]... | code |
327861/cell_3 | [
"image_output_1.png"
] | import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
print(train_data.shape)
print(test_data.shape) | code |
327861/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
f, axarr = plt.subplots(10, 10)
for row in range(10):
for column in range(10):
entry = train_data[train_data['label'] == column].iloc[row].drop('label').as_matri... | code |
33095970/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import warnings
papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
papers_... | code |
33095970/cell_2 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
print(type(papers)) | code |
33095970/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.... | code |
33095970/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import nltk
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
33095970/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
papers_2010 = papers.loc... | code |
33095970/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
papers = pd.read_csv('/kaggle/input/nips-papers/papers.csv')
papers_2010 = papers.loc... | code |
33095970/cell_3 | [
"text_plain_output_1.png"
] | groups = papers.groupby('year')
counts = groups.size()
import matplotlib.pyplot
counts.plot() | code |
33095970/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.... | code |
122261632/cell_63 | [
"text_plain_output_1.png"
] | print('Shape of X_test', X_test.shape) | code |
122261632/cell_57 | [
"text_plain_output_1.png"
] | from imblearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np
cat_pipe = Pipeline([('imputer', SimpleImputer(strategy='most_frequent', missing_values=np.nan)), ('encoder', OneHotEnc... | code |
122261632/cell_33 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df... | code |
122261632/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
print('Shape of dataset is:', train_label_df.shape)
train_label_df.info() | code |
122261632/cell_55 | [
"text_plain_output_1.png"
] | from imblearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np
cat_pipe = Pipeline([('imputer', SimpleImputer(strategy='most_frequent', missing_values=np.nan)), ('encoder', OneHotEnc... | code |
122261632/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble impo... | code |
122261632/cell_39 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
test_df.... | code |
122261632/cell_65 | [
"text_plain_output_1.png"
] | print('Shape of y_test', y_test.shape) | code |
122261632/cell_50 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df... | code |
122261632/cell_64 | [
"text_plain_output_1.png"
] | print('Shape of y_train', y_train.shape) | code |
122261632/cell_45 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df... | code |
122261632/cell_51 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df... | code |
122261632/cell_62 | [
"text_plain_output_1.png"
] | print('Shape of X_train', X_train.shape) | code |
122261632/cell_59 | [
"text_plain_output_1.png"
] | from imblearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train_df_sample = pd.read_csv('../input/amex-defaul... | code |
122261632/cell_28 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df... | code |
122261632/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
print('Shape of dataset is:', train_df_sample.shape)
train_df_sample.info() | code |
122261632/cell_75 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df... | code |
122261632/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
test_df.... | code |
122261632/cell_43 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df... | code |
122261632/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
print('S... | code |
122261632/cell_53 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df... | code |
122261632/cell_37 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df_sample = pd.read_csv('../input/amex-default-prediction/train_data.csv', nrows=100000)
train_label_df = pd.read_csv('../input/amex-default-prediction/train_labels.csv')
test_df = pd.read_csv('../input/amex-default-prediction/test_data.csv', nrows=100000, index_col='customer_ID')
train_df... | code |
88095734/cell_25 | [
"text_html_output_1.png"
] | from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv... | code |
88095734/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.info() | code |
88095734/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass'... | code |
88095734/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns | code |
88095734/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked... | code |
88095734/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88095734/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked... | code |
88095734/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked... | code |
88095734/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked... | code |
88095734/cell_3 | [
"text_plain_output_1.png"
] | train['train_test'] = 1
test['train_test'] = 0
test['Survived'] = np.NaN
data = pd.concat([train, test])
data.columns | code |
88095734/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked... | code |
88095734/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked... | code |
88095734/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe().columns
df_num = train[['Age', 'SibSp', 'Parch', 'Fare']]
df_cat = train[['Survived', 'Pclass', 'Sex', 'Ticket', 'Cabin', 'Embarked... | code |
88095734/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe() | code |
128029410/cell_4 | [
"text_plain_output_1.png"
] | !pip --version | code |
128029410/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from monai.config import print_config
import os
import json
import shutil
import tempfile
import time
import matplotlib.pyplot as plt
import numpy as np
import nibabel as nib
from monai.losses import DiceLoss
from monai.inferers import sliding_window_inference
from monai import transforms
from monai.transforms import ... | code |
128029410/cell_2 | [
"text_plain_output_1.png"
] | !nvidia-smi | code |
128029410/cell_18 | [
"text_plain_output_1.png"
] | from monai import data
from monai import transforms
import json
import matplotlib.pyplot as plt
import nibabel as nib
import numpy as np
import os
import tempfile
import torch
directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory
class AverageMete... | code |
128029410/cell_8 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | import os
import tempfile
directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory
print(root_dir) | code |
128029410/cell_15 | [
"text_plain_output_1.png"
] | from monai import data
from monai import transforms
import json
import numpy as np
import os
import tempfile
import torch
directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory
class AverageMeter(object):
def __init__(self):
self.reset()
... | code |
128029410/cell_16 | [
"text_plain_output_1.png"
] | from monai import data
from monai import transforms
import json
import numpy as np
import os
import tempfile
import torch
directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory
class AverageMeter(object):
def __init__(self):
self.reset()
... | code |
128029410/cell_3 | [
"text_plain_output_1.png"
] | !pip install "monai[einops]" | code |
128029410/cell_14 | [
"text_plain_output_1.png"
] | from monai import data
from monai import transforms
import json
import numpy as np
import os
import tempfile
import torch
directory = os.environ.get('MONAI_DATA_DIRECTORY')
root_dir = tempfile.mkdtemp() if directory is None else directory
class AverageMeter(object):
def __init__(self):
self.reset()
... | code |
18149087/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np, pandas as pd, os
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import VarianceThreshold
from tqdm import tqdm
from sklearn.covariance import EmpiricalCovariance
from sklearn.covariance import... | code |
18149087/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.covariance import GraphicalLasso
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import roc_auc_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
import numpy as np, pandas as pd, os
from sklearn.di... | code |
18149087/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.covariance import GraphicalLasso
from sklearn.feature_selection import VarianceThreshold
from sklearn.metrics import roc_auc_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
import numpy as np, pandas as pd, os
from sklearn.di... | code |
18149087/cell_10 | [
"text_html_output_1.png"
] | from sklearn import svm, neighbors, linear_model, neural_network
from sklearn.covariance import GraphicalLasso
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import Vari... | code |
2003574/cell_4 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(train_X, train_y)
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier()
clf.fit(t... | code |
2003574/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(train_X, train_y)
from sklearn.ensem... | code |
2003574/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2003574/cell_3 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(train_X, train_y)
print('The training score is: {}\n'.format(clf.score(train_X, train_y)))
print('The test score is: {}\n'.format(clf.score(test_X, test_y))) | code |
2003574/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(train_X, train_y)
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientB... | code |
128024415/cell_21 | [
"text_html_output_1.png"
] | from json import loads , dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
price = []
for i in js['Abohar']['restaurants'].keys():
if 'North Indian' in js['Abohar']['restaurants'][i]['cuisine']:
price.append(int(js['Abohar']['res... | code |
128024415/cell_25 | [
"text_plain_output_1.png"
] | from json import loads , dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(s... | code |
128024415/cell_23 | [
"text_plain_output_1.png"
] | from json import loads , dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(s... | code |
128024415/cell_33 | [
"text_html_output_1.png"
] | from json import loads , dumps
import numpy as np
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',... | code |
128024415/cell_6 | [
"text_html_output_1.png"
] | from json import loads , dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
print(len(js.keys())) | code |
128024415/cell_19 | [
"text_html_output_1.png"
] | from json import loads , dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cost = []
for i in js['Abohar']['restaurants'].keys():
cost.append(int(js['Abohar']['restaurants'][i]['cost'].split(' ')[-1]))
avg_cost = round(sum(cost) / len(co... | code |
128024415/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
from json import loads, dumps
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128024415/cell_8 | [
"text_html_output_1.png"
] | from json import loads , dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
print(len(js['Abohar']['restaurants'].keys())) | code |
128024415/cell_16 | [
"text_plain_output_1.png"
] | from json import loads , dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(s... | code |
128024415/cell_17 | [
"text_plain_output_1.png"
] | from json import loads , dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(s... | code |
128024415/cell_35 | [
"text_html_output_1.png"
] | from json import loads , dumps
import numpy as np
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',... | code |
128024415/cell_31 | [
"text_html_output_1.png"
] | from json import loads , dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(s... | code |
128024415/cell_14 | [
"text_plain_output_1.png"
] | from json import loads , dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(set(cuisines))
print(l... | code |
128024415/cell_10 | [
"text_plain_output_1.png"
] | from json import loads , dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
for i in js['Abohar']['restaurants'].keys():
print(js['Abohar']['restaurants'][i]['name'], '|', len(js['Abohar']['restaurants'][i]['menu'].keys())) | code |
128024415/cell_27 | [
"text_html_output_1.png"
] | from json import loads , dumps
import pandas as pd
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
cuisines = []
for i in js['Abohar']['restaurants'].keys():
cuisines += js['Abohar']['restaurants'][i]['cuisine'].split(',')
cuisines = list(s... | code |
128024415/cell_12 | [
"text_plain_output_1.png"
] | from json import loads , dumps
file = open('/kaggle/input/swiggy-restaurants-dataset/data.json', 'r')
data = file.read()
file.close()
js = loads(data)
for i in js['Abohar']['restaurants'].keys():
if len(js['Abohar']['restaurants'][i]['menu']) == 0:
print(js['Abohar']['restaurants'][i]['name'], '|', i) | code |
105190732/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
label = df_train['Survived']
label.unique()
if label.isnull().sum() == 0:
... | code |
105190732/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes | code |
105190732/cell_30 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
df_train['Age_NA'] =... | code |
105190732/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
df_train.Age.describe() | code |
105190732/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
for column in df_train.columns:
print(column, len(df_train[column].unique())... | code |
105190732/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
from scipy import stats
from scipy.cluster import hierarchy as hc
import sklearn
import IPython
import matplotlib.pyplot as plt
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_sco... | code |
105190732/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
print('Train Shape', df_train.shape)
print('Test Shape', df_test.shape) | code |
105190732/cell_32 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.dtypes
df_train['Age_NA'] =... | code |
105190732/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/titanic/'
df_train = pd.read_csv(f'{PATH}/train.csv', low_memory=False)
df_test = pd.read_csv(f'{PATH}/test.csv', low_memory=False)
df_train.head().transpose() | code |
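
Each row pairs a notebook cell (identified by `path`, e.g. `18159957/cell_12`) with the code it ran and the names of the output screenshots it produced. Below is a minimal sketch of how one might inspect rows of this shape locally; the file name `cells.parquet` and the Parquet format are assumptions for illustration, not details taken from this preview.

```python
# Minimal exploration sketch, assuming the table above is stored as "cells.parquet"
# (hypothetical path/format) with columns: path, screenshot_names, code, cell_type.
import pandas as pd

df = pd.read_parquet("cells.parquet")

# Split "path" into notebook id and cell id, e.g. "18159957/cell_12".
df[["notebook_id", "cell_id"]] = df["path"].str.split("/", n=1, expand=True)

# How many cells does each notebook contribute?
print(df.groupby("notebook_id").size().sort_values(ascending=False).head())

# Cells whose recorded outputs include a rendered image (as opposed to plain text/HTML).
has_image = df["screenshot_names"].apply(
    lambda names: any(str(n).startswith("image_output") for n in names)
)
print(df.loc[has_image, ["path", "screenshot_names"]].head())
```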