Dataset schema (one record per notebook cell: path, screenshot_names, code, cell_type):

  path              string, length 13-17; notebook_id/cell_id, e.g. 16124829/cell_21
  screenshot_names  list, 1-873 items; file names of the cell's rendered outputs
  code              string, length 0-40.4k characters; the cell's source code
  cell_type         string, 1 distinct class ("code" for every record)
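For orientation, here is a minimal sketch of iterating over records with this schema. The JSON Lines storage and the file name cells.jsonl are assumptions made for illustration; only the four field names, the notebook_id/cell_id path layout, and the single cell_type class come from the records below.

import json

# Minimal sketch: walk records with the schema above.
# Assumption: records are stored as JSON Lines in a file named cells.jsonl.
with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)
        notebook_id, cell_id = record['path'].split('/')  # e.g. '16124829', 'cell_21'
        assert record['cell_type'] == 'code'  # the only class present in this split
        print(notebook_id, cell_id, len(record['code']), len(record['screenshot_names']))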
16124829/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) sns.barplot(y=base['radius_mean'], x=base['diagno...
code
16124829/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base['radius_mean'].mean()
code
16124829/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
code
16124829/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) cor_base = base[['diagnosis', 'radius_mean', 'tex...
code
16124829/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) cor_base = base[['diagnosis', 'radius_mean', 'tex...
code
16124829/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) b...
code
16124829/cell_33
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) base.isnull().sum() base.diagnosis.std() base.d...
code
16124829/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) for i in list(base.columns): if i != 'diagnos...
code
16124829/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base.head()
code
16124829/cell_29
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) base.isnull().sum() base['diagnosis'].value_coun...
code
16124829/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
for i in a:
    print('-', i)
code
16124829/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
sns.boxplot(x='radius_mean', y='diagnosis', data=base)
code
16124829/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
sns.distplot(base['texture_mean'])  # distplot is deprecated in newer seaborn; sns.histplot is the modern replacement
code
16124829/cell_32
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.isnull().sum()
base.diagnosis.std()
code
16124829/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
code
16124829/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.describe()
code
16124829/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.describe(include=['O'])
code
16124829/cell_3
[ "text_html_output_1.png" ]
import os

print(os.listdir('../input'))
code
16124829/cell_31
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) base.isnull().sum() base['diagnosis'].value_coun...
code
16124829/cell_24
[ "image_output_11.png", "image_output_24.png", "image_output_25.png", "image_output_17.png", "image_output_30.png", "image_output_14.png", "image_output_28.png", "image_output_23.png", "image_output_13.png", "image_output_5.png", "image_output_18.png", "image_output_21.png", "image_output_7.p...
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) cor_base = base[['diagnosis', 'radius_mean', 'tex...
code
16124829/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import seaborn as sns import pandas as pd base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv') base = base.iloc[:, :32] base.columns len(base.columns) a = list(base.columns) sns.scatterplot(x=base['area_mean'], y=base['peri...
code
16124829/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base.isnull().sum()
code
16124829/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

base = pd.read_csv('../input/breast-cancer-wisconsin-data/data.csv')
base = base.iloc[:, :32]
base.columns
len(base.columns)
a = list(base.columns)
base['radius_mean']
code
88100273/cell_2
[ "text_plain_output_1.png" ]
!pip install scanpy
code
88100273/cell_5
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.decomposition import PCA import matplotlib.pyplot as plt import numpy as np import scanpy as sc import scipy import seaborn as sns import time import time dict_datasets_info = {'krumsiek11': 'Simulated myeloid progenitors [Krumsiek11].', 'moignard15': 'Hematopoiesis in early mouse embryos [Moignard...
code
32069383/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly_express as px df = pd.read_csv('../input/2019-world-happiness-report-csv-file/2019.csv') df.shape top_10 = df.iloc[0:10, 0:3] top_10 fig = px.pie(top_10, values='Score', names='Country or region', color_discrete_sequence=px.colors...
code
1007542/cell_6
[ "image_output_1.png" ]
import pandas as pd import seaborn as sns df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv') matchups = [[str(x + 1), str(16 - x)] for x in range(8)] df = df[df.gender == 'mens'] pre = df[df.playin_flag == 1] data = [] for region in pre.team_region.unique(): for seed in range(2, 17): res...
code
1007542/cell_7
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv') matchups = [[str(x + 1), str(16 - x)] for x in range(8)] df = df[df.gender == 'mens'] pre = df[df.playin_flag == 1] data = [] for region in pre.team_region.unique(): for seed in range...
code
1007542/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/fivethirtyeight_ncaa_forecasts (2).csv')
df.head()
code
88101963/cell_23
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/scrabble-point-value/turns_train.csv') tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv') games =...
code
88101963/cell_6
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/scrabble-point-value/turns_train.csv') tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv') games = pd.read_csv('../input/scrabble-point-value/games.csv') sample_submission = pd.read_csv('../input/scrabb...
code
88101963/cell_19
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/scrabble-point-value/turns_train.csv') tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv') games = pd.read_csv('../input/scrabble-point-value/games.csv') sample_submission = pd.read_csv('../input/scrabb...
code
88101963/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from keras.models import Sequential from keras.layers import Dense, Dropout, BatchNormalization from keras.layers import Dense from keras.wrappers.scikit_learn import KerasRegressor from sklearn.metrics import mean_s...
code
88101963/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/scrabble-point-value/turns_train.csv') tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv') games = pd.read_csv('../input/scrabble-point-value/games.csv') sample_submission = pd.read_csv('../input/scrabb...
code
88101963/cell_18
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/scrabble-point-value/turns_train.csv') tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv') games = pd.read_csv('../input/scrabble-point-value/games.csv') sample_submission = pd.read_csv('../input/scrabb...
code
88101963/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/scrabble-point-value/turns_train.csv') tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv') games = pd.read_csv('../input/scrabble-point-value/games.csv') sample_submission = pd.read_csv('../input/scrabb...
code
88101963/cell_22
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/scrabble-point-value/turns_train.csv') tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv') games =...
code
88101963/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/scrabble-point-value/turns_train.csv') tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv') games = pd.read_csv('../input/scrabble-point-value/games.csv') sample_submission = pd.read_csv('../input/scrabb...
code
88101963/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/scrabble-point-value/turns_train.csv') tests = pd.read_csv('../input/scrabble-point-value/turns_test.csv') games = pd.read_csv('../input/scrabble-point-value/games.csv') sample_submission = pd.read_csv('../input/scrabb...
code
330925/cell_13
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import pandas as pd df = pd.read_csv('../input/NationalNames.csv') df['Decade'] = df['Year'].apply(lambda x: x - x % 10) df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum') new_df = pd.DataFrame(...
code
330925/cell_9
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/NationalNames.csv') df['Decade'] = df['Year'].apply(lambda x: x - x % 10) df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum') new_df = pd.DataFrame() new_df['Decade'] = df_pivot.index.get_level_values('Decade') new_df['Name'] = d...
code
330925/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/NationalNames.csv')
df.head()
code
330925/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd df = pd.read_csv('../input/NationalNames.csv') df['Decade'] = df['Year'].apply(lambda x: x - x % 10) df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum') new_df = pd.DataFrame() new_df['Decade'] = df_pivot.ind...
code
330925/cell_19
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/NationalNames.csv') df['Decade'] = df['Year'].apply(lambda x: x - x % 10) df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')...
code
330925/cell_7
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df.tail()
code
330925/cell_15
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd df = pd.read_csv('../input/NationalNames.csv') df['Decade'] = df['Year'].apply(lambda x: x - x % 10) df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum') new_df = pd.DataFrame() new_df['Decade'] = df_pivot.ind...
code
330925/cell_17
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler import pandas as pd df = pd.read_csv('../input/NationalNames.csv') df['Decade'] = df['Year'].apply(lambda x: x - x % 10) df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum') new_df = pd.DataFrame() new_df['Decade'] = df_pivot.ind...
code
330925/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/NationalNames.csv')
print('Data year ranges from {} to {}'.format(min(df['Year']), max(df['Year'])))
code
130004949/cell_13
[ "text_plain_output_1.png" ]
from collections import ChainMap from esm.model.esm2 import ESM2 from multiprocess import Pool from tqdm import tqdm from tqdm import tqdm from tqdm import tqdm import esm import gc import gc import numpy as np import numpy as np import os import os import os import pandas as pd import pandas as pd impo...
code
130004949/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from multiprocess import Pool import numpy as np import numpy as np import os import os import pandas as pd import pandas as pd import warnings import pandas as pd import numpy as np import plotly.express as px from plotly.subplots import make_subplots import gc from tqdm import tqdm import pickle import os imp...
code
130004949/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd import warnings import pandas as pd import numpy as np import plotly.express as px from plotly.subplots import make_subplots import gc from tqdm import tqdm import pickle import os import warnings warnings.filterwarnings('ignore') train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-...
code
130004949/cell_11
[ "text_html_output_1.png" ]
from multiprocess import Pool import numpy as np import numpy as np import os import os import pandas as pd import pandas as pd import warnings import pandas as pd import numpy as np import plotly.express as px from plotly.subplots import make_subplots import gc from tqdm import tqdm import pickle import os imp...
code
130004949/cell_7
[ "text_html_output_1.png" ]
import pandas as pd import warnings import pandas as pd import numpy as np import plotly.express as px from plotly.subplots import make_subplots import gc from tqdm import tqdm import pickle import os import warnings warnings.filterwarnings('ignore') train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-...
code
130004949/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd import warnings import pandas as pd import numpy as np import plotly.express as px from plotly.subplots import make_subplots import gc from tqdm import tqdm import pickle import os import warnings warnings.filterwarnings('ignore') train_clinical_data = pd.read_csv('../input/amp-parkinsons-disease-...
code
130004949/cell_16
[ "text_plain_output_1.png" ]
from esm.model.esm2 import ESM2 from multiprocess import Pool from tqdm import tqdm from tqdm import tqdm from tqdm import tqdm import esm import gc import gc import numpy as np import numpy as np import os import os import os import pandas as pd import pandas as pd import re import tensorflow as tf im...
code
130004949/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import missingno as msno import pandas as pd import warnings import pandas as pd import numpy as np import plotly.express as px from plotly.subplots import make_subplots import gc from tqdm import tqdm import pickle import os import warnings warnings.filterwarnings('ignore') train_clinical_data = pd.read_csv('../inp...
code
130004949/cell_17
[ "text_plain_output_1.png" ]
from collections import ChainMap from esm.model.esm2 import ESM2 from multiprocess import Pool from sklearn.preprocessing import MinMaxScaler from tqdm import tqdm from tqdm import tqdm from tqdm import tqdm import esm import gc import gc import networkx as nx import numpy as np import numpy as np import o...
code
130004949/cell_14
[ "text_plain_output_1.png" ]
from esm.model.esm2 import ESM2 from multiprocess import Pool from tqdm import tqdm from tqdm import tqdm from tqdm import tqdm import esm import gc import gc import numpy as np import numpy as np import os import os import os import pandas as pd import pandas as pd import re import tensorflow as tf im...
code
130004949/cell_10
[ "image_output_4.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
!pip install --no-index --no-deps /kaggle/input/fair-esm/fair_esm-2.0.0-py3-none-any.whl
code
130004949/cell_12
[ "text_plain_output_1.png" ]
from esm.model.esm2 import ESM2 from multiprocess import Pool from tqdm import tqdm from tqdm import tqdm from tqdm import tqdm import esm import gc import gc import numpy as np import numpy as np import os import os import os import pandas as pd import pandas as pd import re import tensorflow as tf im...
code
33112981/cell_6
[ "text_plain_output_1.png" ]
import json import re # Regular expressions testDirectory = '/kaggle/input/abstraction-and-reasoning-challenge/test/' def readTaskFile(filename): f = open(filename, 'r') data = json.loads(f.read()) data['id'] = re.sub('(.*/)|(\\.json)', '', filename) f.close() return data filename = testDirector...
code
33112981/cell_11
[ "text_plain_output_1.png" ]
# Read back the generated submission file (context manager closes it automatically)
with open('submission.csv', 'r') as f2:
    print(f2.read())
code
33112981/cell_8
[ "text_plain_output_1.png" ]
import json import re # Regular expressions def flattener(pred): str_pred = str([row for row in pred]) str_pred = str_pred.replace(', ', '') str_pred = str_pred.replace('[[', '|') str_pred = str_pred.replace('][', '|') str_pred = str_pred.replace(']]', '|') return str_pred testDirectory = '/k...
code
32070001/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
16168087/cell_4
[ "image_output_1.png" ]
from pathlib import Path  # assumed: in the source notebook, Path likely came in via fastai's star imports, which re-export pathlib.Path

base_dir = '../input/cell_images/cell_images/'
base_path = Path(base_dir)
base_path
code
16168087/cell_20
[ "image_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_6
[ "image_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) print(f'Classes to classify: \n {data.classes}') data....
code
16168087/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_18
[ "image_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_28
[ "text_html_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_8
[ "image_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_16
[ "text_html_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_24
[ "text_html_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_14
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_22
[ "image_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_10
[ "text_plain_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
16168087/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
base_dir = '../input/cell_images/cell_images/' base_path = Path(base_dir) base_path data = ImageDataBunch.from_folder(base_path, valid_pct=0.1, train='.', ds_tfms=get_transforms(max_warp=0, flip_vert=True), size=128, bs=32, num_workers=0).normalize(imagenet_stats) learner = create_cnn(data, models.resnet50, metrics=a...
code
88105225/cell_9
[ "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t') tweets.isnull().values.any().sum() print('Shape before removing duplicate rows:', tweets.shape) twe...
code
88105225/cell_33
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.metrics import classification_report import emoji import re # regular expression operations import wordninja s...
code
88105225/cell_20
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from wordcloud import WordCloud import emoji import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re # regular expression operations import wordninja tweets = p...
code
88105225/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
!pip install wordninja !pip install emoji !pip install catboost import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from wordcloud import WordCloud import nltk # used commonly for NLP tasks nltk.download('stopwords') from nltk.corpus import stopwords nltk.download('wordnet') fro...
code
88105225/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score import emoji import re # regular expression operations import wordninja stop_words = stopwords.words('english') abbreviation...
code
88105225/cell_39
[ "text_plain_output_1.png", "image_output_1.png" ]
# GloVe is a pretrained word-embedding model; download and unpack the 6B-token vectors
!wget http://nlp.stanford.edu/data/glove.6B.zip
!unzip glove.6B.zip
code
88105225/cell_48
[ "text_plain_output_1.png" ]
from keras.layers import LSTM from keras.layers.core import Activation, Dropout, Dense from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from nltk.corpus import stopwords from ten...
code
88105225/cell_41
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords import emoji import numpy as np import numpy as np # linear algebra import re # regular expression operations import wordninja stop_words = stopwords.words('english') abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from keybo...
code
88105225/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
tweets = tweets.drop_duplicates()
tweets['tweet_text'][0]
code
88105225/cell_19
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from wordcloud import WordCloud import emoji import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re # regular expression operations import wordninja tweets = p...
code
88105225/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88105225/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.head()
code
88105225/cell_45
[ "text_plain_output_1.png" ]
from keras.layers import LSTM from keras.layers.core import Activation, Dropout, Dense from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from nltk.corpus import stopwords import e...
code
88105225/cell_49
[ "text_plain_output_1.png" ]
from keras.layers import LSTM from keras.layers.core import Activation, Dropout, Dense from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from nltk.corpus import stopwords from skl...
code
88105225/cell_18
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer import emoji import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re # regular expression operations import wordninja tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment1...
code
88105225/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from wordcloud import WordCloud import ...
code
88105225/cell_51
[ "text_plain_output_1.png" ]
from keras.layers import LSTM from keras.layers.core import Activation, Dropout, Dense from keras.layers.embeddings import Embedding from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from nltk.corpus import stopwords from nlt...
code
88105225/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t')
tweets.isnull().values.any().sum()
code
88105225/cell_38
[ "text_plain_output_1.png" ]
from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer tokenizer = Tokenizer() tokenizer.fit_on_texts(X_train) word_index = tokenizer.word_index X_train_tok = tokenizer.texts_to_sequences(X_train) X_test_tok = tokenizer.texts_to_sequences(X_test) vocab_size = len(tokeni...
code
88105225/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.model_selection import train_test_split from wordcloud import WordCloud import emoji import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re # regul...
code
88105225/cell_14
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns tweets = pd.read_csv('/kaggle/input/twitter-sentiment-analysis-10/Sentiment140.tenPercent.sample.tweets.tsv', delimiter='\t') tweets.isnull().values.any().sum() tweets = tweets.drop_duplicates() sns.co...
code
88105225/cell_27
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer import emoji import re # regular expression operations import wordninja stop_words = stopwords.words('english') abbreviations = {'a.m.': 'before midday', 'acct': 'account', 'afaik': 'as far as i know', 'afk': 'away from k...
code
88105225/cell_37
[ "text_plain_output_1.png" ]
from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer tokenizer = Tokenizer() tokenizer.fit_on_texts(X_train) word_index = tokenizer.word_index X_train_tok = tokenizer.texts_to_sequences(X_train) X_test_tok = tokenizer.texts_to_sequences(X_test) vocab_size = len(tokeni...
code