Dataset columns (name, type, observed size range):

  path              string   length 13–17
  screenshot_names  list     1–873 items
  code              string   length 0–40.4k
  cell_type         string   1 distinct value ("code")
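Every record below follows this four-column layout: a cell path of the form <notebook_id>/cell_<n>, a list of output screenshot file names, the (truncated) cell source code, and the cell type. As a rough sketch of how rows of this shape can be consumed, the snippet below assumes the dump has been exported to a JSON Lines file; the file name cells.jsonl and the export step are assumptions, not part of this preview. It groups cells by notebook id and counts the screenshots attached to each notebook.

import json
from collections import defaultdict

def load_records(path):
    # Yield one record per line; each record carries the four columns above:
    # path, screenshot_names, code, cell_type.
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

# Group cells by notebook id (the part of "path" before "/cell_<n>") and
# count how many output screenshots each notebook produced in total.
screenshots_per_notebook = defaultdict(int)
for rec in load_records("cells.jsonl"):  # hypothetical export of this dump
    notebook_id = rec["path"].split("/")[0]
    screenshots_per_notebook[notebook_id] += len(rec["screenshot_names"])

for notebook_id, count in sorted(screenshots_per_notebook.items()):
    print(notebook_id, count)

For the rows shown here every cell_type is "code", so no filtering on that column is needed.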
121154806/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from tqdm import tqdm import cv2 import gif2numpy import matplotlib.pyplot as plt import numpy as np import os import segmentation_models as sm import tensorflow as tf sm.set_framework('tf.keras') sm.framework() root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/' exts = ('jpg', 'JPG', 'png', 'PNG', 'tif'...
code
121154806/cell_1
[ "text_plain_output_1.png" ]
!pip install -U segmentation-models !pip install gif2numpy import cv2 import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras import segmentation_models as sm import matplotlib.pyplot as plt import os from tqdm import tqdm import gif2numpy
code
121154806/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm import cv2 import gif2numpy import matplotlib.pyplot as plt import numpy as np import os import segmentation_models as sm import tensorflow as tf sm.set_framework('tf.keras') sm.framework() root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/' exts = ('jpg', 'JPG', 'png', 'PNG', 'tif'...
code
121154806/cell_8
[ "image_output_5.png", "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import os root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/' exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm') def Data_sorting(input_data, target_data, exts): images = sorted([os.path.join(input_data, fname) for fname in os.listdir(input_data) if fname.endswith(exts) and (not fname.startswith('.'))...
code
121154806/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
from tqdm import tqdm import cv2 import gif2numpy import matplotlib.pyplot as plt import numpy as np import os root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/' exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm') def Data_sorting(input_data, target_data, exts): images = sorted([os.path.join(inpu...
code
121154806/cell_12
[ "text_plain_output_1.png" ]
from tqdm import tqdm import cv2 import gif2numpy import matplotlib.pyplot as plt import numpy as np import os root = '/kaggle/input/retinal-vessel-segmentation/DRIVE/' exts = ('jpg', 'JPG', 'png', 'PNG', 'tif', 'gif', 'ppm') def Data_sorting(input_data, target_data, exts): images = sorted([os.path.join(inpu...
code
90124098/cell_9
[ "image_output_1.png" ]
from sklearn.cluster import KMeans import matplotlib.pyplot as plt import numpy as np import pandas as pd df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') x = df.iloc[:, [3, 4]] from sklearn.cluster import KMeans wcss = [] for i in range(1, 11): km = KMeans(n_clusters=i...
code
90124098/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') df.info()
code
90124098/cell_8
[ "image_output_1.png" ]
from sklearn.cluster import KMeans import matplotlib.pyplot as plt import pandas as pd df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') x = df.iloc[:, [3, 4]] from sklearn.cluster import KMeans wcss = [] for i in range(1, 11): km = KMeans(n_clusters=i, init='k-means++', ...
code
90124098/cell_3
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') df.head()
code
90124098/cell_5
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') x = df.iloc[:, [3, 4]] x.head()
code
1006487/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics....
code
1006487/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv'...
code
1006487/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv'...
code
1006487/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics....
code
1006487/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.corpus import stopwords from subprocess import check_output import numpy as np import pandas as pd import nltk import re from bs4 import BeautifulSoup from nltk.corpus import stopwords import seaborn as sns import matplotlib.pyplot as plt from sklearn.feature_extraction.text import CountVectorizer from skle...
code
1006487/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv'...
code
1006487/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv')...
code
1006487/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv'...
code
1006487/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv')...
code
1006487/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv'...
code
1006487/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv'...
code
1006487/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv'...
code
1006487/cell_10
[ "text_plain_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv')...
code
1006487/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import nltk # natural language processing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # visualization biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv'...
code
1006487/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from string import punctuation import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) biology = pd.read_csv('../input/biology.csv') cooking = pd.read_csv('../input/cooking.csv') crypto = pd.read_csv('../input/crypto.csv') diy = pd.read_csv('../input/diy.csv') robotics = pd.read_csv('../input/robotics....
code
74045588/cell_9
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Input, Conv2D, MaxPooling2D, BatchNormalization, Activation, UpSampling2D, GlobalAveragePooling2D from keras.models import Sequential, Model from keras.preprocessing.image import ImageDataGenerator from keras.preprocessing.image import load_img, img_to_array from te...
code
74045588/cell_4
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator train_data_dir = '../input/1056lab-covid19-chest-xray-recognit/train' generator = ImageDataGenerator(width_shift_range=0.3, height_shift_range=0.3, horizontal_flip=True, validation_split=0.2) train_generator = generator.flow_from_directory(train_data_dir, targe...
code
74045588/cell_7
[ "text_html_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Input, Conv2D, MaxPooling2D, BatchNormalization, Activation, UpSampling2D, GlobalAveragePooling2D from keras.models import Sequential, Model from keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.applications import EfficientNetB0 from tensor...
code
74045588/cell_10
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator train_data_dir = '../input/1056lab-covid19-chest-xray-recognit/train' generator = ImageDataGenerator(width_shift_range=0.3, height_shift_range=0.3, horizontal_flip=True, validation_split=0.2) train_generator = generator.flow_from_directory(train_data_dir, targe...
code
74045588/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Input, Conv2D, MaxPooling2D, BatchNormalization, Activation, UpSampling2D, GlobalAveragePooling2D from keras.models import Sequential, Model from tensorflow.keras.applications import EfficientNetB0 from tensorflow.keras.applications import EfficientNetB0 efnb0 = Effi...
code
104128103/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error, mean_squared_error import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns def settings(): plt.style.use('bmh') plt.rcParams['figure.figsize'] = [25...
code
17120136/cell_21
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_...
code
17120136/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata...
code
17120136/cell_25
[ "image_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_...
code
17120136/cell_33
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Flatten, Embedding, Conv1D, MaxPooling1D, Dropout from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.regularizers import l1, l2 from nltk import FreqDist, bigrams, trigrams from nltk i...
code
17120136/cell_20
[ "image_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_...
code
17120136/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata...
code
17120136/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd import numpy as np from sklearn.model_selection import train_test_split import nltk from nltk.corpus import RegexpTokenizer as regextoken from nltk.corpus import stopwords from nltk import FreqDist, bigrams, trigrams from nltk import WordNetLemmatizer import matplotlib from matplotlib import pyplot ...
code
17120136/cell_11
[ "text_html_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_...
code
17120136/cell_18
[ "image_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_...
code
17120136/cell_28
[ "text_plain_output_1.png" ]
from keras.preprocessing.text import Tokenizer from nltk import FreqDist, bigrams, trigrams from nltk import WordNetLemmatizer from nltk.corpus import stopwords from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder import pandas as pd grouped = data.groupby(['name', ...
code
17120136/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_duplicates(subset=['name', 'address', 'listed_in(type)_x'], inplace=True) newdata...
code
17120136/cell_15
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_...
code
17120136/cell_17
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_...
code
17120136/cell_31
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Flatten, Embedding, Conv1D, MaxPooling1D, Dropout from keras.models import Sequential from keras.preprocessing.sequence import pad_sequences from keras.preprocessing.text import Tokenizer from keras.regularizers import l1, l2 from nltk import FreqDist, bigrams, trigrams from nltk i...
code
17120136/cell_14
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_...
code
17120136/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from nltk.corpus import stopwords stop = stopwords.words('english') print(stop)
code
17120136/cell_12
[ "text_plain_output_1.png" ]
from nltk import FreqDist, bigrams, trigrams from nltk.corpus import stopwords import pandas as pd grouped = data.groupby(['name', 'address']).agg({'listed_in(type)': list}) newdata = pd.merge(grouped, data, on=['name', 'address']) newdata['listed_in(type)_x'] = newdata['listed_in(type)_x'].astype(str) newdata.drop_...
code
2029692/cell_13
[ "image_output_5.png", "image_output_4.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from scipy.stats import skew from scipy.stats import skew import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/t...
code
2029692/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vm...
code
2029692/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vm...
code
2029692/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vm...
code
2029692/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') train['SalePrice'].describe()
code
2029692/cell_11
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vm...
code
2029692/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd import seaborn as sns import numpy as np from scipy.stats import norm from sklearn.preprocessing import StandardScaler from scipy import stats import matplotlib.pyplot as plt from scipy.stats import skew from subprocess import check_output prin...
code
2029692/cell_7
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vm...
code
2029692/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vm...
code
2029692/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/test.csv') correlation_matrix = train.corr() f, ax = plt.subplots(figsize=(12, 9)) sns.heatmap(correlation_matrix, vm...
code
2029692/cell_12
[ "image_output_11.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from scipy.stats import skew from scipy.stats import skew import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns train = pd.read_csv('../input/train.csv') holdout = pd.read_csv('../input/t...
code
2036121/cell_21
[ "text_html_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts() pd.crosstab(train.toxic, train.severe_toxic) pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity...
code
2036121/cell_13
[ "text_html_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts() pd.crosstab(train.toxic, train.severe_toxic) pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity...
code
2036121/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts() pd.crosstab(train.toxic, train.severe_toxic)
code
2036121/cell_25
[ "text_html_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts() pd.crosstab(train.toxic, train.severe_toxic) pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity...
code
2036121/cell_23
[ "text_html_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts() pd.crosstab(train.toxic, train.severe_toxic) pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity...
code
2036121/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts() pd.crosstab(train.toxic, train.severe_toxic) pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity...
code
2036121/cell_19
[ "text_html_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts() pd.crosstab(train.toxic, train.severe_toxic) pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity...
code
2036121/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts()
code
2036121/cell_15
[ "text_html_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts() pd.crosstab(train.toxic, train.severe_toxic) pd.crosstab(train.toxic, [train.obscene, train.threat, train.insult, train.identity...
code
2036121/cell_27
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import numpy as np import pandas as pd from sklearn.feature_extraction.text import TfidfVectorizer train = pd.read_csv('../input/train.csv') train.toxic.value_counts() pd.crosstab(train.toxic, train.severe_toxic) pd.crosstab(train.toxic, [train.obscene, train.threat, train.in...
code
50223032/cell_21
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns def getCorr(x, y): corr, _ = pearsonr(x, y) return corr def getSlope(df): return abs(df['slope']) dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y=sns.regpl...
code
50223032/cell_9
[ "text_html_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns def getCorr(x, y): corr, _ = pearsonr(x, y) return corr dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y=sns.regplot(x='1',y='target',data=dftrain) L = [] for f...
code
50223032/cell_25
[ "text_html_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns import tensorflow as tf def getCorr(x, y): corr, _ = pearsonr(x, y) return corr def getSlope(df): return abs(df['slope']) dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-i...
code
50223032/cell_34
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns import tensorflow as tf def getCorr(x, y): corr, _ = pearsonr(x, y) return corr def getSlope(df): return abs(df['slope']) dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-i...
code
50223032/cell_30
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns import tensorflow as tf def getCorr(x, y): corr, _ = pearsonr(x, y) return corr def getSlope(df): return abs(df['slope']) dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-i...
code
50223032/cell_33
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns import tensorflow as tf def getCorr(x, y): corr, _ = pearsonr(x, y) return corr def getSlope(df): return abs(df['slope']) dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-i...
code
50223032/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y_train = dftrain.pop('target') y_train
code
50223032/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') plt.scatter(dftrain['299'], dftrain['1']) plt.title('My PCA graph') plt.xlabel('0 -{0}%'.format(dftrain['299'])) plt.ylabel('target -{0}%'.format(d...
code
50223032/cell_26
[ "text_html_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') dftest
code
50223032/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y = sns.regplot(x='1', y='target', data=dftrain)
code
50223032/cell_18
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns def getCorr(x, y): corr, _ = pearsonr(x, y) return corr def getSlope(df): return abs(df['slope']) dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y=sns.regpl...
code
50223032/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd import seaborn as sns dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y=sns.regplot(x='1',y='target',data=dftrain) y.get_xlim()
code
50223032/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') dftrain['127'].values
code
50223032/cell_17
[ "image_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') dftest
code
50223032/cell_31
[ "text_plain_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns import tensorflow as tf def getCorr(x, y): corr, _ = pearsonr(x, y) return corr def getSlope(df): return abs(df['slope']) dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-i...
code
50223032/cell_14
[ "image_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns def getCorr(x, y): corr, _ = pearsonr(x, y) return corr def getSlope(df): return abs(df['slope']) dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y=sns.regpl...
code
50223032/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y_train = dftrain.pop('target') y_train.shape
code
50223032/cell_10
[ "text_html_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns def getCorr(x, y): corr, _ = pearsonr(x, y) return corr dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y=sns.regplot(x='1',y='target',data=dftrain) L=[] for fea...
code
50223032/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import pearsonr import pandas as pd import seaborn as sns def getCorr(x, y): corr, _ = pearsonr(x, y) return corr def getSlope(df): return abs(df['slope']) dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') y=sns.regpl...
code
50223032/cell_5
[ "text_html_output_1.png" ]
import pandas as pd dftrain = pd.read_csv('../input/dont-overfit-ii/train.csv') dftest = pd.read_csv('../input/dont-overfit-ii/test.csv') dftrain
code
72120846/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd import numpy as np import cv2 as cv import matplotlib.image as mpimg from matplotlib import pyplot as plt pd.options.display.float_format = '{:.2f}'.format training_labels = pd.read_csv('../input/landmark-recognition-2021/train.csv') training_labels['path1'] = training_labels['...
code
72120846/cell_5
[ "image_output_1.png" ]
import matplotlib.image as mpimg import pandas as pd import pandas as pd import numpy as np import cv2 as cv import matplotlib.image as mpimg from matplotlib import pyplot as plt pd.options.display.float_format = '{:.2f}'.format training_labels = pd.read_csv('../input/landmark-recognition-2021/train.csv') training_l...
code
50210665/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv') walmart_data.head()
code
50210665/cell_6
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv') walmart_data_groupby = walmart_data.groupby('Store')['Weekly_Sales'].sum() walmart_data_std = walmart_data.groupby('Store').agg({'Weekly_Sales': 'st...
code
50210665/cell_2
[ "text_html_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
50210665/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv') walmart_data_groupby = walmart_data.groupby('Store')['Weekly_Sales'].sum() walmart_data_std = walmart_data.groupby('Store').agg({'Weekly_Sales': 'st...
code
50210665/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv') walmart_data_groupby = walmart_data.groupby('Store')['Weekly_Sales'].sum() walmart_data_std = walmart_data.groupby('Store').agg({'Weekly_Sales': 'st...
code
50210665/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) walmart_data = pd.read_csv('../input/walmart-sales/Walmart_Store_sales.csv') walmart_data_groupby = walmart_data.groupby('Store')['Weekly_Sales'].sum() print('Store Number {} has maximum Sales. Sum of Total Sales {}'.format(wa...
code
73072460/cell_42
[ "text_html_output_1.png" ]
from IPython.display import Image Image(url='https://res.cloudinary.com/practicaldev/image/fetch/s--nUoflRuG--/c_limit%2Cf_auto%2Cfl_progressive%2Cq_auto%2Cw_880/https://i.ibb.co/kG5vPdn/final-cnn.png', width=750, height=500)
code
73072460/cell_21
[ "image_output_1.png" ]
import tensorflow as tf training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0, rotation_range=40, zoom_range=0.2, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, horizontal_flip=True, vertical_flip=True) training_generator = training_data_gen.flow_from_dataframe(datafram...
code