| path (string, 13–17 chars) | screenshot_names (list, 1–873 items) | code (string, 0–40.4k chars) | cell_type (string, 1 distinct value) |
|---|---|---|---|
130023373/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd #dataframe manipulation
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/test.csv')
train['Label'].value_counts() | code |
130023373/cell_3 | [
"image_output_2.png",
"image_output_1.png"
] | !pip install pip -U -q
!pip install fastdup -q | code |
130023373/cell_27 | [
"text_plain_output_1.png"
] | from PIL import Image, ImageDraw, ImageEnhance #for read the image
from tqdm import tqdm_notebook
import cv2 #for read the image
import os
import pandas as pd #dataframe manipulation
import seaborn as sns #for visualization
train = pd.read_csv('/kaggle/input/machinehack-watermark-challenge/train.csv')
test = pd.r... | code |
130023373/cell_5 | [
"image_output_1.png"
] | import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageEnhance
import skimage.color
import skimage.util
import imagehash
import cv2
import os
import re
import itertools
import distance
import time
import warnings
warnings.filterwarnings('... | code |
106208987/cell_4 | [
"text_plain_output_1.png"
] | from matplotlib import image
im = image.imread('../input/rice-image-dataset/Rice_Image_Dataset/Arborio/Arborio (10012).jpg')
im.shape | code |
106208987/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
os.listdir('../input/rice-image-dataset/Rice_Image_Dataset') | code |
106208987/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
106208987/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
os.listdir('../input/rice-image-dataset/Rice_Image_Dataset')
def load(impath):
imgs = []
labels = []
l1 = os.listdir(impath)
for i in l1:
if i[-1] == 't':
continue
l2 = os.listd... | code |
106208987/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from matplotlib import image
import matplotlib.pyplot as plt
im = image.imread('../input/rice-image-dataset/Rice_Image_Dataset/Arborio/Arborio (10012).jpg')
im.shape
plt.imshow(im) | code |
122244126/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true,... | code |
122244126/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true,... | code |
122244126/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true,... | code |
122244126/cell_2 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true,... | code |
122244126/cell_11 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true,... | code |
122244126/cell_1 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
print(pd.__version__)
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as c... | code |
122244126/cell_8 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true,... | code |
122244126/cell_15 | [
"text_html_output_1.png"
] | BACK = 36
features = ['utrend', 'atrend', 'ntrend', 'lng', 'lat', 'rate0', 'rate1', 'rate2', 'rate3', 'rate4', 'rate_sum', 'last_rate1', 'last_rate2', 'last_rate3', 'last_rate4']
CLIPS = {1: 0.00225, 2: 0.005, 3: 0.011, 4: 0.015, 5: 0.024, 6: 0.032}
for LEAD in range(1, 7):
print(f'Forecast month ahead {LEAD}...')
... | code |
122244126/cell_3 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true,... | code |
122244126/cell_5 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import warnings
import gc
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import time
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from sklearn.svm import SVR as skSVR
from cuml.svm import SVR as cuSVR
def smape(y_true,... | code |
34120381/cell_2 | [
"text_plain_output_1.png"
] | import os
import os
for dirname, _, filenames in os.walk('/kaggle/'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34120381/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from gensim.models import Word2Vec
from keras.models import Sequential
from keras.callbacks import EarlyStopping
from keras import optimizers
from keras.layers... | code |
34120381/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
datasets_dir = ''
vnrows = None
datasets_dir = '/kaggle/input/fakenews-preprocessed-dataset/'
df = pd.read_csv(datasets_dir + 'fakenews_preprocessed_35k.csv', nrows=vnrows, encoding='utf-8') | code |
105213956/cell_21 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
im... | code |
105213956/cell_9 | [
"image_output_1.png"
] | from nltk.corpus import stopwords
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]... | code |
105213956/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace... | code |
105213956/cell_30 | [
"text_plain_output_1.png"
] | from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace... | code |
105213956/cell_33 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import ... | code |
105213956/cell_20 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
im... | code |
105213956/cell_29 | [
"text_plain_output_1.png"
] | from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace... | code |
105213956/cell_11 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]... | code |
105213956/cell_1 | [
"text_plain_output_1.png"
] | import nltk
import re
import pandas as pd
from time import time
from collections import defaultdict
from bs4 import BeautifulSoup
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from gens... | code |
105213956/cell_18 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
import pandas as pd # For data handl... | code |
105213956/cell_32 | [
"text_plain_output_1.png"
] | from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import ... | code |
105213956/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import nltk
import numpy as np
import pandas as pd # For data handl... | code |
105213956/cell_3 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.head() | code |
105213956/cell_31 | [
"text_plain_output_1.png"
] | from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import word_tokenize
from sklearn.manifold import TSNE
from tensorflow.keras.preprocessing.sequence import ... | code |
105213956/cell_24 | [
"text_plain_output_1.png"
] | from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(... | code |
105213956/cell_10 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace=True)
T = df['Summary'].str.split(' \n\n---\n\n').str[0]
T = T.str.replace('-', ' ').str.replace('[^\\w\\s]... | code |
105213956/cell_27 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from gensim.models import word2vec
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum()
df.dropna(subset=['Summary'], inplace... | code |
105213956/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # For data handling
df = pd.read_csv('../input/google-war-news/war-news.csv', encoding='latin1')
df.isnull().sum() | code |
17144682/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.info() | code |
17144682/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_44 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv') | code |
17144682/cell_39 | [
"text_plain_output_1.png"
] | from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import matplotlib.pyplot as plt #data plotting
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Du... | code |
17144682/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_48 | [
"text_plain_output_1.png"
] | from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_d... | code |
17144682/cell_41 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.head() | code |
17144682/cell_18 | [
"text_plain_output_1.png"
] | from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type'... | code |
17144682/cell_28 | [
"text_html_output_1.png"
] | from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type'... | code |
17144682/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_16 | [
"text_plain_output_1.png"
] | from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type'... | code |
17144682/cell_38 | [
"text_plain_output_1.png"
] | from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import matplotlib.pyplot as plt #data plotting
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Du... | code |
17144682/cell_47 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_d... | code |
17144682/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_46 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_d... | code |
17144682/cell_24 | [
"text_plain_output_1.png"
] | from pylab import rcParams
import pandas as pd #for data wrangling
import seaborn as sns #data visualization
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type'... | code |
17144682/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency Name', 'Landmark', 'Facility Type', 'Status', 'Due Date', 'Resolution Description', 'Community Board', 'P... | code |
17144682/cell_37 | [
"text_plain_output_1.png"
] | from pylab import rcParams
from pylab import rcParams
from pylab import rcParams
import matplotlib.pyplot as plt #data plotting
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Du... | code |
17144682/cell_5 | [
"text_plain_output_1.png"
] | from plotly.offline import download_plotlyjs,init_notebook_mode,plot,iplot
import cufflinks as cf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from plotly import __version__
import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, i... | code |
17144682/cell_36 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pylab import rcParams
from pylab import rcParams
import matplotlib.pyplot as plt #data plotting
import pandas as pd #for data wrangling
noise_complaints_data = pd.read_csv('../input/Noise_Complaints.csv')
noise_complaints_data.isna().sum()
noise_complaints_data.drop(['Status', 'Due Date', 'Agency', 'Agency N... | code |
129019356/cell_2 | [
"text_plain_output_1.png"
] | !pip install pycaret | code |
129019356/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129019356/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
pd.set_option('max_columns', None)
pd.set_option('max_rows', 90)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
from sklearn.neighbo... | code |
89139521/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['t... | code |
89139521/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.head() | code |
89139521/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['t... | code |
89139521/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
89139521/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['t... | code |
89139521/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop... | code |
89139521/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['t... | code |
89139521/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['t... | code |
89139521/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['t... | code |
89139521/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop=True))
index_series['t... | code |
89139521/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum()
index_series = pd.DataFrame(df.time.drop_duplicates().sort_values().reset_index(drop... | code |
89139521/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv', parse_dates=['time'], index_col=['row_id'])
df.isna().sum() | code |
88080357/cell_19 | [
"text_plain_output_1.png"
] | ! wget -O ngannou.gif https://raw.githubusercontent.com/Justsecret123/Human-pose-estimation/main/Test%20gifs/Ngannou_takedown.gif | code |
88080357/cell_24 | [
"text_plain_output_1.png"
] | from IPython.display import HTML, display
from matplotlib.collections import LineCollection
import cv2
import imageio
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
KEYPOINT_DICT = {'nose': 0, 'left_eye': 1, 'right_eye': 2, 'left_ear': 3, 'right_e... | code |
88080357/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import tensorflow_hub as hub
model = hub.load('https://tfhub.dev/google/movenet/multipose/lightning/1')
movenet = model.signatures['serving_default'] | code |
88080357/cell_27 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from IPython.display import HTML, display
from matplotlib.collections import LineCollection
import cv2
import imageio
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
KEYPOINT_DICT = {'nose': 0, 'left_eye': 1, 'right_ey... | code |
17141241/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin',... | code |
17141241/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
train['Ticket'].value_counts() | code |
17141241/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
train.head() | code |
17141241/cell_25 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin',... | code |
17141241/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin',... | code |
17141241/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.info() | code |
17141241/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
train['Cabin'].value_counts() | code |
17141241/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum() | code |
17141241/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin',... | code |
17141241/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.isnull().sum() | code |
17141241/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin',... | code |
17141241/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin',... | code |
17141241/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
test.isnull().sum()
train['Age'] = train['Age'].fillna(train['Age'].median())
test['Age'] = test['Age'].fillna(test['Age'].median())
train.drop('Cabin', axis=1, inplace=True)
test.drop('Cabin',... | code |