| path (string, length 13-17) | screenshot_names (list, length 1-873) | code (string, length 0-40.4k) | cell_type (string, 1 class) |
|---|---|---|---|
90118084/cell_22 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=column... | code |
90118084/cell_27 | [
"image_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def visualize_word_counts(counts):
    wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='white', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
    cloud = wc... | code |
33105771/cell_4 | [
"text_plain_output_1.png"
] | import nltk
import os
import pandas as pd
from datetime import datetime, date, timedelta
import numpy as np
import re
import os
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
imp... | code |
33105771/cell_6 | [
"text_plain_output_1.png"
] | from langdetect import detect
import pandas as pd
npis_csv = '/kaggle/input/covid19-challenges/npi_canada.csv'
raw_data = pd.read_csv(npis_csv, encoding='ISO-8859-1')
df = raw_data.dropna(how='any', subset=['start_date', 'region', 'intervention_category'])
df['region'] = df['region'].replace('Newfoundland', 'Newfound... | code |
33105771/cell_19 | [
"text_plain_output_1.png"
] | # download sentiment map
!wget https://www.clarin.si/repository/xmlui/bitstream/handle/11356/1048/Emoji_Sentiment_Data_v1.0.csv | code |
33105771/cell_8 | [
"text_html_output_2.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_html_output_3.png"
] | ex1 = "Here's a wrap of the latest coronavirus news in Canada: 77 cases, one death, an outbreak in a B.C. nursing home and Ottawa asks provinces about their critical supply gaps. https://www.theglobeandmail.com/canada/article-bc-records-canadas-first-coronavirus-death/"
ex2 = 'B.C. records Canada’s first coronavirus d... | code |
33105771/cell_3 | [
"text_plain_output_1.png"
] | # download necessary packages
!pip install langdetect
!pip install emoji | code |
33105771/cell_17 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from langdetect import detect
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from textblob import TextBlob
import nltk
import nltk
import os
import pandas as pd
import re
import re
import pandas as pd
from datetime import datetime, date, timedelta
import numpy as np
import re
import... | code |
33105771/cell_14 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import nltk
import nltk
import os
import re
import re
import pandas as pd
from datetime import datetime, date, timedelta
import numpy as np
import re
import os
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
nltk.download... | code |
33105771/cell_22 | [
"text_plain_output_1.png"
] | from langdetect import detect
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from textblob import TextBlob
import emoji
import nltk
import nltk
import os
import pandas as pd
import plotly.graph_objects as go
import re
import re
import pandas as pd
from datetime import datetime, da... | code |
33105771/cell_10 | [
"text_plain_output_1.png"
] | from textblob import TextBlob
ex1 = "Here's a wrap of the latest coronavirus news in Canada: 77 cases, one death, an outbreak in a B.C. nursing home and Ottawa asks provinces about their critical supply gaps. https://www.theglobeandmail.com/canada/article-bc-records-canadas-first-coronavirus-death/"
ex2 = 'B.C. recor... | code |
33105771/cell_12 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from textblob import TextBlob
ex = 'first coronavirus death'
ex_tb = TextBlob(ex)
ex_ss = ex_tb.sentiment[0]
print('{} with score={}'.format(ex, ex_ss))
ex = 'coronavirus death'
ex_tb = TextBlob(ex)
ex_ss = ex_tb.sentiment[0]
print('{} with score={}'.format(ex, ex_ss)) | code |
33105771/cell_5 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
npis_csv = '/kaggle/input/covid19-challenges/npi_canada.csv'
raw_data = pd.read_csv(npis_csv, encoding='ISO-8859-1')
df = raw_data.dropna(how='any', subset=['start_date', 'region', 'intervention_category'])
df['region'] = df['region'].replace('Newfoundland', 'Newfoundland and Labrador')
num_rows_re... | code |
318372/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import sqlite3
con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FR... | code |
318372/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
import sqlite3
con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FR... | code |
318372/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import sqlite3
con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FR... | code |
318372/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd
import sqlite3
con = sqlite3.connect('../input/database.sqlite')
post = pd.read_sql_query('SELECT * FROM post', con)
comment = pd.read_sql_query('SELECT * FROM comment', con)
like = pd.read_sql_query('SELECT * FROM like', con)
rmember = pd.read_sql_query('SELECT distinct id as rid, name rname FR... | code |
73081016/cell_13 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import json
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
n_common_words = 50
wnl = WordNetLemmatizer()
engstopword... | code |
73081016/cell_4 | [
"image_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import json
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
n_common_words = 50
wnl = WordNetLemmatizer()
engstopword... | code |
73081016/cell_8 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import json
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
n_common_words = 50
wnl = WordNetLemmatizer()
engstopword... | code |
73081016/cell_15 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import string
n_common_words = 50
wnl = WordNetLemm... | code |
73081016/cell_10 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import json
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
n_common_words = 50
wnl = WordNetLemmatizer()
engstopword... | code |
73081016/cell_12 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud
import json
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
n_common_words = 50
wnl = WordNetLemmatizer()
engstopword... | code |
34144563/cell_4 | [
"text_plain_output_4.png",
"text_plain_output_6.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
a = 100
b = 100
dim_mesh = (a - 1) * (b - 1)
bias_bo = True
bias_bo_g = True
bias_bo_d = True
train_samples = 1200
end_samples = 1600
path = '../input/2stage-simon/f_set_... | code |
34144563/cell_6 | [
"text_plain_output_1.png"
] | from torch import nn, optim
from torch.nn.functional import softmax
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
nb_takes = 15
nb_reduced = int(300 / nb_takes)
nb_takes_phy = nb_takes
nb_reduced_phy = nb_reduced
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
... | code |
34144563/cell_2 | [
"text_plain_output_1.png"
] | nb_takes = 15
nb_reduced = int(300 / nb_takes)
print(nb_takes, nb_reduced)
nb_takes_phy = nb_takes
nb_reduced_phy = nb_reduced | code |
34144563/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import torch
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
a = 100
b = 100
dim_mesh = (a - 1) * (b - 1)
bias_bo = True
bias_bo_g = True
bias_bo_d = True
train_samples = 1200
end_samples = 1600
path = '../input/2stage-simon/f_set_nonl.npy'
ffine_all = 10000 * np.load(path)
... | code |
34144563/cell_5 | [
"text_plain_output_1.png"
] | from torch import nn, optim
from torch.nn.functional import softmax
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
nb_takes = 15
nb_reduced = int(300 / nb_takes)
nb_takes_phy = nb_takes
nb_reduced_phy = nb_reduced
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
... | code |
74059064/cell_13 | [
"image_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NOR... | code |
74059064/cell_25 | [
"image_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR =... | code |
74059064/cell_6 | [
"text_plain_output_1.png"
] | import cv2
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + ... | code |
74059064/cell_19 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NOR... | code |
74059064/cell_18 | [
"text_plain_output_1.png"
] | import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0001.jpeg'
PNEUMONIA_DIR = ROOT + 'train/PNEUM... | code |
74059064/cell_28 | [
"image_output_1.png"
] | from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
import numpy as np # linear algebra
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/... | code |
74059064/cell_14 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NOR... | code |
74059064/cell_22 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR =... | code |
74059064/cell_10 | [
"text_plain_output_1.png"
] | import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
ROOT = '/kaggle/input/covid19-xray-dataset-train-test-sets/xray_dataset_covid19/'
TRAIN_DIR = ROOT + 'train'
TEST_IMAGE_DIR = '/kaggle/input/covid19-test-sample/pneumonia_test_1.jpg'
VAL_DIR = ROOT + 'test'
NORMAL_DIR = ROOT + 'train/NORMAL/IM-0101-0... | code |
90111632/cell_9 | [
"image_output_1.png"
] | from nltk.corpus import stopwords
from textblob import Word
import matplotlib.pyplot as plt
import nltk
import pandas as pd
import pandas as pd
import seaborn as sns
import tweepy
consumer_key = 'w3M2j4hfO3ByQlROB3W05ooH0'
consumer_secret = 'JmkPXnxlTKV3u5Fnd3xUMor3QF7MIFVJmonXxTN8okLebupXhk'
access_token = '13... | code |
90111632/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
import tweepy
consumer_key = 'w3M2j4hfO3ByQlROB3W05ooH0'
consumer_secret = 'JmkPXnxlTKV3u5Fnd3xUMor3QF7MIFVJmonXxTN8okLebupXhk'
access_token = '1358404783477043201-A3U7lU8ZvTATIBchtrG5x94nauprT8'
access_token_secret = 'e4F74LVCQ7BWHZ0HPPTF4Tz1laeFJ0a341LPcLQ3jpqvX'
auth = twee... | code |
90111632/cell_1 | [
"text_plain_output_1.png"
] | !pip install tweepy | code |
90111632/cell_7 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from textblob import Word
import nltk
import pandas as pd
import pandas as pd
import tweepy
consumer_key = 'w3M2j4hfO3ByQlROB3W05ooH0'
consumer_secret = 'JmkPXnxlTKV3u5Fnd3xUMor3QF7MIFVJmonXxTN8okLebupXhk'
access_token = '1358404783477043201-A3U7lU8ZvTATIBchtrG5x94nauprT8'
access... | code |
90111632/cell_8 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
from textblob import Word
import nltk
import pandas as pd
import pandas as pd
import tweepy
consumer_key = 'w3M2j4hfO3ByQlROB3W05ooH0'
consumer_secret = 'JmkPXnxlTKV3u5Fnd3xUMor3QF7MIFVJmonXxTN8okLebupXhk'
access_token = '1358404783477043201-A3U7lU8ZvTATIBchtrG5x94nauprT8'
access... | code |
17135958/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from PIL import Image
from keras import optimizers
from keras.layers import Input, Dropout, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers.noise import GaussianNoise, GaussianDropout
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers impor... | code |
17135958/cell_4 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import xml.etree.ElementTree as ET
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
breeds = glob.glob('../input/annotati... | code |
17135958/cell_6 | [
"text_plain_output_1.png"
] | from keras.layers import Input, Dropout, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers.noise import GaussianNoise, GaussianDropout
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l1,l2,l1_l2
from keras.models import Model
from ke... | code |
17135958/cell_2 | [
"image_output_1.png"
] | import glob
import os
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
print(f'Total images : {len(all_images)}')
breeds = glob.glob('../input/annotation/Annotation/*')
annotation = []
for b in breeds:
    annotation += ... | code |
17135958/cell_8 | [
"text_plain_output_1.png"
] | from PIL import Image
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import xml.etree.ElementTree as ET
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
breeds = glob.glob('../input/annotati... | code |
17135958/cell_3 | [
"text_plain_output_1.png"
] | from PIL import Image
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import xml.etree.ElementTree as ET
root_images = '../input/all-dogs/all-dogs/'
root_annots = '../input/annotation/Annotation/'
all_images = os.listdir('../input/all-dogs/all-dogs/')
breeds = glob.glob('../input/annotati... | code |
17135958/cell_10 | [
"text_plain_output_1.png"
] | from PIL import Image
from keras import optimizers
from keras.layers import Input, Dropout, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers.noise import GaussianNoise, GaussianDropout
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers impor... | code |
130000382/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from collections import Counter
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
csv = pd.read_csv('/kaggle/input/trip-advisor-hotel-reviews/tripadvisor_hotel_reviews.csv')
csv = csv.rename(columns={'Review': 'review', 'Rating': 'rating'})
csv['rating'] = csv... | code |
130000382/cell_2 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
from collections import Counter
from sklearn import preprocessing
import torch
from torch import nn
from torchvision import transforms, datasets | code |
130000382/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
130000382/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
csv = pd.read_csv('/kaggle/input/trip-advisor-hotel-reviews/tripadvisor_hotel_reviews.csv')
csv = csv.rename(columns={'Review': 'review', 'Rating': 'rating'})
csv['rating'] = csv['rating'] - 1
reviews = csv['review'].tolist()
ratings = csv['rating... | code |
130000382/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
csv = pd.read_csv('/kaggle/input/trip-advisor-hotel-reviews/tripadvisor_hotel_reviews.csv')
csv = csv.rename(columns={'Review': 'review', 'Rating': 'rating'})
csv['rating'] = csv['rating'] - 1
print('Length: {}'.format(len(csv)))
csv.head(10) | code |
130000382/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from collections import Counter
from sklearn import preprocessing
from torch import nn
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
csv = pd.read_csv('/kaggle/input/trip-advisor-hotel-reviews/tripadvisor_hotel_reviews.csv')
csv = csv.renam... | code |
106201252/cell_4 | [
"text_plain_output_1.png"
] | from scipy.sparse import csr_matrix
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
train_size = round(0.8 * len(train_data))
val_size = round(0.2 * len(train_data))
tr... | code |
106201252/cell_3 | [
"text_plain_output_1.png"
] | from scipy.sparse import csr_matrix
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
train_size = round(0.8 * len(train_data))
val_size = round(0.2 * len(train_data))
tr... | code |
106201252/cell_5 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"image_output_5.png",
"text_plain_output_3.png",
"image_output_4.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | from scipy.sparse import csr_matrix
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd
import random
train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')
train_size = round(0.8 * len(train_d... | code |
122251379/cell_13 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF | code |
122251379/cell_9 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
X = A - B
X | code |
122251379/cell_20 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop() | code |
122251379/cell_26 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop()
A.remove(100)
H = A.discard(4)
H
A.update(B)
A | code |
122251379/cell_2 | [
"text_plain_output_1.png"
] | s = set()
type(s)
s = {'INDIA', 'SRILANKA', 'PAKISTAN'}
type(s) | code |
122251379/cell_19 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop() | code |
122251379/cell_1 | [
"text_plain_output_1.png"
] | s = set()
type(s) | code |
122251379/cell_7 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D | code |
122251379/cell_18 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A | code |
122251379/cell_28 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop()
A.remove(100)
H = A.discard(4)
H
A.update(B)
A.clear()
A | code |
122251379/cell_16 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A | code |
122251379/cell_24 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop()
A.remove(100)
H = A.discard(4)
H
A | code |
122251379/cell_14 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
sym_dif = B ^ A
sym_dif | code |
122251379/cell_22 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF
SYM_DIF = A.symmetric_difference(B)
SYM_DIF
A.add(100)
A.pop()
A.pop()
A.remove(100)
A | code |
122251379/cell_10 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
Y = B - A
Y | code |
122251379/cell_12 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C
D = A.union(B)
D = A | B
D = B | A
D
SYM_DIF = A ^ B
SYM_DIF | code |
122251379/cell_5 | [
"text_plain_output_1.png"
] | A = {1, 2, 3, 4}
B = {3, 4, 5, 6}
C = A.intersection(B)
C = A & B
C | code |
2001469/cell_9 | [
"image_output_1.png"
] | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from skle... | code |
2001469/cell_6 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
files_listing = test_data.PassengerId
test_labels = pd.read_csv('../input/gender_submission.csv')
labels_test = test_l... | code |
2001469/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2001469/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from skle... | code |
2001469/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
files_listing = test_data.PassengerId
test_labels = pd.read_csv('../input/gender_submission.csv')
train_data.head() | code |
2001469/cell_10 | [
"text_html_output_1.png"
] | from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from skle... | code |
73067530/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny', 'Jane'], columns=['Matrix', 'Ali... | code |
73067530/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny', 'Jane'], columns=['Matrix', 'Ali... | code |
73067530/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny', 'Jane'], columns=['Matrix', 'Ali... | code |
73067530/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
np.set_printoptions(precision=1)
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny',... | code |
73067530/cell_5 | [
"text_plain_output_1.png"
] | from numpy.linalg import svd
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
M = np.array([[1, 1, 1, 0, 0], [3, 3, 3, 0, 0], [4, 4, 4, 0, 0], [5, 5, 5, 0, 0], [0, 0, 0, 4, 4], [0, 0, 0, 5, 5], [0, 0, 0, 2, 2]])
d = pd.DataFrame(M, index=['Joe', 'Jim', 'John', 'Jack', 'Jill', 'Jenny', 'J... | code |
89136755/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('/kaggle/input/osgdxaspectcapital/train.csv', index_col=0)
X_test = pd.read_csv('/kaggle/input/osgdxaspectcapital/test.csv', index_col=0)
X_train = train_df[[c for c in train_df if c != 'y']]
y_train = train_df['y'].values
sample = X_train.sample(n=1)
sample_market = sample[... | code |
90117670/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment | code |
90117670/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_equipment.dtypes | code |
90117670/cell_19 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment... | code |
90117670/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename)) | code |
90117670/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
russian_equipment_no_day = russian_equipment.drop('day', axis=1)
russian_equipment_no_day.head()
sns.set_style('darkgrid')
plt.figure(figsize=(20, 9))
plt.title('Russian Equipment Losses')
plt.xlabel('Date')
plt.ylabel('Asset')
sns.lineplot(data=russian_equipment_no... | code |
90117670/cell_15 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_personnel = pd.read_csv('../input/2022-ukraine... | code |
90117670/cell_17 | [
"text_html_output_1.png"
] | from sklearn.impute import SimpleImputer
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_personnel = pd.read_csv('../input/2022-ukraine... | code |
90117670/cell_14 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_personnel = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_personnel.csv',... | code |
90117670/cell_10 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(20, 9))
x_data = ['military auto', 'APC']
for vehicle in x_data:
    sns.lmplot(data=russian_equipment_no_day, x=vehicle, y='tank')
plt.show() | code |
90117670/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
russian_equipment = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_equipment.csv', index_col='date', parse_dates=True)
russian_equipment
russian_personnel = pd.read_csv('../input/2022-ukraine-russian-war/russia_losses_personnel.csv',... | code |
2032867/cell_13 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
import warnings # We want to suppress warnings
warnings.filterwarnings('ignore')
HRData = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv')
HRData.isnull().any()
hrdunique = HRData.nunique()
hrdunique = hrdunique.sort_values()
hrdunique
hrd = HRData.copy()
hrd.drop('Over18', axis... | code |
2032867/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import warnings # We want to suppress warnings
warnings.filterwarnings('ignore')
HRData = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv')
HRData.head() | code |
2032867/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
import warnings # We want to suppress warnings
warnings.filterwarnings('ignore')
HRData = pd.read_csv('../input/WA_Fn-UseC_-HR-Employee-Attrition.csv')
HRData.isnull().any()
hrdunique = HRData.nunique()
hrdunique = hrdunique.sort_values()
hrdunique
hrd = HRData.copy()
... | code |