| path (string, 13–17 chars) | screenshot_names (list, 1–873 items) | code (string, 0–40.4k chars) | cell_type (string, 1 class) |
|---|---|---|---|
18124779/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance... | code |
18124779/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18124779/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance... | code |
18124779/cell_8 | [
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance... | code |
18124779/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance... | code |
18124779/cell_10 | [
"text_html_output_1.png"
] | from pandas import DataFrame
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0,... | code |
18124779/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from pandas import DataFrame
import pandas as pd
from pandas import DataFrame
performance = {'id': [1, 2, 3, 4], 'date': ['19/12/2018', '20/12/2018', '21/12/2018', '22/12/2018'], 'time': [45, 50, 90, 50], 'km': [6.0, 5.5, 6.0, 4.0], 'rider_performance': [3, 4, 4, 4], 'horse_performance': [4, 4, 5, 5], 'avg_performance... | code |
130011284/cell_6 | [
"text_plain_output_1.png"
] | import pandas
train = pandas.read_csv('/kaggle/input/loan-status-binary-classification/train.csv')
test = pandas.read_csv('/kaggle/input/loan-status-binary-classification/test.csv')
for column in train.columns:
print(column, train[column].isnull().sum()) | code |
130011284/cell_3 | [
"text_plain_output_1.png"
] | import pandas
import numpy
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn.linear_model import LogisticRegression | code |
130011284/cell_17 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
import numpy
import pandas
train = pandas.read_csv('/kaggle/input/loan-status-binary-classification/train.csv')
test = pandas.read_csv('/kaggle/input/loan-status-binary-classification/test.csv')
train[... | code |
50233625/cell_15 | [
"text_html_output_2.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go
data_2017 = pd.read_csv('../input/kaggle-survey-2017/multipleChoiceResponses.csv', encoding='ISO-8859-1', low_memory=False)
data_2018 = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceResponses.csv', low_memory=Fal... | code |
50233625/cell_17 | [
"text_html_output_1.png"
] | from plotly.subplots import make_subplots
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
data_2017 = pd.read_csv('../input/kaggle-survey-2017/multipleChoiceResponses.csv', encoding='ISO-8859-1', low_memory=False)
data_2018 = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceR... | code |
128034461/cell_13 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=Fa... | code |
128034461/cell_9 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
data.describe() | code |
128034461/cell_40 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from wordcloud import WordClo... | code |
128034461/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples') | code |
128034461/cell_18 | [
"text_html_output_1.png"
] | from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count... | code |
128034461/cell_28 | [
"text_html_output_1.png"
] | from collections import Counter
from nltk.corpus import stopwords, wordnet
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import re
import string
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1'... | code |
128034461/cell_8 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
data.head() | code |
128034461/cell_15 | [
"image_output_1.png"
] | from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
import plotly.express as px
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count... | code |
128034461/cell_17 | [
"text_html_output_1.png"
] | from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sor... | code |
128034461/cell_31 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=Fa... | code |
128034461/cell_14 | [
"text_html_output_2.png"
] | from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sor... | code |
128034461/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/sentiment-analysis-for-financial-news/all-data.csv', names=['Sentiment', 'Tweet'], encoding='latin-1')
temp = data.groupby('Sentiment').count()['Tweet'].reset_index().sort_values(by='Tweet', ascending=False)
temp.style.background_gradient(cmap='Purples')
fig = go.Figu... | code |
128034461/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from wordcloud import WordClo... | code |
1008769/cell_13 | [
"text_plain_output_1.png"
] | from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from numpy import newaxis
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, ... | code |
1008769/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
prices_dataset = pd.read_csv('../input/prices.csv', header=0)
prices_dataset
wltw = prices_dataset[prices_dataset['symbol'] == 'WLTW']
wltw.shape | code |
1008769/cell_6 | [
"image_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
prices_dataset = pd.read_csv('../input/prices.csv', header=0)
prices_dataset
wltw = prices_dataset[prices_dataset['symbol'] == 'WLTW']
wltw.shape
wltw_stock_prices =... | code |
1008769/cell_2 | [
"image_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from sklearn.cross_validation import train_test_split
import time
from skle... | code |
1008769/cell_11 | [
"text_html_output_1.png"
] | from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import numpy as np # linear algebra
import time #helper libraries
def create_dataset(dataset, look_back=1):
dataX, dataY = ([], [])
for i in range(len(dataset) - look_back - ... | code |
1008769/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
prices_dataset = pd.read_csv('../input/prices.csv', header=0)
prices_dataset | code |
1008769/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import time #helper libraries
model = Sequential()
model.add(LSTM(input_dim=1, output_dim=50, return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(100, return_sequences=Fals... | code |
1008769/cell_5 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
prices_dataset = pd.read_csv('../input/prices.csv', header=0)
prices_dataset
wltw = prices_dataset[prices_dataset['symbol'] == 'WLTW']
wltw.shape
wltw_stock_prices =... | code |
18154187/cell_21 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab as pl
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
#Check of dataset
comp_df.head()
comp_df.tail()
co... | code |
18154187/cell_13 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
#Check of dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
#change name of columns and make it with ... | code |
18154187/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
#Check of dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
#change name of columns and make it with ... | code |
18154187/cell_23 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab as pl
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
#Check of dataset
comp_df.head()
comp_df.tail()
co... | code |
18154187/cell_6 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
comp_df.rename(columns={'year founded': 'year_founded', 'size ... | code |
18154187/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.cluster import KMeans
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab as pl
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
#Check of dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_... | code |
18154187/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18154187/cell_8 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
continent_df = pd.read_csv('../input/continent/country_continent.csv', delimiter=';', encoding='ISO-8859-1')
continent_df.head()
continent_d... | code |
18154187/cell_15 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
#Check of dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
#change name of columns and make it with ... | code |
18154187/cell_17 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
#Check of dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
#change name of columns and make it with ... | code |
18154187/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
comp_df = pd.read_csv('../input/free-7-million-company-dataset/companies_sorted.csv')
#Check of dataset
comp_df.head()
comp_df.tail()
comp_df.shape
comp_df.info()
comp_df.describe()
#change name of columns and make it with ... | code |
2004768/cell_8 | [
"text_plain_output_1.png"
] | import lightgbm as lgb
import numpy as np
import pandas as pd
MAX_PRED = 1000
MAX_ROUNDS = 2000
indir = '../input/preparing-data-ii/'
indir2 = '../input/favorita-grocery-sales-forecasting/'
X_test = pd.read_csv(indir + 'X_test.csv')
X_val = pd.read_csv(indir + 'X_val.csv')
X_train = pd.read_csv(indir + 'X_train.cs... | code |
2004768/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from subprocess import check_output
from datetime import date, timedelta
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2004768/cell_10 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_squared_error
import lightgbm as lgb
import numpy as np
import pandas as pd
MAX_PRED = 1000
MAX_ROUNDS = 2000
indir = '../input/preparing-data-ii/'
indir2 = '../input/favorita-grocery-sales-forecasting/'
X_test = pd.read_csv(indir + 'X_test.csv')
X_val = pd.read_csv(indir + 'X_val... | code |
330380/cell_13 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Se... | code |
330380/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfu... | code |
330380/cell_6 | [
"image_output_1.png"
] | import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
combined.info() | code |
330380/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Se... | code |
330380/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfu... | code |
330380/cell_15 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import seaborn as sns
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=... | code |
330380/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import RobustScaler
from sklearn.feature_selection import RFECV, RFE
from sklearn.cross_validation import StratifiedKFold, cross_val_score
from sklearn.decomposition import KernelPCA
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import RandomForestClassifier, AdaBoostClas... | code |
330380/cell_14 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Se... | code |
330380/cell_10 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
combined = pd.concat((titanic_train, titanic_test), axis=0)
ages_mean = combined.pivot_table('Age', index=['Title'], columns=['Se... | code |
330380/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
titanic_train = pd.read_csv('../input/train.csv', index_col='PassengerId')
titanic_test = pd.read_csv('../input/test.csv', index_col='PassengerId')
titanic_train.info()
print('\n')
titanic_test.info() | code |
17118578/cell_13 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames i... | code |
17118578/cell_25 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, Spatial... | code |
17118578/cell_23 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, Spatial... | code |
17118578/cell_20 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.metrics import f1_score, accuracy_score, classification_report, roc_auc_score, confusion_matrix, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Reduce... | code |
17118578/cell_6 | [
"image_output_2.png",
"image_output_1.png"
] | from glob import glob
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
print(fnames[0]) | code |
17118578/cell_26 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.metrics import f1_score, accuracy_score, classification_report, roc_auc_score, confusion_matrix, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Reduce... | code |
17118578/cell_19 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, Spatial... | code |
17118578/cell_8 | [
"text_plain_output_1.png"
] | from glob import glob
import mne
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
raw_train = mne.io.read_raw_edf(fnames[0], preload=True)
annot_train = mne.read_annotations(fnames[1])
raw_train.pick_channels(['Resp oro-nasal'])
raw_train.set_annotations(annot_tr... | code |
17118578/cell_15 | [
"text_plain_output_1.png",
"image_output_2.png",
"image_output_1.png"
] | from tensorflow import keras
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import conca... | code |
17118578/cell_16 | [
"text_plain_output_1.png"
] | from tensorflow import keras
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, SpatialDropout1D
from tensorflow.keras.layers import Reshape, LSTM, TimeDistributed, Bidirectional, BatchNormalization, Flatten, RepeatVector
from tensorflow.keras.layers import conca... | code |
17118578/cell_24 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, CSVLogger
from tensorflow.keras.layers import Input, Conv1D, Dense, Dropout, MaxPool1D, Activation, Convolution1D, Spatial... | code |
17118578/cell_22 | [
"text_plain_output_1.png"
] | from glob import glob
from sklearn.metrics import f1_score, accuracy_score, classification_report, roc_auc_score, confusion_matrix, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Reduce... | code |
17118578/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from glob import glob
import numpy as np
import os
DATA = '../input/sleepstate/sleep-state'
fnames = sorted(glob(os.path.join(DATA, '*.edf')))
PP_DATA = '../input/respiracion1'
fnames = sorted(glob(os.path.join(PP_DATA, '*.npz')))
total_fs = [f for f in fnames if f.split('/')[-1][:5]]
total_data = {k: np.load(k) fo... | code |
17118578/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm, tqdm_notebook
import tensorflow as tf
from tensorflow import keras
from keras import optimizers, losses, activations, models
from tensorflow.keras.utils import to_categorical, normalize
from tensorflow.keras.models import Mode... | code |
16136832/cell_33 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
split = St... | code |
16136832/cell_26 | [
"text_html_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
split = St... | code |
16136832/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
16136832/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
total_suicidal['country'].value_counts() | code |
16136832/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
total_suicidal.hist(bins=50, figsize=(20, 15))
plt.show() | code |
16136832/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
split = St... | code |
16136832/cell_28 | [
"image_output_1.png"
] | from sklearn.model_selection import StratifiedShuffleSplit
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedShuffleSplit
split = St... | code |
16136832/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
total_suicidal.describe() | code |
16136832/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
total_suicidal.info() | code |
16136832/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
total_suicidal = pd.read_csv('../input/master.csv')
total_suicidal.head() | code |
90122427/cell_25 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
c... | code |
90122427/cell_33 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
c... | code |
90122427/cell_6 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
data.head() | code |
90122427/cell_29 | [
"image_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
c... | code |
90122427/cell_26 | [
"image_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
c... | code |
90122427/cell_19 | [
"image_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in column... | code |
90122427/cell_18 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
cloud = wc.generate... | code |
90122427/cell_32 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
c... | code |
90122427/cell_28 | [
"image_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
c... | code |
90122427/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in column... | code |
90122427/cell_31 | [
"image_output_1.png"
] | from wordcloud import WordCloud
import geopandas
import matplotlib.pyplot as plt
import pandas as pd
def visualize_word_counts(counts, show=True):
wc = WordCloud(max_font_size=130, min_font_size=25, colormap='tab20', background_color='black', prefer_horizontal=0.95, width=2100, height=700, random_state=0)
c... | code |
90122427/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/suicide-rates-worldwide-20002019/data.csv')
columns = ['Country', 'Year', 'ProbDyingBoth', 'ProbDyingMale', 'ProbDyingFemale', 'SuicideBoth', 'SuicideMale', 'SuicideFemale']
values = data.iloc[1:, :].values
data = pd.DataFrame(values, columns=columns)
for col in column... | code |
1007495/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
x = pd.read_csv('../input/train.csv')
x_2 = pd.read_csv('../input/train.csv')
y = pd.read_csv('../input/test.csv')
toPredict = x.pop('Survived')
data = pd.concat([x, y])
data.describe(include=['O']) | code |
1007495/cell_11 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
x = pd.read_csv('../input/train.csv')
x_2 = pd.read_csv('../input/train.csv')
y = pd.read_csv('../input/test.csv')
toPredict = x.pop('Survived')
data = pd.concat([x, y])
newage = data[['Age', 'Pclass', 'Sex']].dropna()
print('Pclass 1 F = ' + str(np.median(newage.query('Pclass ... | code |
1007495/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
x = pd.read_csv('../input/train.csv')
x_2 = pd.read_csv('../input/train.csv')
y = pd.read_csv('../input/test.csv')
toPredict = x.pop('Survived')
data = pd.concat([x, y])
data.describe() | code |
128021494/cell_4 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import random
data = np.genfromtxt('/kaggle/input/da-assignment2/wdbc.data', delimiter=',')
data = np.delete(data, [0, 1], axis=1)
file = open('/kaggle/input/wdbc-labels/wdbc_labels.csv', 'r')
lines = file.readlines()
count = 0
labels = np.zeros((data.shape[0], 1))
... | code |
128021494/cell_6 | [
"text_plain_output_1.png"
] | import tensorflow as tf
model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(30))
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(256, activation='relu'))
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))... | code |
128021494/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tensorflow import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from tensorflow.keras import layers | code |
128021494/cell_7 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import random
import tensorflow as tf
data = np.genfromtxt('/kaggle/input/da-assignment2/wdbc.data', delimiter=',')
data = np.delete(data, [0, 1], axis=1)
file = open('/kaggle/input/wdbc-labels/wdbc_labels.csv', 'r'... | code |
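
Each record above pairs a notebook cell (`path` encodes `<notebook_id>/<cell_id>`) with the names of its rendered outputs and its (possibly truncated) source. Below is a minimal sketch of how rows with this schema might be consumed via the `datasets` library; the repo id `user/kaggle-notebook-cells` and the `train` split are placeholders, not the actual dataset coordinates.

```python
# Minimal sketch: load a dataset with the schema shown above and inspect a few
# records. The repo id is a placeholder; substitute the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/kaggle-notebook-cells", split="train")  # hypothetical repo id

for record in ds.select(range(3)):
    # `path` encodes "<notebook_id>/<cell_id>", e.g. "18124779/cell_6".
    notebook_id, cell_id = record["path"].split("/")
    n_outputs = len(record["screenshot_names"])  # rendered output images
    print(f"{notebook_id} {cell_id}: {n_outputs} output(s), "
          f"{len(record['code'])} chars of {record['cell_type']} code")
```

Since every `path` within a notebook shares the same prefix, grouping records by `notebook_id` recovers the per-notebook set of cells.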