Column             Type              Range / classes
path               string            length 13 to 17
screenshot_names   list              length 1 to 873
code               string            length 0 to 40.4k
cell_type          string (class)    1 value ("code")
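A minimal sketch of how rows with this schema could be loaded and sanity-checked with pandas. This is an illustration only, not the dataset's official loader: the file name "notebook_cells.parquet" and the derived "notebook_id" column are placeholders and assumptions, not part of the dataset.

# Minimal sketch (assumed Parquet export; the file name is a placeholder).
import pandas as pd

cells = pd.read_parquet("notebook_cells.parquet")  # columns: path, screenshot_names, code, cell_type

# Sanity-check the values against the schema above.
print(cells["path"].str.len().agg(["min", "max"]))               # expected: roughly 13 to 17 characters
print(cells["screenshot_names"].apply(len).agg(["min", "max"]))  # expected: 1 to 873 names per cell
print(cells["cell_type"].unique())                               # expected: the single class 'code'

# Derive a notebook id from `path` (the part before "/cell_<n>") to count cells per notebook.
cells["notebook_id"] = cells["path"].str.split("/").str[0]
print(cells.groupby("notebook_id")["code"].count().sort_values(ascending=False).head())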
122255862/cell_17
[ "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape titanic.groupby('Sex').Survived.sum() titanic_class = titanic.groupby('Pclass') titanic_class titanic_class.get_group(1) titanic_class.get_group(2) ...
code
122255862/cell_35
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import plotly.graph_objs as go import plotly.offline as offline import seaborn as sns titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape titanic_gender = titanic['Sex...
code
122255862/cell_31
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape titanic.groupby('Sex').Survived.sum() titanic_class = titanic.groupby('Pclass') titanic_class titanic['avg_fare_class'] = titanic.groupby('Pclass')['...
code
122255862/cell_24
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape titanic.groupby('Sex').Survived.sum() titanic_class = titanic.groupby('Pclass') titanic_class titanic_class.get_group(1) titanic_class.get_group(2) ...
code
122255862/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape titanic.groupby('Sex').Survived.sum() titanic_class = titanic.groupby('Pclass') titanic_class titanic_class.get_group(1)
code
122255862/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape titanic_gender = titanic['Sex'].value_counts(normalize=True) wp = {'linewidth': 1, 'edgecolor'...
code
122255862/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape titanic_gender = titanic['Sex'].value_counts(normalize=True) wp = {'linewidth': 1, 'edgecolor': 'black'} plt.pie(tita...
code
122255862/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape titanic.groupby('Sex').Survived.sum() titanic_class = titanic.groupby('Pclass') titanic_class titanic_class.get_group(1) titanic_class.get_group(2) ...
code
122255862/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape titanic.groupby('Sex').Survived.sum() titanic_class = titanic.groupby('Pclass') titanic_class
code
122255862/cell_5
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) titanic = pd.read_csv('/kaggle/input/test-file/tested.csv') titanic.shape print(f'The table above contains: \nrows: {titanic.shape[0]} \ncolumns: {titanic.shape[1]}')
code
73090244/cell_13
[ "text_html_output_1.png" ]
code
73090244/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import cudf PATH = '/kaggle/input/optiver-realized-volatility-prediction' def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'): file_name = f'{path}/{mode}.csv' return cudf.read_csv(file_name) dev_df = load_data('train', path=PATH) SCALE = 100 dev_df['target'] = SCALE * dev_df['tar...
code
73090244/cell_6
[ "text_plain_output_1.png" ]
import cudf import glob PATH = '/kaggle/input/optiver-realized-volatility-prediction' def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'): file_name = f'{path}/{mode}.csv' return cudf.read_csv(file_name) dev_df = load_data('train', path=PATH) order_book_training = glob.glob(f'{PA...
code
73090244/cell_2
[ "text_plain_output_1.png" ]
import cupy as cp import cudf import cuml import glob from tqdm import tqdm import lightgbm as lgb import numpy as np from sklearn.model_selection import KFold import matplotlib.pyplot as plt
code
73090244/cell_8
[ "text_plain_output_1.png" ]
code
73090244/cell_16
[ "text_plain_output_1.png" ]
from tqdm import tqdm import cu_utils.transform as cutran import cudf import cupy as cp import glob PATH = '/kaggle/input/optiver-realized-volatility-prediction' def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'): file_name = f'{path}/{mode}.csv' return cudf.read_csv(file_name...
code
73090244/cell_3
[ "text_plain_output_1.png" ]
import cudf PATH = '/kaggle/input/optiver-realized-volatility-prediction' def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'): file_name = f'{path}/{mode}.csv' return cudf.read_csv(file_name) dev_df = load_data('train', path=PATH) dev_df.head()
code
73090244/cell_5
[ "text_plain_output_1.png" ]
import cudf import glob PATH = '/kaggle/input/optiver-realized-volatility-prediction' def load_data(mode, path='/kaggle/input/optiver-realized-volatility-prediction'): file_name = f'{path}/{mode}.csv' return cudf.read_csv(file_name) dev_df = load_data('train', path=PATH) order_book_training = glob.glob(f'{PA...
code
73099200/cell_21
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull()...
code
73099200/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull() data.shape data.isnull().sum() data.fillna(data.mean()) data.select_dtypes(include='int') num_data = data.select_dtypes(...
code
73099200/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull() data.shape data.isnull().sum() data.fillna(data.mean()) data.select_dtypes(include='int')
code
73099200/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns
code
73099200/cell_34
[ "text_plain_output_1.png" ]
from sklearn.compose import make_column_transformer from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.pipeline import make_pipeline from sklearn.pipeline import make_pipeline from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing impor...
code
73099200/cell_23
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull()...
code
73099200/cell_30
[ "text_plain_output_1.png" ]
from sklearn.model_selection import KFold from sklearn.model_selection import KFold kf = KFold(n_splits=3) for i in kf.split([0, 1, 2, 3, 4, 5, 6, 7, 8]): print(i)
code
73099200/cell_33
[ "text_plain_output_1.png" ]
from sklearn.compose import make_column_transformer from sklearn.linear_model import LinearRegression from sklearn.metrics import mean_squared_error from sklearn.pipeline import make_pipeline from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder...
code
73099200/cell_20
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull()...
code
73099200/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull() data.shape
code
73099200/cell_26
[ "text_plain_output_1.png" ]
from sklearn.compose import make_column_transformer from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Te...
code
73099200/cell_2
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.head()
code
73099200/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull() data.shape data.isnull().sum() data.fillna(data.mean()) data.select_dtypes(include='int') num_data = data.select_dtypes(...
code
73099200/cell_19
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull()...
code
73099200/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73099200/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull() data.shape data.isnull().sum()
code
73099200/cell_18
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull()...
code
73099200/cell_32
[ "text_plain_output_1.png" ]
from sklearn.compose import make_column_transformer from sklearn.linear_model import LinearRegression from sklearn.pipeline import make_pipeline from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV fil...
code
73099200/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull() data.shape data.isnull().sum() data.fillna(data.mean())
code
73099200/cell_16
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull()...
code
73099200/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data
code
73099200/cell_17
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull()...
code
73099200/cell_31
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read...
code
73099200/cell_14
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull() data.shape data.isnull().sum() data.fillna(data.mean()) data.select_dtype...
code
73099200/cell_22
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull()...
code
73099200/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull() data.shape data.isnull().sum() data.fillna(data.mean()) data.select_dtypes(include='int') num_data = data.select_dtypes(...
code
73099200/cell_27
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import OneHotEncoder,LabelEncoder, OrdinalEncoder import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-T...
code
73099200/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull() data.shape data.isnull().sum() data.fillna(data.mean()) data.select_dtypes(include='int') num_data = data.select_dtypes(...
code
73099200/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/telco-customer-churn/WA_Fn-UseC_-Telco-Customer-Churn.csv') data.columns data.isnull()
code
329077/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import math import pandas as pd names_data = pd.read_csv('../input/NationalNames.csv') frequent_names = names_data[names_data['Count'] > 10] indexed_names = frequent_names.set_index(['Year', 'Name'])['Count'] def ambiguity_measure(grouped_frame): return 2 * (1 - grouped_frame.max() / grouped_fr...
code
329077/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import math import pandas as pd names_data = pd.read_csv('../input/NationalNames.csv') frequent_names = names_data[names_data['Count'] > 10] indexed_names = frequent_names.set_index(['Year', 'Name'])['Count'] def ambiguity_measure(grouped_frame): return 2 * (1 - grouped_frame.max() / grouped_fr...
code
329077/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import math import pandas as pd names_data = pd.read_csv('../input/NationalNames.csv') frequent_names = names_data[names_data['Count'] > 10] indexed_names = frequent_names.set_index(['Year', 'Name'])['Count'] def ambiguity_measure(grouped_frame): return 2 * (1 - grouped_frame.max() / grouped_fr...
code
329077/cell_10
[ "text_html_output_1.png" ]
import pandas as pd import math import pandas as pd names_data = pd.read_csv('../input/NationalNames.csv') frequent_names = names_data[names_data['Count'] > 10] indexed_names = frequent_names.set_index(['Year', 'Name'])['Count'] def ambiguity_measure(grouped_frame): return 2 * (1 - grouped_frame.max() / grouped_fr...
code
106198852/cell_4
[ "text_plain_output_1.png" ]
!pip install transformers from transformers import BertForQuestionAnswering, AutoTokenizer modelname = 'deepset/bert-base-cased-squad2' model = BertForQuestionAnswering.from_pretrained(modelname) tokenizer = AutoTokenizer.from_pretrained(modelname)
code
106198852/cell_7
[ "text_plain_output_1.png" ]
from transformers import pipeline context = 'The Intergovernmental Panel on Climate Change (IPCC) is a scientifie intergovernmental body under the auspicesof the United Notio ns, set up at the request of member governments. It was first established in 1988 by two UnitedNations organizations, the World Me teorological ...
code
106198852/cell_8
[ "text_plain_output_1.png" ]
from transformers import pipeline context = 'The Intergovernmental Panel on Climate Change (IPCC) is a scientifie intergovernmental body under the auspicesof the United Notio ns, set up at the request of member governments. It was first established in 1988 by two UnitedNations organizations, the World Me teorological ...
code
106198852/cell_5
[ "text_plain_output_1.png" ]
questions = ['what organization is the IPCC a part of?', 'What UN organizations established the IPCC?', 'What does the UN want to stabilize?'] tokenizer.encode(questions[0], truncation=True, padding=True)
code
130024391/cell_21
[ "text_plain_output_1.png" ]
import glob import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/' defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv') tdcsfog_meta = pd.read_csv(data_direct...
code
130024391/cell_13
[ "text_html_output_1.png" ]
import glob import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/' defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv') tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv') def prepare_fog_table(df_...
code
130024391/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/' defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv') tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv') # Creat function to display main info ...
code
130024391/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/' defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv') tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv') tdcsfog_meta.head()
code
130024391/cell_19
[ "text_html_output_1.png" ]
import glob import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/' defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv') tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv') def prepare_fog_table(df_...
code
130024391/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import os import glob import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
130024391/cell_17
[ "text_html_output_1.png" ]
import glob import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/' defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv') tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv') def prepare_fog_table(df_...
code
130024391/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_directory = '/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/' defog_meta = pd.read_csv(data_directory + 'defog_metadata.csv') tdcsfog_meta = pd.read_csv(data_directory + 'tdcsfog_metadata.csv') defog_meta.head()
code
16124614/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import tensorflow as tf print('Version: {}'.format(tf.VERSION))
code
16124614/cell_6
[ "text_plain_output_1.png" ]
import pathlib main_path = pathlib.Path('../input/oct2017/OCT2017 ') train_path = main_path / 'train' test_path = main_path / 'test' val_path = main_path / 'val' train_path
code
16124614/cell_29
[ "text_plain_output_1.png" ]
print('Model Accuracy on Test Data: {:.1f}%'.format(test_acc * 100))
code
16124614/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pathlib import random import tensorflow as tf AUTOTUNE = tf.data.experimental.AUTOTUNE main_path = pathlib.Path('../input/oct2017/OCT2017 ') train_path = main_path / 'train' test_path = main_path / 'test' val_path = main_path / 'val' train_path import random train_image_path...
code
16124614/cell_28
[ "text_plain_output_1.png" ]
from tensorflow.keras import datasets, layers, models import matplotlib.pyplot as plt import os import os import pathlib import random import tensorflow as tf AUTOTUNE = tf.data.experimental.AUTOTUNE main_path = pathlib.Path('../input/oct2017/OCT2017 ') train_path = main_path / 'train' test_path = main_path / '...
code
16124614/cell_8
[ "text_plain_output_1.png" ]
import pathlib import random main_path = pathlib.Path('../input/oct2017/OCT2017 ') train_path = main_path / 'train' test_path = main_path / 'test' val_path = main_path / 'val' train_path import random train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))] random.shuffle(train_image_paths) test...
code
16124614/cell_24
[ "text_plain_output_1.png" ]
from tensorflow.keras import datasets, layers, models import matplotlib.pyplot as plt import pathlib import random import tensorflow as tf AUTOTUNE = tf.data.experimental.AUTOTUNE main_path = pathlib.Path('../input/oct2017/OCT2017 ') train_path = main_path / 'train' test_path = main_path / 'test' val_path = main_...
code
16124614/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pathlib import random import tensorflow as tf AUTOTUNE = tf.data.experimental.AUTOTUNE main_path = pathlib.Path('../input/oct2017/OCT2017 ') train_path = main_path / 'train' test_path = main_path / 'test' val_path = main_path / 'val' train_path import random train_image_path...
code
16124614/cell_10
[ "text_plain_output_1.png" ]
import pathlib import random main_path = pathlib.Path('../input/oct2017/OCT2017 ') train_path = main_path / 'train' test_path = main_path / 'test' val_path = main_path / 'val' train_path import random train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))] random.shuffle(train_image_paths) test...
code
16124614/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras import datasets, layers, models import matplotlib.pyplot as plt import os import os import pathlib import random import tensorflow as tf AUTOTUNE = tf.data.experimental.AUTOTUNE main_path = pathlib.Path('../input/oct2017/OCT2017 ') train_path = main_path / 'train' test_path = main_path / '...
code
16124614/cell_12
[ "text_plain_output_1.png" ]
import pathlib import random main_path = pathlib.Path('../input/oct2017/OCT2017 ') train_path = main_path / 'train' test_path = main_path / 'test' val_path = main_path / 'val' train_path import random train_image_paths = [str(path) for path in list(train_path.glob('*/*.jpeg'))] random.shuffle(train_image_paths) test...
code
122255004/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
def thing1(): thing = input('put something: ') a = 0 for x in list(thing): if x == 'a': a += 1 print('total characters:', len(thing), "\nnumber of a's:", a) thing1()
code
72115124/cell_4
[ "image_output_2.png", "image_output_1.png" ]
from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.callbacks import Callback import matplotlib.pyplot as plt import pandas as pd # data processing, CSV f...
code
72115124/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.callbacks import Callback from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import LabelEncoder from sklearn.model_selection import train_test_split from...
code
72115124/cell_3
[ "text_plain_output_1.png" ]
from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.callbacks import Callback import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) impor...
code
89132235/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] plt.figure(figsize=(10, 10)) for i in range(25): plt.subplot(5, 5, i + 1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.imshow(train_images[i])
code
89132235/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from tensorflow.keras import datasets, layers, models model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2...
code
89132235/cell_7
[ "text_plain_output_1.png" ]
from tensorflow.keras import datasets, layers, models model = models.Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2...
code
89132235/cell_3
[ "text_plain_output_1.png" ]
from tensorflow.keras import datasets, layers, models import tensorflow as tf from tensorflow.keras import datasets, layers, models import matplotlib.pyplot as plt (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
code
129014806/cell_4
[ "image_output_11.png", "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "text_html_output_4.png", "image_output_14.png", "application_vnd.jupyter.stderr_output_4.png", "text_html_output_2.png", "image_output_13.png", "text_html_output_5.png", "image_output_5.png", "text_...
from solarcurtailment import curtailment_calculation file_path = '/kaggle/input/solarunsw/Data' for i in [1, 11, 14, 4, 5, 9]: sample_number = i print('Analyzing sample number {}'.format(i)) data_file = '/data_sample_{}.csv'.format(sample_number) ghi_file = '/ghi_sample_{}.csv'.format(sample_number) ...
code
129014806/cell_2
[ "text_plain_output_1.png" ]
! pip install solarcurtailment
code
129014806/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
from solarcurtailment import curtailment_calculation
code
2041009/cell_4
[ "text_html_output_1.png" ]
import datetime import numpy as np import pandas as pd def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False): features = list([]) for a in groupcolumns: features.append(a) if columnName is not None: features.append(columnName) grpCount = data1.gr...
code
2041009/cell_7
[ "text_plain_output_1.png" ]
import datetime import numpy as np import pandas as pd def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False): features = list([]) for a in groupcolumns: features.append(a) if columnName is not None: features.append(columnName) grpCount = data1.gr...
code
2041009/cell_8
[ "text_plain_output_1.png" ]
import datetime import numpy as np import pandas as pd def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False): features = list([]) for a in groupcolumns: features.append(a) if columnName is not None: features.append(columnName) grpCount = data1.gr...
code
2041009/cell_5
[ "text_plain_output_1.png" ]
import datetime import numpy as np import pandas as pd def LeaveOneOut(data1, data2, groupcolumns, columnName, useLOO=False, cut=1, addNoise=False): features = list([]) for a in groupcolumns: features.append(a) if columnName is not None: features.append(columnName) grpCount = data1.gr...
code
32065505/cell_18
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from gensim.models import KeyedVectors from sklearn.linear_model import LogisticRegression from sklearn.metrics import f1_score import nltk import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec' key...
code
32065505/cell_8
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec' keyed_vec = KeyedVectors.load_word2vec_format(FILE_PATH) for word in ['hello', '!', '2', 'Turing', 'foobarz', 'hi!']: print(word, 'is in the vocabulary:', word in keyed_vec.vocab)
code
32065505/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_sample = pd.read_csv('../input/quora-insincere-questions-classification/train.csv', nrows=6000) train_sample = data_sample[:5000] test_sample = data_sample[5000:] train_sample.head()
code
32065505/cell_3
[ "text_html_output_1.png" ]
import os print(os.listdir('../input'))
code
32065505/cell_17
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors import nltk import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec' keyed_vec = KeyedVectors.load_word2vec_format(FILE_PATH) word_vec = keyed_vec.get_vector('foo...
code
32065505/cell_10
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec' keyed_vec = KeyedVectors.load_word2vec_format(FILE_PATH) word_vec = keyed_vec.get_vector('foobar') print(word_vec.shape) print(word_vec[:25])
code
32065505/cell_12
[ "text_plain_output_1.png" ]
from gensim.models import KeyedVectors FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec' keyed_vec = KeyedVectors.load_word2vec_format(FILE_PATH) word_vec = keyed_vec.get_vector('foobar') keras_embedding = keyed_vec.get_keras_embedding() keras_embedding.get_config()
code
32065505/cell_5
[ "text_plain_output_1.png" ]
FILE_PATH = '../input/fasttext-wikinews/wiki-news-300d-1M.vec' with open(FILE_PATH) as f: for _ in range(5): print(f.readline()[:80])
code
72089413/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import json_lines data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] print(len(data0[0]))
code
72089413/cell_34
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize from tqdm import tqdm from tqdm.notebook import tqdm import json_lines import pandas as pd import random data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): ...
code
72089413/cell_23
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize from tqdm.notebook import tqdm import json_lines import pandas as pd import random data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: ...
code