Commit 27683fb0 authored by Pawel Rosikiewicz 💬
Saved changes to: notebooks/03_Feature_Extraction-Copy1.ipynb, notebooks/03_Feature_Extraction.ipynb, notebooks/05_skilearn_and_dense_nn_models_b.ipynb
parent f262be7d
Pipeline #187611 passed with stage in 19 seconds
%% Cell type:markdown id: tags:
# SkinAnaliticAI, Skin Cancer Detection with AI Deep Learning
## __Evaluation of the Harvard dataset with different AI classification techniques using the FastClassAI pipeline__
Author: __Pawel Rosikiewicz__
prosikiewicz@gmail.com
License: __MIT__
https://opensource.org/licenses/MIT
Copyright (C) 2021.01.30 Pawel Rosikiewicz
%% Cell type:markdown id: tags:
#### standard imports
%% Cell type:code id: tags:
``` python
import os # for changing and navigating files and folders
import sys
import shutil
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
from tensorflow.keras.preprocessing.image import ImageDataGenerator
```
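%% Cell type:markdown id: tags:
`ImageDataGenerator` is imported because `encode_images()` (loaded below) catalogs each subset folder with Keras generators; the "Found N images belonging to K classes." lines in the outputs further down are the standard `flow_from_directory()` messages. A minimal sketch of that pattern follows; the directory path and rescaling are illustrative assumptions, not the exact internals of `encode_images()`.
%% Cell type:code id: tags:
``` python
# illustrative only: cataloging one subset folder with a Keras generator;
# the path and rescaling below are assumptions, not encode_images() internals
datagen = ImageDataGenerator(rescale=1.0 / 255)  # rescaling is an assumption
subset_iterator = datagen.flow_from_directory(
    directory="data/interim/HAM10000__Cancer_Detection_And_Classification/train_01",  # hypothetical path
    target_size=(224, 224),  # must match the tf-hub module input size
    batch_size=3000,         # large enough to hold a whole subset in one batch
    shuffle=False,           # keep file order aligned with the saved labels
)
```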
%% Cell type:code id: tags:
``` python
# setup basedir
basedir = os.path.dirname(os.getcwd())
os.chdir(basedir)
sys.path.append(basedir)
# set up paths for the project
PATH_raw = os.path.join(basedir, "data/raw")
PATH_interim = os.path.join(basedir, "data/interim")
PATH_models = os.path.join(basedir, "models")
PATH_interim_dataset_summary_tables = os.path.join(PATH_interim, "dataset_summary_tables") # created in this notebook,
```
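%% Cell type:markdown id: tags:
The summary-tables directory does not exist on a fresh checkout; a minimal, idempotent way to create it (assuming standard `os.makedirs` semantics):
%% Cell type:code id: tags:
``` python
# create the output directory for dataset summary tables if it is missing
os.makedirs(PATH_interim_dataset_summary_tables, exist_ok=True)
```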
%% Cell type:code id: tags:
``` python
# load functions,
from src.utils.feature_extraction_tools import encode_images
# load configs
from src.configs.project_configs import CLASS_DESCRIPTION # information on each class, including a descriptive class name and a diagnostic description - used to help with the project
from src.configs.tfhub_configs import TFHUB_MODELS # names of the TF-hub modules that I preselected for feature extraction, with all relevant info
from src.configs.dataset_configs import DATASET_CONFIGS # names created for classes, assigned to the original ones, and colors assigned to these classes
from src.configs.dataset_configs import CLASS_LABELS_CONFIGS # names created for classes, assigned to the original ones, and colors assigned to these classes
from src.configs.dataset_configs import DROPOUT_VALUE # str, special value indicating samples to remove in class labels
from src.configs.config_functions import DEFINE_DATASETS # function that creates data-subset collections for one dataset (custom-made for this project)
# set project variables
PROJECT_NAME = "SkinAnaliticAI_Harvard_dataset_evaluation"
DATASET_NAME = "HAM10000" # name used in the config files to identify all info on that dataset variant
DATASET_VARIANTS = DATASET_CONFIGS[DATASET_NAME]["labels"] # class labels that will be used; SORT_FILES_WITH must be included
```
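%% Cell type:markdown id: tags:
An optional sanity check, printing the dataset variants the loops below will iterate over; nothing here is required by the pipeline.
%% Cell type:code id: tags:
``` python
# optional: list the class-label variants that will be processed
print(f"Dataset: {DATASET_NAME}")
for variant in DATASET_VARIANTS:
    print(f" - {variant}")
```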
%% Cell type:markdown id: tags:
## FEATURE EXTRACTION
%% Cell type:code id: tags:
``` python
# preset values
generator_batch_size = 3000 # no more than 3000 images will be taken, but we expect no more than 2000 in this task
use_url = "no" # the script is adapted to use local file paths only, but the configs also carry urls, and a url can be used with the feature extraction function

# extract features from the images in each dataset variant using one or more tf-hub modules
for dv_i, dataset_variant in enumerate(DATASET_VARIANTS):
    print(f"\n- {dv_i} - Extracting features from: {dataset_variant}")

    # find the names of the train/valid/test subsets in the dataset folder
    # (note: glob has no "|" alternation, so each prefix is globbed separately)
    os.chdir(os.path.join(PATH_interim, f"{DATASET_NAME}__{dataset_variant}"))
    subset_names_to_encode = []
    for prefix in ("train", "valid", "test"):
        subset_names_to_encode.extend(glob.glob(f"{prefix}*"))

    # create lists with the info required for feature extraction from images;
    # this step is very useful when many models are used for feature extraction
    tfmodules = list(TFHUB_MODELS.keys()) # names of the tf-hub models used
    module_names = [TFHUB_MODELS[x]['module_name'] for x in tfmodules]
    module_file_names = [TFHUB_MODELS[x]['file_name'] for x in tfmodules]
    img_input_size = [TFHUB_MODELS[x]['input_size'] for x in tfmodules]

    # extract features from the images in each subset, and store them together as one batch array
    for i, (one_module_name, one_module_file_name, one_img_input_size) in enumerate(
            zip(module_names, module_file_names, img_input_size)):
        '''
        all data subsets found in load_dir are encoded automatically;
        for each data subset, a logfile, a batch_labels csv file,
        and an npz file with the encoded features are created
        '''
        print("\n ................................................")
        print(f" - {dv_i}/{i} module: {one_module_name}")
        print(f" - {dv_i}/{i} filename or url: {one_module_file_name}")
        print(f" - {dv_i}/{i} RGB image size : {one_img_input_size}")
        print(f" - {dv_i}/{i} dataset subsets : {subset_names_to_encode}")
        print(f" - Cataloging subsets, then extracting features from all images")
        print(f" - Important: Each subset will be saved as one matrix")
        print("\n")

        # I am using modules saved on disk, thus I need to build the full path to them
        if use_url == "no":
            one_module_full_path = os.path.join(PATH_models, one_module_file_name)
        else:
            one_module_full_path = one_module_file_name # here the module url is used (no path)

        # extract features
        encode_images(
            # .. dataset name & directories,
            dataset_name = f"{DATASET_NAME}__{dataset_variant}", # name used when saving the encoded files, logfiles, and everything else related to encoding
            subset_names = subset_names_to_encode, # list of subset-folder names in load_dir, if any
            load_dir = os.path.join(PATH_interim, f"{DATASET_NAME}__{dataset_variant}"), # full path to the input data, i.e. a folder with either class-named image folders, or subset folders each containing class-named folders
            save_dir = os.path.join(PATH_interim, f"{DATASET_NAME}__{dataset_variant}__extracted_features"), # all new files are saved as one batch, with a logfile; if None, load_dir is used
            # .. encoding module parameters,
            module_name = one_module_name, # name used when saving files
            module_location = one_module_full_path, # full path to a given module, or its url
            img_target_size = one_img_input_size, # image resolution in pixels
            generator_batch_size = generator_batch_size, # must be larger than or equal to the size of the largest subset
            generator_shuffle = False,
            # .. other,
            save_files = True,
            verbose = False
        )
```
%%%% Output: stream
- 0 - Extracting features from: Cancer_Detection_And_Classification
................................................
- 0/0 module: MobileNet_v2
- 0/0 filename or url: imagenet_mobilenet_v2_100_224_feature_vector_2
- 0/0 RGB image size : (224, 224)
- 0/0 dataset subsets : ['train_05', 'train_02', 'valid_01', 'train_03', 'train_04', 'test_01', 'train_01', 'train_06', 'valid_02', 'train_07', 'test_02']
- Cataloging subsets, then extracting features from all images
- Important: Each subset will be saved as one matrix
Found 744 images belonging to 7 classes.
Found 742 images belonging to 7 classes.
Found 740 images belonging to 7 classes.
Found 742 images belonging to 7 classes.
Found 744 images belonging to 7 classes.
Found 367 images belonging to 7 classes.
Found 742 images belonging to 7 classes.
Found 738 images belonging to 7 classes.
Found 741 images belonging to 7 classes.
Found 751 images belonging to 7 classes.
Found 367 images belonging to 7 classes.
INFO:tensorflow:Saver not created because there are no variables in the graph to restore
................................................
- 0/1 module: BiT_M_Resnet101
- 0/1 filename or url: bit_m-r101x1_1
- 0/1 RGB image size : (224, 224)
- 0/1 dataset subsets : ['train_05', 'train_02', 'valid_01', 'train_03', 'train_04', 'test_01', 'train_01', 'train_06', 'valid_02', 'train_07', 'test_02']
- Cataloging subsets, then extracting features from all images
- Important: Each subset will be saved as one matrix
Found 744 images belonging to 7 classes.
Found 742 images belonging to 7 classes.
Found 740 images belonging to 7 classes.
Found 742 images belonging to 7 classes.
Found 744 images belonging to 7 classes.
Found 367 images belonging to 7 classes.
Found 742 images belonging to 7 classes.
Found 738 images belonging to 7 classes.
Found 741 images belonging to 7 classes.
Found 751 images belonging to 7 classes.
Found 367 images belonging to 7 classes.
- 1 - Extracting features from: Cancer_Risk_Groups
................................................
- 1/0 module: MobileNet_v2
- 1/0 filename or url: imagenet_mobilenet_v2_100_224_feature_vector_2
- 1/0 RGB image size : (224, 224)
- 1/0 dataset subsets : ['train_05', 'train_02', 'valid_01', 'train_03', 'train_04', 'test_01', 'train_01', 'train_06', 'valid_02', 'train_07', 'test_02']
- Cataloging subsets, then extracting features from all images
- Important: Each subset will be saved as one matrix
Found 743 images belonging to 3 classes.
Found 742 images belonging to 3 classes.
Found 741 images belonging to 3 classes.
Found 742 images belonging to 3 classes.
Found 743 images belonging to 3 classes.
Found 369 images belonging to 3 classes.
Found 741 images belonging to 3 classes.
Found 740 images belonging to 3 classes.
Found 741 images belonging to 3 classes.
Found 746 images belonging to 3 classes.
Found 370 images belonging to 3 classes.
INFO:tensorflow:Saver not created because there are no variables in the graph to restore
................................................
- 1/1 module: BiT_M_Resnet101
- 1/1 filename or url: bit_m-r101x1_1
- 1/1 RGB image size : (224, 224)
- 1/1 dataset subsets : ['train_05', 'train_02', 'valid_01', 'train_03', 'train_04', 'test_01', 'train_01', 'train_06', 'valid_02', 'train_07', 'test_02']
- Cataloging subsets, then extracting features from all images
- Important: Each subset will be saved as one matrix
Found 743 images belonging to 3 classes.
Found 742 images belonging to 3 classes.
Found 741 images belonging to 3 classes.
Found 742 images belonging to 3 classes.
Found 743 images belonging to 3 classes.
Found 369 images belonging to 3 classes.
Found 741 images belonging to 3 classes.
Found 740 images belonging to 3 classes.
Found 741 images belonging to 3 classes.
Found 746 images belonging to 3 classes.
Found 370 images belonging to 3 classes.
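%% Cell type:markdown id: tags:
#### loading the extracted features (sketch)
Each subset is saved as one feature matrix (`.npz`) together with a `batch_labels` csv file in the `...__extracted_features` folder. The sketch below shows one way to inspect those files; the `*.npz` glob pattern and the variant name are assumptions, since the exact file names are generated inside `encode_images()`.
%% Cell type:code id: tags:
``` python
# list and open the saved feature matrices; "*.npz" is an assumed pattern -
# the exact file names are produced inside encode_images()
features_dir = os.path.join(PATH_interim, "HAM10000__Cancer_Detection_And_Classification__extracted_features")
for npz_path in sorted(glob.glob(os.path.join(features_dir, "*.npz"))):
    with np.load(npz_path, allow_pickle=True) as npz:
        encoded_batch = npz[npz.files[0]]  # each subset was saved as one matrix
    print(os.path.basename(npz_path), encoded_batch.shape)
```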
@@ -82,30 +82,6 @@
"from src.configs.model_parameters_configs import MODEL_PARAMETERS_GRID # dict, key: method name, value: ParameterGrid"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'ham10000': {'info': 'raw data grouped with original classes, no augmentation, no duplicate removal',\n",
" 'labels': ['cancer_detection_and_classification', 'cancer_risk_groups']},\n",
" 'ham10000_workshop': {'info': 'raw data grouped with original classes, no augmentation, no duplicate removal',\n",
" 'labels': ['cancer_detection_and_classification', 'cancer_risk_groups']}}"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"PATH_interim\n",
"DATASET_CONFIGS"
]
},
{
"cell_type": "code",
"execution_count": 4,
@@ -135,9 +111,40 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 5,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0. Evaluating 16 knn models for:\n",
" .... cancer_detection_and_classification, MobileNet_v2 .... ['small_subset_data', 'all_data']\n",
"1. Evaluating 16 knn models for:\n",
" .... cancer_detection_and_classification, BiT_M_Resnet101 .... ['small_subset_data', 'all_data']\n",
"2. Evaluating 16 knn models for:\n",
" .... cancer_risk_groups, MobileNet_v2 .... ['small_subset_data', 'all_data']\n",
"3. Evaluating 16 knn models for:\n",
" .... cancer_risk_groups, BiT_M_Resnet101 .... ['small_subset_data', 'all_data']\n",
"4. Evaluating 54 random_forest models for:\n",
" .... cancer_detection_and_classification, MobileNet_v2 .... ['small_subset_data', 'all_data']\n",
"5. Evaluating 54 random_forest models for:\n",
" .... cancer_detection_and_classification, BiT_M_Resnet101 .... ['small_subset_data', 'all_data']\n",
"6. Evaluating 54 random_forest models for:\n",
" .... cancer_risk_groups, MobileNet_v2 .... ['small_subset_data', 'all_data']\n",
"7. Evaluating 54 random_forest models for:\n",
" .... cancer_risk_groups, BiT_M_Resnet101 .... ['small_subset_data', 'all_data']\n",
"8. Evaluating 16 dense_nn models for:\n",
" .... cancer_detection_and_classification, MobileNet_v2 .... ['small_subset_data', 'all_data']\n",
"9. Evaluating 16 dense_nn models for:\n",
" .... cancer_detection_and_classification, BiT_M_Resnet101 .... ['small_subset_data', 'all_data']\n",
"10. Evaluating 16 dense_nn models for:\n",
" .... cancer_risk_groups, MobileNet_v2 .... ['small_subset_data', 'all_data']\n",
"11. Evaluating 16 dense_nn models for:\n",
" .... cancer_risk_groups, BiT_M_Resnet101 .... ['small_subset_data', 'all_data']\n"
]
}
],
"source": [
"# --------------------\n",
"train_models = False # for security\n",
@@ -211,6 +218,123 @@
" \n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0. Evaluating 16 knn models for:\n",
" .... cancer_detection_and_classification, MobileNet_v2 .... ['small_subset_data', 'all_data']\n",
"\n",
" - - - CYCLE: 0 - - -\n",
"\n",
"\n",
"Grid search for - knn - with 16 params combinations: 2021-04-26 21:40:27.462311\n",
" method: knn\n",
"run_name: small_subset_data__run01\n",
"dataset_name: ham10000_workshop\n",
"dataset_variant: cancer_detection_and_classification\n",
"module_name: MobileNet_v2\n",
"Number of combinations: 16\n",
"Unit test run: False\n",
"\n",
"."
]
},
{
"ename": "ValueError",
"evalue": "Cannot load file containing pickled data when allow_pickle=False",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-6-059ac1c5a076>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 54\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mgeneral_parameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mmodel_parameters\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 56\u001b[0;31m \u001b[0;34m**\u001b[0m\u001b[0minput_data\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 57\u001b[0m )\n\u001b[1;32m 58\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/work/amld-2021-workshop/src/utils/FastClassAI_skilearn_tools.py\u001b[0m in \u001b[0;36mtrain_skilearn_models_iteratively\u001b[0;34m(run_ID, dataset_name, dataset_variant, module_name, subset_composition_list, data_subsets_role, subset_collection_names, method_name, grid, models_selected_at_each_cycle, include_method_variant_with_model_selection, include_random_nr_with_model_selection, sort_models_by, save_path, save_partial_results, class_encoding, class_decoding, valid_proportion, dropout_value, unit_test, verbose)\u001b[0m\n\u001b[1;32m 391\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 392\u001b[0m \u001b[0;31m# other,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 393\u001b[0;31m \u001b[0mverbose\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mverbose\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 394\u001b[0m ) \n\u001b[1;32m 395\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/work/amld-2021-workshop/src/utils/FastClassAI_skilearn_tools.py\u001b[0m in \u001b[0;36mtrain_and_test_sklearn_models\u001b[0;34m(run_ID, dataset_name, dataset_variant, module_name, subset_collection_names, subset_collection_composition_dict, data_subsets_role, method_name, grid, models_selected_at_each_cycle, include_method_variant_with_selection, include_random_nr_with_selection, sort_models_by, save_path, save_partial_results, class_encoding, class_decoding, train_proportion, dropout_value, unit_test, verbose)\u001b[0m\n\u001b[1;32m 260\u001b[0m \u001b[0mstore_predictions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[0mtrack_progres\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdisplay_partial_results\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 262\u001b[0;31m \u001b[0mverbose\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 263\u001b[0m ) \n\u001b[1;32m 264\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/work/amld-2021-workshop/src/utils/sklearn_models_tools.py\u001b[0m in \u001b[0;36msklearn_grid_search\u001b[0;34m(method, grid, run_name, dataset_name, dataset_variant, module_name, file_namepath_table, file_namepath_table_dict, PATH_batch_data, PATH_batch_labels, PATH_results, class_encoding, class_decoding, dropout_value, train_subset_name, valid_subset_name, test_subset_name_list, train_proportion, unit_test, store_predictions, track_progres, verbose)\u001b[0m\n\u001b[1;32m 382\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 383\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchdir\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mPATH_batch_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 384\u001b[0;31m \u001b[0mencoded_img_batch\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mone_data_batch_filename\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 385\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 386\u001b[0m \u001b[0;31m# ......\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m/opt/conda/lib/python3.7/site-packages/numpy/lib/npyio.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(file, mmap_mode, allow_pickle, fix_imports, encoding)\u001b[0m\n\u001b[1;32m 455\u001b[0m \u001b[0;31m# Try a pickle\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 456\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mallow_pickle\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 457\u001b[0;31m raise ValueError(\"Cannot load file containing pickled data \"\n\u001b[0m\u001b[1;32m 458\u001b[0m \"when allow_pickle=False\")\n\u001b[1;32m 459\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mValueError\u001b[0m: Cannot load file containing pickled data when allow_pickle=False"
]
}
],
"source": [
"# --------------------\n",
"train_models = True # for security\n",
"# --------------------\n",
"\n",
"\n",
"# set general prameters, that are the same to all cycles and grid search functions\n",
"ai_methods = list(MODEL_PARAMETERS_GRID.keys())\n",
"module_names = list(TFHUB_MODELS.keys())\n",
"grid_for_all_methods = MODEL_PARAMETERS_GRID\n",
"general_parameters = dict(\n",
" run_ID = \"run01\",\n",
" save_path = PATH_results,\n",
" save_partial_results = True, \n",
" unit_test = False,\n",
" verbose = True\n",
") \n",
"\n",
"counter = 0\n",
"for ai_method in ai_methods:\n",
" for dataset_variant in DATASET_VARIANTS:\n",
" for module_name in module_names:\n",
" \n",
" # define model paramters and input data ( some varinables depens on dataset varinat and tf hub model selected)\n",
" model_parameters = dict(\n",
" method_name = ai_method,\n",
" grid = grid_for_all_methods[ai_method], \n",
" models_selected_at_each_cycle = 0.1, # int, or float, if float, a top franction is used, if int, a top nr of models is used, \n",
" include_method_variant_with_model_selection = True, # bool, if True, top models_selected_at_each_cycle wiht different model variant will be selected to next cycle\n",
" include_random_nr_with_model_selection = False, # bool, if True, top models_selected_at_each_cycle wiht different random nr will be selected to next cycle\n",
" sort_models_by = \"model_acc_valid\" # str {\"model_acc_valid\", \"model_acc_train\", \"model_acc_test\"}, in the future I will add other metrics \n",
" )\n",
" \n",
" input_data = dict(\n",
" dataset_name = DATASET_NAME,\n",
" dataset_variant = dataset_variant,\n",
" module_name = module_name, \n",
" subset_composition_list = SUBSET_COMPOSITION, # from configs, \n",
" data_subsets_role = DATA_SUBSET_ROLES,\n",
" subset_collection_names = [\"small_subset_data\", \"all_data\"], # sets the order \n",
" class_encoding = CLASS_LABELS_CONFIGS[dataset_variant][\"class_encoding\"],\n",
" class_decoding = CLASS_LABELS_CONFIGS[dataset_variant][\"class_decoding\"],\n",
" valid_proportion = 0.3, # float, 0-1, on how much of the data shdould be randomly sorted into train subset, used only if valid datasusbet role is == None, \n",
" dropout_value = None, \n",
" )\n",
" \n",
" # info:\n",
" print(f\"{counter}. Evaluating {len(grid_for_all_methods[ai_method])} {ai_method} models for:\")\n",
" print(f\" .... {dataset_variant}, {module_name} .... {input_data['subset_collection_names']}\") \n",
" counter+=1\n",
" \n",
" if train_models==True:\n",
" if grid_for_all_methods[ai_method][0]['method_group'] == \"sklearn_models\":\n",
" train_skilearn_models_iteratively(\n",
" **general_parameters,\n",
" **model_parameters, \n",
" **input_data\n",
" )\n",
"\n",
" elif grid_for_all_methods[ai_method][0]['method_group'] == \"cnn_transfer_learning\":\n",
" train_dense_network_NN_models_iteratively(\n",
" plot_history = True, # adds a nice plot with acc and error after each model is trained\n",
" **general_parameters,\n",
" **model_parameters, \n",
" **input_data,\n",
" )\n",
" else:\n",
" pass\n",
" else:\n",
" pass\n",
" \n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},