{ "cells": [ { "cell_type": "markdown", "id": "cb60aea8-54a9-4cf8-9cff-5f9b2043036b", "metadata": {}, "source": [ "# Evaluation \n", "* Container: codna_pytorch_py39" ] }, { "cell_type": "markdown", "id": "4a8e8660-8e76-4a99-8d41-3a408ef1056a", "metadata": {}, "source": [ "## -1. 가상의 GT 작업 수행" ] }, { "cell_type": "code", "execution_count": null, "id": "682bfa96-c977-4dac-bfce-c61829dc521e", "metadata": {}, "outputs": [], "source": [ "import pathlib\n", "import pickle\n", "\n", "gt_list = [\n", " 'rubout n she yn', \n", " 'rubout n she yn',\n", " 'rubout n she yn',\n", " 'rubout n she yn',\n", " 'rubout n she yn',\n", " 'rubout n she yn',\n", " 'rubout n she yn',\n", " 'rubout n she yn',\n", " 'rubout n she yn',\n", " 'rubout n she yn',\n", " 'rubout n she yn',\n", " 'rubout n she yn'\n", "]\n", "\n", "\n", "pathlib.Path(\"opt/ml/processing/input/manifest\").mkdir(parents=True, exist_ok=True)\n", "with open('opt/ml/processing/input/manifest/gt_manifest.pkl', 'wb') as f:\n", " pickle.dump(gt_list, f, protocol=pickle.HIGHEST_PROTOCOL)\n", "\n" ] }, { "cell_type": "markdown", "id": "0b0d0db9-f14a-4166-b438-020d22ac4a2a", "metadata": {}, "source": [ "## 1. 설정값 " ] }, { "cell_type": "code", "execution_count": null, "id": "30eca871-f6d0-43ec-ad20-bc2c938aff0b", "metadata": { "tags": [] }, "outputs": [], "source": [ "select_date = '2023/03/23'\n", "variant_name = 'AllTraffic'\n", "tolerance = 0.5" ] }, { "cell_type": "markdown", "id": "84ecd6e6-2939-4712-b342-136d077100a9", "metadata": {}, "source": [ "## 2. 패키지 설치" ] }, { "cell_type": "code", "execution_count": null, "id": "57b7cd87-f9b2-474d-8f91-c1563eba9918", "metadata": { "tags": [] }, "outputs": [], "source": [ "import boto3\n", "import sys" ] }, { "cell_type": "code", "execution_count": null, "id": "cd61f269-3726-47b7-8a06-8c7f72467092", "metadata": {}, "outputs": [], "source": [ "%%bash\n", "chmod 777 /tmp\n", "apt -y update && apt -y install sox libsox-fmt-all\n", "\n", "pip install --no-cache-dir --upgrade pip\n", "pip install --no-cache-dir -U omegaconf hydra-core librosa sentencepiece youtokentome inflect sox\n", "pip install --no-cache-dir -U braceexpand webdataset editdistance jiwer jsonlines\n", "pip install --no-cache-dir -U pytorch-lightning==1.9.4\n", "pip install --no-cache-dir -U https://github.com/pyannote/pyannote-audio/archive/develop.zip\n", "pip install --no-cache-dir git+https://github.com/huggingface/transformers\n", "pip install --no-cache-dir git+https://github.com/NVIDIA/NeMo.git@main\n", "pip install sagemaker-experiments" ] }, { "cell_type": "markdown", "id": "4e9e6def-d279-4929-b93e-396707193295", "metadata": {}, "source": [ "## 3. parameter store 설정" ] }, { "cell_type": "code", "execution_count": null, "id": "396bb85e-376b-4bb5-9ea5-9580885ba857", "metadata": {}, "outputs": [], "source": [ "import os\n", "import boto3\n", "\n", "class parameter_store():\n", " \n", " def __init__(self, region_name=\"ap-northeast-2\"):\n", " \n", " self.ssm = boto3.client('ssm', region_name=region_name)\n", " \n", " def put_params(self, key, value, dtype=\"String\", overwrite=False, enc=False) -> str:\n", " \n", " #aws ssm put-parameter --name \"RDS-MASTER-PASSWORD\" --value 'PASSWORD' --type \"SecureString\"\n", " if enc: dtype=\"SecureString\"\n", " if overwrite:\n", " strQuery = ''.join(['aws ssm put-parameter', ' --name ', '\"', str(key), '\"', ' --value ', '\"', str(value), '\"', ' --type ', '\"', str(dtype), '\"', ' --overwrite'])\n", " strResponse = os.popen(strQuery).read()\n", " \n", " if strResponse != '': return 'Store suceess'\n", " else: return 'Error'\n", " \n", " def get_params(self, key, enc=False):\n", " \n", " if enc: WithDecryption = True\n", " else: WithDecryption = False\n", " response = self.ssm.get_parameters(\n", " Names=[key,],\n", " WithDecryption=WithDecryption\n", " )\n", " \n", " return response['Parameters'][0]['Value']\n", "\n", " def get_all_params(self, ):\n", "\n", " response = self.ssm.describe_parameters(MaxResults=50)\n", "\n", " return [dicParam[\"Name\"] for dicParam in response[\"Parameters\"]]\n", "\n", " def delete_param(self, listParams):\n", "\n", " response = self.ssm.delete_parameters(\n", " Names=listParams\n", " )\n", " print (f\" parameters: {listParams} is deleted successfully\")" ] }, { "cell_type": "markdown", "id": "057d7bc6-5365-4533-b445-c15f99abf0f3", "metadata": {}, "source": [ "## 4. 기존 설정값 가져오기" ] }, { "cell_type": "code", "execution_count": null, "id": "5951e03a-5637-4fa1-8208-f7e5684c522a", "metadata": {}, "outputs": [], "source": [ "strRegionName=boto3.Session().region_name\n", "pm = parameter_store(strRegionName)" ] }, { "cell_type": "code", "execution_count": null, "id": "1f7ba4a5-8649-4b67-9cdc-ce660b7c137e", "metadata": { "tags": [] }, "outputs": [], "source": [ "prefix = pm.get_params(key=\"PREFIX\")\n", "endpoint_name = pm.get_params(key='ENDPOINTNAME-lg-ramp-cyj-staging')\n", "monitor_output = pm.get_params(key='MONITOROUTPUT-lg-ramp-cyj-staging')\n", "bucket_name = pm.get_params(key=prefix + '-BUCKET')" ] }, { "cell_type": "markdown", "id": "b8ac9782-2e85-4812-b7a4-79c93bf9c7ac", "metadata": {}, "source": [ "## 5. 경로 설정하기" ] }, { "cell_type": "code", "execution_count": null, "id": "76fbcb5d-f805-4c58-b79e-0da766f34ccf", "metadata": {}, "outputs": [], "source": [ "\n", "#s3://sagemaker-us-west-2-322537213286/nemo-prod/inference/monitor_output\n", "#s3://sagemaker-us-west-2-322537213286/nemo-prod/inference/monitor_output/nemo-prod-nemo-experiments-0320-07331679297605/AllTraffic/2023/03/20/07/\n", "\n", "inference_output_s3uri = os.path.join(\n", " monitor_output,\n", " endpoint_name,\n", " variant_name,\n", " select_date\n", ")\n", "\n", "gtmanifest_s3uri = os.path.join(\n", " \"s3://{}\".format(bucket_name),\n", " prefix,\n", " \"gt-manifest\"\n", ")\n", "\n", "output_s3uri = os.path.join(\n", " \"s3://{}\".format(bucket_name),\n", " prefix,\n", " \"pred-output\"\n", ")\n", "\n", "print (f\"bucket_name: {bucket_name}\")\n", "print (f\"endpoint_name: {endpoint_name}\")\n", "print (f\"monitor_output: {monitor_output}\")\n" ] }, { "cell_type": "code", "execution_count": null, "id": "4de3c9b9-9d63-41b8-ae24-f6a4883449a0", "metadata": {}, "outputs": [], "source": [ "# !aws s3 sync ./manifest $gtmanifest_s3uri" ] }, { "cell_type": "markdown", "id": "82575d6d-d04d-4076-a8b8-d64d7842de27", "metadata": {}, "source": [ "## 6. SageMaker Endpoint에서 설정한 Data Capture 정보 가져오기" ] }, { "cell_type": "code", "execution_count": null, "id": "78137f09-c44f-4ad8-b137-d45bc6da82f6", "metadata": {}, "outputs": [], "source": [ "!aws s3 sync $inference_output_s3uri 'opt/ml/processing/input/inference_data'" ] }, { "cell_type": "markdown", "id": "333d3a82-e506-4af3-9f91-634e597488ff", "metadata": {}, "source": [ "## 7. Evaluation 코드 가져오기" ] }, { "cell_type": "code", "execution_count": null, "id": "ba6f0f81-5eae-4935-8cd4-6b8529d98afe", "metadata": {}, "outputs": [], "source": [ "import os\n", "import copy\n", "import boto3\n", "import logging\n", "import json\n", "import jsonlines # !pip install jsonlines 해주기\n", "import torch\n", "import tarfile\n", "\n", "from tqdm.auto import tqdm\n", "from omegaconf import open_dict\n", "\n", "# import glob\n", "import pickle\n", "import sox\n", "import time\n", "import io\n", "import soundfile as sf\n", "import base64\n", "import numpy as np\n", "import pathlib\n", "from sagemaker.s3 import S3Downloader\n", "from datetime import datetime\n", "\n", "from nemo.collections.asr.metrics.wer import word_error_rate\n", "from nemo.collections.asr.parts.utils.transcribe_utils import PunctuationCapitalization\n", "from nemo.core.config import hydra_runner\n", "from nemo.utils import logging\n", "\n", "\n", "use_cuda = torch.cuda.is_available()\n", "device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n", "\n", "# logger = logging.getLogger()\n", "# logger.setLevel(logging.INFO)\n", "# logger.addHandler(logging.StreamHandler())\n", "\n", "\n", "def find_checkpoint(model_dir):\n", " checkpoint_path = None\n", " for (root, dirs, files) in os.walk(model_dir):\n", " if len(files) > 0:\n", " for file_name in files:\n", " if file_name.endswith('last.ckpt'):\n", " checkpoint_path = root + '/' + file_name\n", " return checkpoint_path\n", "\n", "\n", "def find_files(jsonl_dir):\n", " jsonl_list = []\n", " for (root, dirs, files) in os.walk(jsonl_dir):\n", " if len(files) > 0:\n", " for file_name in files:\n", " if file_name.endswith('jsonl'):\n", " jsonl_list.append(root + '/' + file_name)\n", " return jsonl_list\n", "\n", "\n", "def read_manifest(path):\n", " manifest = []\n", " with open(path, 'r') as f:\n", " for line in tqdm(f, desc=\"Reading manifest data\"):\n", " line = line.replace(\"\\n\", \"\")\n", " data = json.loads(line)\n", " manifest.append(data)\n", " return manifest\n", "\n", "\n", "def write_processed_manifest(data, original_path):\n", " original_manifest_name = os.path.basename(original_path)\n", " new_manifest_name = original_manifest_name.replace(\".json\", \"_processed.json\")\n", "\n", " manifest_dir = os.path.split(original_path)[0]\n", " filepath = os.path.join(manifest_dir, new_manifest_name)\n", " with open(filepath, 'w') as f:\n", " for datum in tqdm(data, desc=\"Writing manifest data\"):\n", " datum = json.dumps(datum)\n", " f.write(f\"{datum}\\n\")\n", " print(f\"Finished writing manifest: {filepath}\")\n", " return filepath\n", "\n", "\n", "def apply_preprocessors(manifest, preprocessors):\n", " for processor in preprocessors:\n", " for idx in tqdm(range(len(manifest)), desc=f\"Applying {processor.__name__}\"):\n", " manifest[idx] = processor(manifest[idx])\n", "\n", " print(\"Finished processing manifest !\")\n", " return manifest\n", "\n", "\n", "def change_dir(data):\n", " MANIFEST_PATH = os.environ['MANIFEST_PATH']\n", " WAV_PATH = os.environ['WAV_PATH']\n", " data['audio_filepath'] = data['audio_filepath'].replace(MANIFEST_PATH, WAV_PATH)\n", " return data\n", "\n", "\n", "def predict(asr_model, predictions, targets, target_lengths, predictions_lengths=None):\n", " references = []\n", " with torch.no_grad():\n", " # prediction_cpu_tensor = tensors[0].long().cpu()\n", " targets_cpu_tensor = targets.long().cpu()\n", " tgt_lenths_cpu_tensor = target_lengths.long().cpu()\n", "\n", " # iterate over batch\n", " for ind in range(targets_cpu_tensor.shape[0]):\n", " tgt_len = tgt_lenths_cpu_tensor[ind].item()\n", " target = targets_cpu_tensor[ind][:tgt_len].numpy().tolist()\n", " reference = asr_model.decoding.decode_tokens_to_str(target)\n", " references.append(reference)\n", "\n", " hypotheses, _ = asr_model.decoding.ctc_decoder_predictions_tensor(\n", " predictions, predictions_lengths, fold_consecutive=True\n", " )\n", " return references[0], hypotheses[0]\n", "\n", "def start_retraining_codepipeline():\n", " # region_name = os.environ[\"region\"]\n", " sm_client = boto3.client('sagemaker', region_name=strRegionName)\n", " pipeline_client = boto3.client('codepipeline', region_name=strRegionName)\n", "\n", " response = sm_client.list_projects(\n", " SortBy='CreationTime',\n", " SortOrder='Descending'\n", " )\n", "\n", " for pjt_list in response['ProjectSummaryList']:\n", " if pjt_list['ProjectStatus'] == 'CreateCompleted':\n", " ProjectName = pjt_list['ProjectName']\n", " break\n", "\n", " des_response = sm_client.describe_project(\n", " ProjectName=ProjectName\n", " )\n", "\n", " code_pipeline_name = f\"sagemaker-{des_response['ProjectName']}-{des_response['ProjectId']}-modelbuild\"\n", " pipeline_client.start_pipeline_execution(name=code_pipeline_name) \n", " print(\"Start retraining ........\")\n", " \n", "def main():\n", " \n", " reference_list = []\n", " predicted_list = [] \n", " \n", " \n", " # select_date = os.environ[\"select_date\"]\n", "\n", " # output_list = S3Downloader.list(inference_output_s3uri + f'/output_monitor/{endpoint_name}/{target_model}/{select_date}')\n", " output_list = find_files('opt/ml/processing/input/inference_data')\n", " print(f\"output_list: {output_list}\")\n", " with open('opt/ml/processing/input/manifest/gt_manifest.pkl', 'rb') as f:\n", " gt_list = pickle.load(f)\n", "\n", "\n", " result_data = []\n", "\n", " train_mount_dir=f\"opt/ml/input/data/training/\"\n", " test_mount_dir=f\"opt/ml/input/data/testing/\"\n", " manifest_path = f\"opt/ml/processing/output/{select_date}/manifest\"\n", " manifest_file = f\"{manifest_path}/test_manifest.json\"\n", " result_wav_file = f\"opt/ml/processing/output/{select_date}/wav\"\n", "\n", " pathlib.Path(manifest_path).mkdir(parents=True, exist_ok=True)\n", " pathlib.Path(result_wav_file).mkdir(parents=True, exist_ok=True)\n", "\n", " seq = 0\n", " with open(manifest_file, 'w') as fout:\n", " for json_list in output_list:\n", " # Read a specific file\n", "\n", " fname = json_list.split('/')[-1]\n", " fname = fname.split('.')[0]\n", " f_date = select_date.replace('/','-')\n", " \n", "\n", " with jsonlines.open(json_list) as read_file:\n", " for res in read_file.iter():\n", " filename = f\"{result_wav_file}/{f_date}-{fname}-{seq}.wav\"\n", " sf_data, samplerate = sf.read(io.BytesIO(base64.b64decode(res['captureData']['endpointInput']['data'])))\n", " sf.write(file=filename, data=sf_data, samplerate=samplerate)\n", "\n", " np_val = base64.b64decode(res['captureData']['endpointOutput']['data'])\n", " np_val = json.loads(np_val)\n", " \n", " # transcript = '-'.join(np_val['result'])\n", " transcript = ' '.join(np_val['result'])\n", " # transcript = np_val['result']\n", " predicted_list.append(transcript)\n", " reference_list.append(gt_list[seq])\n", "\n", " print(f\"predicted_list : {predicted_list}\")\n", " print(f\"reference_list : {reference_list}\")\n", " mounted_audio_path = filename.replace(result_wav_file, test_mount_dir)\n", "\n", " # import sox here to not require sox to be available for importing all utils.\n", " duration = sox.file_info.duration(filename)\n", "\n", " # Write the metadata to the manifest\n", " metadata = {\"audio_filepath\": mounted_audio_path, \"duration\": duration, \"pred_text\": transcript}\n", " json.dump(metadata, fout)\n", " fout.write('\\n')\n", " seq += 1\n", " \n", "\n", " pc = PunctuationCapitalization('.,?')\n", " reference_list = pc.separate_punctuation(reference_list)\n", " reference_list = pc.do_lowercase(reference_list)\n", " predicted_list = pc.do_lowercase(predicted_list)\n", " reference_list = pc.rm_punctuation(reference_list)\n", " predicted_list = pc.rm_punctuation(predicted_list)\n", " \n", "\n", " # Compute the WER\n", " cer = word_error_rate(hypotheses=predicted_list, references=reference_list, use_cer=True)\n", " wer = word_error_rate(hypotheses=predicted_list, references=reference_list, use_cer=False)\n", "\n", " use_cer = False\n", "\n", " if use_cer:\n", " metric_name = 'CER'\n", " metric_value = cer\n", " else:\n", " metric_name = 'WER'\n", " metric_value = wer\n", "\n", " print(f\" tolerance : {tolerance}\")\n", " print(f\" tolerance : {type(tolerance)}\")\n", " print(f\" metric_value : {metric_value}\")\n", " print(f\" metric_value : {type(metric_value)}\")\n", "\n", "\n", " if tolerance is not None:\n", " if metric_value > tolerance:\n", " print(f\"Got {metric_name} of {metric_value}, which was higher than tolerance={tolerance}\")\n", " start_retraining_codepipeline()\n", "\n", " print(f'Got {metric_name} of {metric_value}. Tolerance was {tolerance}')\n", " else:\n", " print(f'Got {metric_name} of {metric_value}')\n", "\n", " print(f'Dataset WER/CER ' + str(round(100 * wer, 2)) + \"%/\" + str(round(100 * cer, 2)) + \"%\")\n", "\n", " wer_result = wer\n", "\n", "\n", "if __name__ == '__main__':\n", " main()" ] }, { "cell_type": "code", "execution_count": null, "id": "9a226f07-136a-4958-9d3f-76c1d1ccea60", "metadata": { "tags": [] }, "outputs": [], "source": [ "output_s3uri" ] }, { "cell_type": "code", "execution_count": null, "id": "b1d67d8c-b365-48f5-a5dc-a861e09895d3", "metadata": {}, "outputs": [], "source": [ "!aws s3 sync opt/ml/processing/input $output_s3uri" ] }, { "cell_type": "code", "execution_count": null, "id": "10a909b2-51e7-4f6d-8d7a-54aca105d609", "metadata": {}, "outputs": [], "source": [ "## Retraining\n", "pm.put_params(key=\"-\".join([prefix, \"RETRAIN\"]), value=True, overwrite=True)\n" ] }, { "cell_type": "code", "execution_count": null, "id": "ea21da3f-8551-4727-a0d0-61f7f6f07918", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "availableInstances": [ { "_defaultOrder": 0, "_isFastLaunch": true, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 4, "name": "ml.t3.medium", "vcpuNum": 2 }, { "_defaultOrder": 1, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.t3.large", "vcpuNum": 2 }, { "_defaultOrder": 2, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.t3.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 3, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.t3.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 4, "_isFastLaunch": true, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.m5.large", "vcpuNum": 2 }, { "_defaultOrder": 5, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.m5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 6, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.m5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 7, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.m5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 8, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.m5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 9, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.m5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 10, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.m5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 11, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.m5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 12, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.m5d.large", "vcpuNum": 2 }, { "_defaultOrder": 13, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.m5d.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 14, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.m5d.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 15, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.m5d.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 16, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.m5d.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 17, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.m5d.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 18, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.m5d.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 19, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.m5d.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 20, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": true, "memoryGiB": 0, "name": "ml.geospatial.interactive", "supportedImageNames": [ "sagemaker-geospatial-v1-0" ], "vcpuNum": 0 }, { "_defaultOrder": 21, "_isFastLaunch": true, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 4, "name": "ml.c5.large", "vcpuNum": 2 }, { "_defaultOrder": 22, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.c5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 23, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.c5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 24, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.c5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 25, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 72, "name": "ml.c5.9xlarge", "vcpuNum": 36 }, { "_defaultOrder": 26, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 96, "name": "ml.c5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 27, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 144, "name": "ml.c5.18xlarge", "vcpuNum": 72 }, { "_defaultOrder": 28, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.c5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 29, "_isFastLaunch": true, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.g4dn.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 30, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.g4dn.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 31, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.g4dn.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 32, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.g4dn.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 33, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.g4dn.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 34, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.g4dn.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 35, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 61, "name": "ml.p3.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 36, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 244, "name": "ml.p3.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 37, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 488, "name": "ml.p3.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 38, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.p3dn.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 39, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.r5.large", "vcpuNum": 2 }, { "_defaultOrder": 40, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.r5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 41, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.r5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 42, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.r5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 43, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.r5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 44, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.r5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 45, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 512, "name": "ml.r5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 46, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.r5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 47, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.g5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 48, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.g5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 49, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.g5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 50, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.g5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 51, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.g5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 52, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.g5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 53, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.g5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 54, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.g5.48xlarge", "vcpuNum": 192 } ], "instance_type": "ml.g4dn.xlarge", "kernelspec": { "display_name": "Python 3 (PyTorch 1.13 Python 3.9 GPU Optimized)", "language": "python", "name": "python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-west-2:236514542706:image/pytorch-1.13-gpu-py39" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.16" } }, "nbformat": 4, "nbformat_minor": 5 }