{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "![Pipeline](./img/generative_ai_pipeline_rlhf_plus.png)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "svmem(total=66814242816, available=64650002432, percent=3.2, used=1483964416, free=52365144064, active=2943295488, inactive=9972576256, buffers=2768896, cached=12962365440, shared=913408, slab=942276608)\n" ] } ], "source": [ "import psutil\n", "\n", "notebook_memory = psutil.virtual_memory()\n", "print(notebook_memory)\n", "\n", "if notebook_memory.total < 32 * 1000 * 1000 * 1000:\n", " print('*******************************************') \n", " print('YOU ARE NOT USING THE CORRECT INSTANCE TYPE')\n", " print('PLEASE CHANGE INSTANCE TYPE TO m5.2xlarge ')\n", " print('*******************************************')\n", "else:\n", " correct_instance_type=True" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Quantitative Results with ROUGE Metric\n", "\n", "The [ROUGE metric](https://en.wikipedia.org/wiki/ROUGE_(metric)) helps quantify the validity of summarizations produced by models. It compares summarizations to a \"baseline\" summary which is usually created by a human. While not perfect, it does give an indication to the overall increase in summarization effectiveness that we have accomplished by fine-tuning." ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\u001b[33m\n", "\u001b[0mNote: you may need to restart the kernel to use updated packages.\n" ] } ], "source": [ "%pip install --disable-pip-version-check -q \\\n", " transformers==4.27.2 \\\n", " datasets==2.9.0 \\\n", " accelerate==0.17.0 \\\n", " promptsource==0.2.3 \\\n", " evaluate==0.4.0 \\\n", " trl==0.4.1 \\\n", " rouge_score==0.1.2 \\\n", " loralib==0.1.1" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Create prompts for few-shot, one-shot, zero-shot inference on sample data" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "dataset_templates_name = 'amazon_us_reviews/Wireless_v1_00'\n", "prompt_template_name = 'Generate review headline based on review body'" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "tags": [] }, "outputs": [ { "data": { "text/html": [ "
" ], "text/plain": [ "<__main__.DiscoveryWidget at 0x7fa3b05e7690>" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Shape of dataframe (145427, 15)\n" ] }, { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
marketplacecustomer_idreview_idproduct_idproduct_parentproduct_titleproduct_categorystar_ratinghelpful_votestotal_votesvineverified_purchasereview_headlinereview_bodyreview_date
0US21269168RSH1OZ87OYK92B013PURRZW603406193Madden NFL 16 - Xbox One Digital CodeDigital_Video_Games223NNA slight improvement from last year.I keep buying madden every year hoping they ge...2015-08-31
1US133437R1WFOQ3N9BO65IB00F4CEHNK341969535Xbox Live Gift CardDigital_Video_Games500NYFive StarsAwesome2015-08-31
2US45765011R3YOOS71KM5M9B00DNHLFQA951665344Command & Conquer The Ultimate Collection [Ins...Digital_Video_Games500NYHail to the great Yuri!If you are prepping for the end of the world t...2015-08-31
3US113118R3R14UATT3OUFUB004RMK5QG395682204Playstation Plus SubscriptionDigital_Video_Games500NYFive StarsPerfect2015-08-31
4US22151364RV2W9SGDNQA2CB00G9BNLQE640460561Saints Row IV - Enter The Dominatrix [Online G...Digital_Video_Games500NYFive StarsAwesome!2015-08-31
\n", "
" ], "text/plain": [ " marketplace customer_id review_id product_id product_parent \\\n", "0 US 21269168 RSH1OZ87OYK92 B013PURRZW 603406193 \n", "1 US 133437 R1WFOQ3N9BO65I B00F4CEHNK 341969535 \n", "2 US 45765011 R3YOOS71KM5M9 B00DNHLFQA 951665344 \n", "3 US 113118 R3R14UATT3OUFU B004RMK5QG 395682204 \n", "4 US 22151364 RV2W9SGDNQA2C B00G9BNLQE 640460561 \n", "\n", " product_title product_category \\\n", "0 Madden NFL 16 - Xbox One Digital Code Digital_Video_Games \n", "1 Xbox Live Gift Card Digital_Video_Games \n", "2 Command & Conquer The Ultimate Collection [Ins... Digital_Video_Games \n", "3 Playstation Plus Subscription Digital_Video_Games \n", "4 Saints Row IV - Enter The Dominatrix [Online G... Digital_Video_Games \n", "\n", " star_rating helpful_votes total_votes vine verified_purchase \\\n", "0 2 2 3 N N \n", "1 5 0 0 N Y \n", "2 5 0 0 N Y \n", "3 5 0 0 N Y \n", "4 5 0 0 N Y \n", "\n", " review_headline \\\n", "0 A slight improvement from last year. \n", "1 Five Stars \n", "2 Hail to the great Yuri! \n", "3 Five Stars \n", "4 Five Stars \n", "\n", " review_body review_date \n", "0 I keep buying madden every year hoping they ge... 2015-08-31 \n", "1 Awesome 2015-08-31 \n", "2 If you are prepping for the end of the world t... 2015-08-31 \n", "3 Perfect 2015-08-31 \n", "4 Awesome! 2015-08-31 " ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import pandas as pd\n", "import csv\n", "file = './data-tsv/amazon_reviews_us_Digital_Video_Games_v1_00.tsv.gz'\n", "\n", "# Read the file\n", "df = pd.read_csv(file, delimiter=\"\\t\", quoting=csv.QUOTE_NONE, compression=\"gzip\")\n", "\n", "df.isna().values.any()\n", "df = df.dropna()\n", "df = df.reset_index(drop=True) \n", "\n", "print(\"Shape of dataframe {}\".format(df.shape))\n", "\n", "# Convert Pandas dataframes into Datasets\n", "import datasets\n", "from datasets import Dataset\n", "\n", "# Create Dataset objects (Arrow PyTables) from Pandas dataframes\n", "dataset = Dataset.from_pandas(df)\n", "df.head()" ] }, { "cell_type": "code", "execution_count": 9, "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "*** Available prompts:\n", "Generate review headline based on review body\n", "Generate review based on rating and category\n", "Given the review headline return a categorical rating\n", "Generate review headline based on rating\n", "Given the review body return a categorical rating\n" ] } ], "source": [ "from promptsource.templates import DatasetTemplates\n", "prompt_templates = DatasetTemplates(dataset_templates_name) \n", "\n", "print('*** Available prompts:')\n", "\n", "for template in prompt_templates.templates.values():\n", " print(template.get_name())" ] }, { "cell_type": "code", "execution_count": 10, "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "** Selected prompt name: Generate review headline based on review body\n" ] } ], "source": [ "from pprint import pprint\n", "\n", "prompt = prompt_templates[prompt_template_name]\n", "print('** Selected prompt name: {}'.format(prompt_template_name))" ] }, { "cell_type": "code", "execution_count": 11, "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "** Available prompt answers: None\n" ] } ], "source": [ "print('** Available prompt answers: {}'.format(prompt.answer_choices))" ] }, { "cell_type": "code", "execution_count": 12, "metadata": { "tags": [] }, "outputs": [ { "name": "stdout", 
"output_type": "stream", "text": [ "** Selected prompt template:\n", "{'answer_choices': None,\n", " 'id': '5feaa0d7-e4e0-46cc-8517-e00bfa7fd00e',\n", " 'jinja': 'Give a short sentence describing the following product review:\\n'\n", " '{{review_body}} \\n'\n", " '|||\\n'\n", " '{{review_headline}}',\n", " 'metadata': ,\n", " 'name': 'Generate review headline based on review body',\n", " 'reference': 'Generate review headline based on review body'}\n" ] } ], "source": [ "print('** Selected prompt template:')\n", "pprint(prompt.__dict__)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Qualitative " ] }, { "cell_type": "code", "execution_count": 13, "metadata": { "tags": [] }, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "da7c4589e8034b5a91c1856a5c024b43", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/1000 [00:00 200) \\\n", " .select(range(1000)).map(lambda row : {'prompt': prompt.apply(row)[0], 'label': prompt.apply(row)[1]})" ] }, { "cell_type": "code", "execution_count": 14, "metadata": { "tags": [] }, "outputs": [], "source": [ "prompts = prompts_and_labels['prompt']\n", "human_baseline_summaries = prompts_and_labels['review_headline']" ] }, { "cell_type": "code", "execution_count": 15, "metadata": { "tags": [] }, "outputs": [], "source": [ "# for prompt_label in dataset:\n", "# prompt = prompt_label['prompt']\n", "# inputs = tokenizer(prompt, return_tensors='pt')\n", "\n", "# response = tokenizer.decode(model.generate(inputs[\"input_ids\"], \n", "# max_new_tokens=200,\n", "# do_sample=True, \n", "# top_k=50, \n", "# top_p=0.9\n", "# )[0],\n", "# skip_special_tokens=True)\n", "\n", "# print('PROMPT: {}'.format(prompt))\n", "# print('RESPONSE: {}'.format(response))\n", "# print('EXPECTED RESPONSE: {}'.format(prompt_label['label']))\n", "# print('----')" ] }, { "cell_type": "code", "execution_count": 16, "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/opt/conda/lib/python3.7/site-packages/transformers/models/t5/tokenization_t5_fast.py:165: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n", "For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n", "- Be aware that you SHOULD NOT rely on t5-base automatically truncating your input to 512 when padding/encoding.\n", "- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n", "- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n", " FutureWarning,\n" ] } ], "source": [ "from transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n", "\n", "pretrained_model_checkpoint='t5-base'\n", "pretrained_model_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_checkpoint, use_fast=True)\n", "pretrained_model = AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_checkpoint)\n", "\n", "instruct_fine_tuned_model_checkpoint='google/flan-t5-base'\n", "instruct_fine_tuned_model_tokenizer = AutoTokenizer.from_pretrained(instruct_fine_tuned_model_checkpoint, use_fast=True)\n", "instruct_fine_tuned_model = AutoModelForSeq2SeqLM.from_pretrained(instruct_fine_tuned_model_checkpoint)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Quantitative Results with ROUGE Metric\n", "\n", "The [ROUGE 
metric](https://en.wikipedia.org/wiki/ROUGE_(metric)) is computed below for both models, comparing their generated review headlines against the human-written baselines to quantify the improvement gained from instruction fine-tuning.\n", "\n", "# ROUGE evaluation of summaries" ] }, { "cell_type": "code", "execution_count": 17, "metadata": { "tags": [] }, "outputs": [], "source": [ "import evaluate\n", "\n", "rouge = evaluate.load('rouge')" ] }, { "cell_type": "code", "execution_count": 19, "metadata": { "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Token indices sequence length is longer than the specified maximum sequence length for this model (607 > 512). Running this sequence through the model will result in indexing errors\n" ] } ], "source": [ "from transformers import GenerationConfig\n", "\n", "# Generate a headline for each prompt with both models and collect the outputs for ROUGE scoring\n", "pretrained_model_summaries = []\n", "instruct_fine_tuned_model_summaries = []\n", "\n", "for idx, prompt in enumerate(prompts):\n", "    input_ids = pretrained_model_tokenizer(prompt, return_tensors=\"pt\").input_ids\n", "\n", "    # Headline from the pretrained (not instruction-tuned) model\n", "    pretrained_model_outputs = pretrained_model.generate(input_ids=input_ids, generation_config=GenerationConfig(max_new_tokens=200))\n", "    pretrained_model_text_output = pretrained_model_tokenizer.decode(pretrained_model_outputs[0], skip_special_tokens=True)\n", "    pretrained_model_summaries.append(pretrained_model_text_output)\n", "\n", "    # Headline from the instruction-fine-tuned model\n", "    instruct_fine_tuned_model_outputs = instruct_fine_tuned_model.generate(input_ids=input_ids, generation_config=GenerationConfig(max_new_tokens=200))\n", "    instruct_fine_tuned_model_text_output = instruct_fine_tuned_model_tokenizer.decode(instruct_fine_tuned_model_outputs[0], skip_special_tokens=True)\n", "    instruct_fine_tuned_model_summaries.append(instruct_fine_tuned_model_text_output)" ] }, { "cell_type": "code", "execution_count": 20, "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "{'rouge1': 0.007368872409147647,\n", " 'rouge2': 0.00381036656171144,\n", " 'rougeL': 0.006487574402761279,\n", " 'rougeLsum': 0.006745900840542784}" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "pretrained_model_results = rouge.compute(\n", "    predictions=pretrained_model_summaries,\n", "    references=human_baseline_summaries[0:len(pretrained_model_summaries)],\n", "    use_aggregator=True,\n", "    use_stemmer=True,\n", ")\n", "pretrained_model_results" ] }, { "cell_type": "code", "execution_count": 21, "metadata": { "tags": [] }, "outputs": [ { "data": { "text/plain": [ "{'rouge1': 0.09712394312163732,\n", " 'rouge2': 0.053035563434343486,\n", " 'rougeL': 0.09372375672342996,\n", " 'rougeLsum': 0.09386530138173066}" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "instruct_fine_tuned_model_results = rouge.compute(\n", "    predictions=instruct_fine_tuned_model_summaries,\n", "    references=human_baseline_summaries[0:len(instruct_fine_tuned_model_summaries)],\n", "    use_aggregator=True,\n", "    use_stemmer=True,\n", ")\n", "instruct_fine_tuned_model_results" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Stored 'pretrained_model_checkpoint' (str)\n" ] } ], "source": [ "%store pretrained_model_checkpoint" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream",
"text": [ "Stored 'instruct_fine_tuned_model_checkpoint' (str)\n" ] } ], "source": [ "%store instruct_fine_tuned_model_checkpoint" ] }, { "cell_type": "code", "execution_count": 24, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Stored 'dataset_templates_name' (str)\n" ] } ], "source": [ "%store dataset_templates_name" ] }, { "cell_type": "code", "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Stored 'prompt_template_name' (str)\n" ] } ], "source": [ "%store prompt_template_name" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Release Resources" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "%%html\n", "\n", "

Shutting down your kernel for this notebook to release resources.

\n", "\n", " \n", "" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "availableInstances": [ { "_defaultOrder": 0, "_isFastLaunch": true, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 4, "name": "ml.t3.medium", "vcpuNum": 2 }, { "_defaultOrder": 1, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.t3.large", "vcpuNum": 2 }, { "_defaultOrder": 2, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.t3.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 3, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.t3.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 4, "_isFastLaunch": true, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.m5.large", "vcpuNum": 2 }, { "_defaultOrder": 5, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.m5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 6, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.m5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 7, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.m5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 8, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.m5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 9, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.m5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 10, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.m5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 11, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.m5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 12, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.m5d.large", "vcpuNum": 2 }, { "_defaultOrder": 13, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.m5d.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 14, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.m5d.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 15, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.m5d.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 16, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.m5d.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 17, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.m5d.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 18, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.m5d.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 19, 
"_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.m5d.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 20, "_isFastLaunch": false, "category": "General purpose", "gpuNum": 0, "hideHardwareSpecs": true, "memoryGiB": 0, "name": "ml.geospatial.interactive", "supportedImageNames": [ "sagemaker-geospatial-v1-0" ], "vcpuNum": 0 }, { "_defaultOrder": 21, "_isFastLaunch": true, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 4, "name": "ml.c5.large", "vcpuNum": 2 }, { "_defaultOrder": 22, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 8, "name": "ml.c5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 23, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.c5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 24, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.c5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 25, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 72, "name": "ml.c5.9xlarge", "vcpuNum": 36 }, { "_defaultOrder": 26, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 96, "name": "ml.c5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 27, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 144, "name": "ml.c5.18xlarge", "vcpuNum": 72 }, { "_defaultOrder": 28, "_isFastLaunch": false, "category": "Compute optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.c5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 29, "_isFastLaunch": true, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.g4dn.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 30, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.g4dn.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 31, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.g4dn.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 32, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.g4dn.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 33, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.g4dn.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 34, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.g4dn.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 35, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 61, "name": "ml.p3.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 36, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 244, "name": "ml.p3.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 37, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 488, "name": "ml.p3.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 38, "_isFastLaunch": false, 
"category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.p3dn.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 39, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.r5.large", "vcpuNum": 2 }, { "_defaultOrder": 40, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.r5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 41, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.r5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 42, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.r5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 43, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.r5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 44, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.r5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 45, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 512, "name": "ml.r5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 46, "_isFastLaunch": false, "category": "Memory Optimized", "gpuNum": 0, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.r5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 47, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 16, "name": "ml.g5.xlarge", "vcpuNum": 4 }, { "_defaultOrder": 48, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 32, "name": "ml.g5.2xlarge", "vcpuNum": 8 }, { "_defaultOrder": 49, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 64, "name": "ml.g5.4xlarge", "vcpuNum": 16 }, { "_defaultOrder": 50, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 128, "name": "ml.g5.8xlarge", "vcpuNum": 32 }, { "_defaultOrder": 51, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 1, "hideHardwareSpecs": false, "memoryGiB": 256, "name": "ml.g5.16xlarge", "vcpuNum": 64 }, { "_defaultOrder": 52, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 192, "name": "ml.g5.12xlarge", "vcpuNum": 48 }, { "_defaultOrder": 53, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 4, "hideHardwareSpecs": false, "memoryGiB": 384, "name": "ml.g5.24xlarge", "vcpuNum": 96 }, { "_defaultOrder": 54, "_isFastLaunch": false, "category": "Accelerated computing", "gpuNum": 8, "hideHardwareSpecs": false, "memoryGiB": 768, "name": "ml.g5.48xlarge", "vcpuNum": 192 } ], "instance_type": "ml.g5.4xlarge", "kernelspec": { "display_name": "Python 3 (Data Science)", "language": "python", "name": "python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.10" } }, "nbformat": 4, "nbformat_minor": 4 }