/*
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
using System;
using System.Globalization;
using System.IO;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Text;
using System.Threading;
using System.Xml;
using System.Xml.Serialization;
using Amazon.Runtime;
using Amazon.Runtime.Internal;
using Amazon.Runtime.SharedInterfaces;
using Amazon.Util;
using Amazon.EC2.Model;
namespace Amazon.EC2.Import
{
#if BCL
///
/// Callback signature to report progress on the manifest creation and upload process.
///
/// message: describes the current in-progress task.
/// percentComplete: if not null, the percentage completion of the image file upload.
public delegate void ImportProgressCallback(string message, int? percentComplete);
///
/// Helper class to import a virtual machine image or disk image to Amazon EC2
/// using Amazon S3 to hold the in-flight artifacts.
///
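///
/// Example usage (an illustrative sketch only; the bucket name, file path and
/// instance type below are assumptions, not values required by the importer):
///
///   var importer = new DiskImageImporter(RegionEndpoint.USWest2, "my-import-bucket");
///   var response = importer.ImportInstance(
///       @"C:\images\web-server.vmdk",
///       null,   // infer the format from the file extension
///       null,   // let the importer choose the volume size
///       null,   // no custom key prefix
///       new ImportLaunchConfiguration { InstanceType = InstanceType.M3Medium },
///       (message, percent) => Console.WriteLine("{0} {1}", message, percent));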
public class DiskImageImporter
{
const string ManifestFileVersion = "2010-11-15";
const string ManifestFileImporterName = "ec2-upload-disk-image";
const string ManifestFileImporterVersion = "1.0.0";
const string ManifestFileImporterRelease = ManifestFileVersion;
const string ManifestSuffix = "manifest.xml";
const string PartSuffix = "part";
const long OneMb = 1024 * 1024;
const long OneGb = 1024 * OneMb;
const long DefaultPartSize = 10 * OneMb;
///
/// The default number of threads that will be used to upload the parts comprising
/// the image file. Each thread will consume a 10MB buffer to process the part data.
///
public const int DefaultUploadThreads = 10;
///
/// The default validity period for the signed Amazon S3 URLs that allow Amazon EC2
/// to access the manifest.
///
public const int DefaultUrlExpirationInDays = 30;
///
/// The maximum number of threads that can be used to upload the image file parts.
///
public const int MaxUploadThreads = 30;
///
///
/// The number of threads to use to upload the image file parts to Amazon S3.
/// Each thread will consume a 10MB buffer to process the part data.
///
///
/// Default: 10. Max: 30.
///
///
public int UploadThreads
{
get { return _uploadThreads; }
set
{
if (value < 1)
throw new ArgumentOutOfRangeException("Expected value between 1 and " + MaxUploadThreads);
_uploadThreads = value > MaxUploadThreads ? MaxUploadThreads : value;
}
}
///
/// The constructed manifest describing the import artifacts.
///
public ImportManifestRoot ImportManifest { get; private set; }
///
/// The absolute path to the image file being imported.
///
public string ImageFilePath { get; private set; }
///
/// The Amazon S3 object key to the uploaded manifest file.
///
public string ManifestFileKey { get; protected set; }
///
///
/// Controls Amazon S3 clean-up of image file and import manifest artifacts
/// if an error occurs during upload.
///
///
/// By default, if an error occurs when uploading the image file to S3 the
/// importer will exit and leave what has been uploaded still present in S3.
/// This enables a 'resume mode' where the importer can be run again to
/// continue upload of the remaining parts based on inspection of the bucket
/// vs the manifest. Note that this does leave the potential for storage costs
/// if the user decides to abandon the import (the PowerShell cmdlets that use
/// this importer emit error messages detailing the resume/cleanup instructions).
///
///
/// If this option is set, the importer instead automatically cleans the bucket
/// of all uploaded content on error.
///
///
public bool RollbackOnUploadError { get; set; }
///
/// The maximum age, in days, before the Amazon S3 presigned urls
/// generated in the import manifest expire. The default value
/// is 30. Note that for AWS regions that require Signature Version 4
/// request signing, the expiry is capped at 7 days.
///
public int UrlExpirationInDays
{
get { return _urlExpirationInDays; }
set
{
if (value < 1)
throw new ArgumentOutOfRangeException("value", "Expected a value of 1 or greater.");
_urlExpirationInDays = value;
}
}
///
/// The name of the bucket that will hold the artifacts. An attempt will
/// be made to create the bucket if it does not already exist.
///
public string BucketName { get; private set; }
///
/// S3 object key beneath which all artifacts will be uploaded in
/// the bucket; by convention this is a GUID preceded by any custom
/// prefix the user has specified.
///
public string ArtifactsKeyPrefix { get; private set; }
///
/// The region in which the import will take place.
///
public RegionEndpoint Region { get; private set; }
///
/// Client for S3 operations, created using the credentials and region scoping
/// we are handed on construction, or assigned from an existing S3 client instance.
///
public ICoreAmazonS3 S3Client { get; private set; }
///
/// Client for EC2 operations, created using the credentials and region scoping
/// we are handed on construction, or assigned from an existing EC2 client instance.
///
public IAmazonEC2 EC2Client { get; private set; }
private string _presignedManifestUrl;
private int _urlExpirationInDays = DefaultUrlExpirationInDays;
private int _uploadThreads = DefaultUploadThreads;
private static int _activeUploadWorkers = 0;
///
/// Constructs an image importer to upload and convert virtual machine image
/// or disk volumes for use with Amazon EC2. AWS credentials will be obtained from
/// the application configuration settings.
///
///
/// The region to which the artifacts will be uploaded and EC2 conversion performed.
///
///
/// The name of the Amazon S3 bucket that will contain the uploaded image and manifest. If the bucket
/// does not exist it will be created.
///
public DiskImageImporter(RegionEndpoint region, string bucketName)
{
Region = region;
BucketName = bucketName;
EC2Client = new AmazonEC2Client(Region);
S3Client = ServiceClientHelpers.CreateServiceFromAssembly<ICoreAmazonS3>(ServiceClientHelpers.S3_ASSEMBLY_NAME, ServiceClientHelpers.S3_SERVICE_CLASS_NAME, Region);
}
///
/// Constructs an image importer to upload and convert virtual machine image
/// or disk volumes for use with Amazon EC2. The importer will use the supplied
/// clients for communicating with Amazon S3 and Amazon EC2. Note that the clients
/// should be configured to use the same region and AWS account.
///
/// Client to use to upload artifacts to Amazon S3
/// Client to use to request image conversion in Amazon EC2
///
/// The name of the Amazon S3 bucket that will contain the uploaded image and manifest. If the bucket
/// does not exist it will be created.
///
public DiskImageImporter(ICoreAmazonS3 s3Client, IAmazonEC2 ec2Client, string bucketName)
{
S3Client = s3Client;
EC2Client = ec2Client;
BucketName = bucketName;
}
///
/// Constructs an image importer to upload and convert virtual machine image
/// or disk volumes for use with Amazon EC2.
///
///
/// The AWS credentials of the account that will own the uploaded artifacts.
///
///
/// The region to which the artifacts will be uploaded and EC2 conversion performed.
///
///
/// The name of the Amazon S3 bucket that will contain the uploaded image and manifest. If the bucket
/// does not exist it will be created.
///
public DiskImageImporter(AWSCredentials credentials, RegionEndpoint region, string bucketName)
{
Region = region;
BucketName = bucketName;
EC2Client = new AmazonEC2Client(credentials, Region);
S3Client = ServiceClientHelpers.CreateServiceFromAssembly<ICoreAmazonS3>(ServiceClientHelpers.S3_ASSEMBLY_NAME, ServiceClientHelpers.S3_SERVICE_CLASS_NAME, credentials, Region);
}
///
/// Uploads and requests import conversion of a virtual machine image file
/// to an Amazon EC2 instance.
///
/// The full path to the image file to be processed
///
/// The format of the image file (VMDK | RAW | VHD). If not specified, it will be inferred
/// from the extension of the image file.
///
///
/// The requested size (in GiB) for the resulting image volume. If not specified a suitable
/// value based on the size of the image file is used. Note that the minimum required boot
/// volume size for EC2 is 8GB.
///
///
/// Optional root-level key prefix that will be applied to the uploaded artifacts in S3.
/// The artifacts will be placed beneath this (or the root if not set) in a key composed
/// of a GUID.
///
/// Launch configuration settings for the imported instance
/// Optional callback delegate for upload progress reporting
///
/// The service response containing a ConversionTask object that can be used to monitor the progress of the
/// requested conversion.
///
public ImportInstanceResponse ImportInstance(string imageFilepath,
string fileFormat,
long? volumeSize,
string keyPrefix,
ImportLaunchConfiguration launchConfiguration,
ImportProgressCallback progressCallback)
{
Upload(imageFilepath, fileFormat, volumeSize, keyPrefix, progressCallback, false);
return StartInstanceConversion(launchConfiguration);
}
///
/// Uploads and requests import conversion of a virtual disk file to an Amazon EBS volume.
///
/// The full path to the image file to be processed
///
/// The format of the image file (VMDK | RAW | VHD). If not specified, it will be inferred
/// from the extension of the image file.
///
///
/// The requested size (in GiB) for the resulting image volume. If not specified a suitable
/// value based on the size of the image file is used.
///
///
/// Optional root-level key prefix that will be applied to the uploaded artifacts in S3.
/// The artifacts will be placed beneath this (or the root if not set) in a key composed
/// of a GUID.
///
/// The Availability Zone for the resulting Amazon EBS volume.
/// An optional description for the volume being imported.
/// Optional callback delegate for upload progress reporting
///
/// The service response containing a ConversionTask object that can be used to monitor the progress of the
/// requested conversion.
///
public ImportVolumeResponse ImportVolume(string imageFilepath,
string fileFormat,
long? volumeSize,
string keyPrefix,
string availabilityZone,
string description,
ImportProgressCallback progressCallback)
{
Upload(imageFilepath, fileFormat, volumeSize, keyPrefix, progressCallback, false);
return StartVolumeConversion(availabilityZone, description);
}
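// Volume-import sketch (the values here are assumptions for illustration): uploads
// the disk image and requests conversion to an EBS volume in the named Availability Zone.
//
//   var response = importer.ImportVolume(@"C:\images\data-disk.vhd",
//                                        null,           // infer format from extension
//                                        null,           // size the volume from the image
//                                        null,           // no custom key prefix
//                                        "us-west-2a",
//                                        "imported data disk",
//                                        null);          // no progress callback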
///
///
/// Constructs the import manifest for the image and then uploads it and the
/// virtual machine image or disk image to Amazon S3. The S3 key to the uploaded
/// manifest file is returned.
///
///
/// If an error occurs during upload of the image file, the RollbackOnUploadError
/// property governs whether the partially uploaded content is deleted or retained.
/// By default RollbackOnUploadError is false and the content is retained, allowing
/// the import to be resumed. Set the property to true to have the content deleted
/// on error, avoiding storage charges for potentially orphaned content if the
/// command is not re-run.
///
///
/// The full path to the image file to be processed
/// The S3 object key of the uploaded manifest file
public string Upload(string imageFilepath)
{
return Upload(imageFilepath, null, null, null, null, false);
}
///
///
/// Constructs the import manifest for the image and then uploads it and the
/// virtual machine image or disk image to Amazon S3. The S3 key to the uploaded
/// manifest file is returned.
///
///
/// If an error occurs during upload of the image file, the RollbackOnUploadError
/// property governs whether the partially uploaded content is deleted or retained.
/// By default RollbackOnUploadError is false and the content is retained, allowing
/// the import to be resumed. Set the property to true to have the content deleted
/// on error, avoiding storage charges for potentially orphaned content if the
/// command is not re-run.
///
///
/// The full path to the image file to be processed
///
/// The format of the image file (VMDK | RAW | VHD). If not specified, it will be inferred
/// from the extension of the image file.
///
///
/// The requested size (in GiB) for the resulting image volume. If not specified a suitable
/// value based on the size of the image file is used. Note that if importing a disk image that
/// will be used as an EC2 instance, the minimum required boot volume size is 8GB.
///
///
/// Optional root-level key prefix that will be applied to the uploaded artifacts in S3.
/// The artifacts will be placed beneath this (or the root if not set) in a key composed
/// of a GUID.
///
/// Optional callback delegate for upload progress reporting
///
/// Set this to true if a previous upload failed part-way through processing and
/// RollbackOnUploadError was left false, so the partially uploaded content was retained.
/// The existing manifest will be inspected and upload of the remaining content will resume.
///
/// The S3 object key of the uploaded manifest file
public string Upload(string imageFilepath,
string fileFormat,
long? volumeSize,
string keyPrefix,
ImportProgressCallback progressCallback,
bool resumeUpload)
{
ImageFilePath = imageFilepath;
if (!resumeUpload)
{
var guidPart = Guid.NewGuid().ToString("D");
ArtifactsKeyPrefix
= string.IsNullOrEmpty(keyPrefix)
? guidPart
: string.Format(CultureInfo.InvariantCulture, "{0}/{1}", keyPrefix, guidPart);
ImportManifest = CreateImportManifest(fileFormat, volumeSize);
if (!S3Client.DoesS3BucketExist(BucketName))
S3Client.EnsureBucketExists(this.BucketName);
UploadManifest(progressCallback);
}
UploadImageParts(progressCallback);
return ManifestFileKey;
}
///
/// Constructs an importer instance for a previously uploaded manifest. The manifest is downloaded using
/// a new Amazon S3 client constructed for the specified region and deserialized, ready for use in
/// constructing the appropriate ImportInstance or ImportVolume request to Amazon EC2.
///
///
/// The AWS credentials for the account that owns or has access to the bucket containing the manifest file.
///
/// The region in which the Amazon S3 client used for download will be constructed.
/// The name of the bucket containing the manifest file.
/// The S3 object key of the manifest file.
///
/// Set this to true if a previous upload failed part-way through processing and
/// RollbackOnUploadError was left false, so the partially uploaded content was retained.
/// The existing manifest will be inspected and upload of the remaining content can then resume.
///
/// Initialized importer instance containing a deserialized manifest
public static DiskImageImporter FromManifest(AWSCredentials credentials,
RegionEndpoint region,
string bucketName,
string manifestFileKey,
bool resumingUpload)
{
try
{
var importer = new DiskImageImporter(credentials, region, bucketName)
{
ManifestFileKey = manifestFileKey
};
importer.DeserializeManifestFromS3();
if (resumingUpload)
importer.DetermineRemainingUploads();
return importer;
}
catch (XmlException e)
{
throw new DiskImageImporterException(DiskImportErrorStage.ManifestInspection,
"Failed to deserialize the downloaded manifest",
e);
}
catch (Exception e)
{
var msg = string.Format(CultureInfo.InvariantCulture,
"Failed to download the specified manifest from bucket {0} with key {1}",
bucketName,
manifestFileKey);
throw new DiskImageImporterException(DiskImportErrorStage.ManifestInspection, msg, e);
}
}
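// Resume sketch (assumed values): rebuild the importer from the manifest key reported
// by the failed run, then call Upload with resumeUpload set to true so that only the
// parts missing from the bucket are transferred.
//
//   var importer = DiskImageImporter.FromManifest(credentials, RegionEndpoint.USWest2,
//                                                 "my-import-bucket", manifestKey, true);
//   importer.Upload(@"C:\images\web-server.vmdk", null, null, null, null, true);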
///
/// Initiates a conversion task to yield a new Amazon EC2 instance for a set of image file
/// artifacts uploaded previously to Amazon S3.
///
/// Launch configuration settings for the imported instance
///
/// The service response containing a ConversionTask object that can be used to monitor the progress of the
/// requested conversion.
///
public ImportInstanceResponse StartInstanceConversion(ImportLaunchConfiguration launchConfiguration)
{
if (string.IsNullOrEmpty(ManifestFileKey))
throw new InvalidOperationException("No Amazon S3 object key available; have the image artifacts been uploaded?");
var diskImage = PopulateDiskImage(ManifestFileKey, launchConfiguration.Description);
var launchSpecification = PopulateLaunchSpecificationInstance(launchConfiguration);
var request = new ImportInstanceRequest
{
Description = string.IsNullOrEmpty(launchConfiguration.Description) ? null : launchConfiguration.Description,
LaunchSpecification = launchSpecification,
Platform = string.IsNullOrEmpty(launchConfiguration.Platform) ? null : launchConfiguration.Platform
};
request.DiskImages.Add(diskImage);
// allow any exception to propagate to the caller; this allows the calling tool to
// assist the user with re-executing the command by showing the appropriate command line
// or remediation needed to avoid re-uploading the artifacts
try
{
return EC2Client.ImportInstance(request);
}
catch (Exception e)
{
throw new DiskImageImporterException(DiskImportErrorStage.SendingImportRequest, e);
}
}
///
/// Initiates a conversion task to yield a new EBS volume for a set of image file
/// artifacts uploaded previously to Amazon S3.
///
/// The Availability Zone for the resulting Amazon EBS volume.
/// An optional description for the volume being imported.
///
/// The service response containing a ConversionTask object that can be used to monitor the progress of the
/// requested conversion.
///
public ImportVolumeResponse StartVolumeConversion(string availabilityZone, string description)
{
if (string.IsNullOrEmpty(ManifestFileKey))
throw new InvalidOperationException("No Amazon S3 object key available; have the image artifacts been uploaded?");
var diskImageDetail = PopulateDiskImageDetail(ManifestFileKey);
var volumeDetail = PopulateVolumeDetail();
var request = new ImportVolumeRequest
{
AvailabilityZone = availabilityZone,
Description = string.IsNullOrEmpty(description) ? null : description,
Image = diskImageDetail,
Volume = volumeDetail
};
// allow any exception to propagate to the caller; this allows the calling tool to
// assist the user with re-executing the command by showing the appropriate command line
// or remediation needed to avoid re-uploading the artifacts
return EC2Client.ImportVolume(request);
}
///
/// Helper to populate a DiskImageDetail instance from the SDK that will be used in an ImportVolume
/// API request using data previously recorded in the import manifest.
///
/// The Amazon S3 object key of the manifest file.
/// Populated DiskImageDetail instance.
public DiskImageDetail PopulateDiskImageDetail(string manifestFileKey)
{
return new DiskImageDetail
{
ImportManifestUrl = GetPresignedManifestUrl(manifestFileKey),
Format = ImportManifest.FileFormat,
Bytes = ImportManifest.ImportData.Size
};
}
///
/// Helper to populate a DiskImage instance from the SDK that will be used in an ImportInstance
/// API request using data previously recorded in the import manifest.
///
/// The Amazon S3 object key of the manifest file.
/// Optional description for the image.
/// Populated DiskImage instance.
public DiskImage PopulateDiskImage(string manifestFileKey, string description)
{
if (ImportManifest == null)
throw new InvalidOperationException("Expected import manifest to have been created prior to call.");
var diskImageDetail = PopulateDiskImageDetail(manifestFileKey);
var volumeDetail = PopulateVolumeDetail();
return new DiskImage
{
Description = description,
Image = diskImageDetail,
Volume = volumeDetail
};
}
///
/// Helper to populate a VolumeDetail instance from the SDK that will be used in an
/// ImportInstance or ImportVolume API request using data previously recorded in the
/// import manifest.
///
/// Populated VolumeDetail instance.
public VolumeDetail PopulateVolumeDetail()
{
return new VolumeDetail
{
Size = ImportManifest.ImportData.VolumeSize
};
}
///
/// Helper to populate an SDK ImportInstanceLaunchSpecification instance that
/// will be used in an ImportInstance API request.
///
/// Settings for the new LaunchSpecificationInstance
/// Populated ImportInstanceLaunchSpecification instance.
public static ImportInstanceLaunchSpecification PopulateLaunchSpecificationInstance(ImportLaunchConfiguration config)
{
var launchSpecification = new ImportInstanceLaunchSpecification
{
Architecture = config.Architecture,
InstanceType = config.InstanceType,
Monitoring = config.EnableMonitoring,
};
if (config.SecurityGroupNames != null)
launchSpecification.GroupNames.AddRange(config.SecurityGroupNames);
if (!string.IsNullOrEmpty(config.AvailabilityZone))
launchSpecification.Placement = new Placement { AvailabilityZone = config.AvailabilityZone };
if (!string.IsNullOrEmpty(config.SubnetId))
launchSpecification.SubnetId = config.SubnetId;
if (!string.IsNullOrEmpty(config.PrivateIpAddress))
launchSpecification.PrivateIpAddress = config.PrivateIpAddress;
if (config.InstanceInitiatedShutdownBehavior != null)
launchSpecification.InstanceInitiatedShutdownBehavior = config.InstanceInitiatedShutdownBehavior;
if (!string.IsNullOrEmpty(config.AdditionalInfo))
launchSpecification.AdditionalInfo = config.AdditionalInfo;
return launchSpecification;
}
///
/// Downloads the manifest from Amazon S3 and deserializes the contents.
///
void DeserializeManifestFromS3()
{
var stream = S3Client.GetObjectStream(this.BucketName, ManifestFileKey, null);
var serializer = new XmlSerializer(typeof(ImportManifestRoot));
ImportManifest = (ImportManifestRoot)serializer.Deserialize(stream);
}
///
/// Analyzes the parts list of the manifest to determine which object parts exist
/// in S3. Used when instantiating an importer from an existing manifest of a failed
/// upload (RollbackOnUploadError left false so the partially uploaded content was kept,
/// allowing the upload to be resumed part-way through).
///
///
/// Since the manifest contains a set of presigned urls to each part we can make use of those
/// to determine whether a part has been uploaded or not.
///
void DetermineRemainingUploads()
{
foreach (var part in ImportManifest.ImportData.PartsList.PartInstances)
{
try
{
var request = WebRequest.Create(part.HeadUrl);
request.Method = "HEAD";
var response = request.GetResponse();
response.Close();
// if the HEAD request worked, log that the part can be skipped during resumption
part.UploadCompleted = true;
}
catch
{
// always clear the state on exception so we'll retry parts we failed to HEAD
// even if we thought we'd completed them successfully
part.UploadCompleted = false;
}
}
}
///
/// Returns the maximum age, in days, to use for expiry for Amazon S3 presigned
/// urls. For regions that require Signature Version 4 request signing, this is
/// limited to 7 days.
///
int UrlExpiryPeriod
{
get
{
if (Region != null)
{
var signatureVersion = Region.GetEndpointForService("s3").SignatureVersionOverride;
if ((signatureVersion == null || signatureVersion == "4") && UrlExpirationInDays > 7)
return 7;
}
return UrlExpirationInDays;
}
}
///
/// Constructs the presigned url to the import manifest in S3.
///
/// The S3 object key of the manifest file.
/// Presigned url to the manifest file object.
string GetPresignedManifestUrl(string manifestFileKey)
{
if (string.IsNullOrEmpty(_presignedManifestUrl))
_presignedManifestUrl = S3Client.GeneratePreSignedURL(this.BucketName, manifestFileKey, AWSSDKUtils.CorrectedUtcNow.AddDays(UrlExpiryPeriod), null);
return _presignedManifestUrl;
}
///
/// Constructs the S3 object key for the manifest artifact. This will combine the root key,
/// any optional prefix the user has requested be applied and the filename of the artifact,
/// plus a fixed extension.
///
/// The path to the image file
void ConstructManifestArtifactKey(string imageFilepath)
{
if (string.IsNullOrEmpty(ManifestFileKey))
ManifestFileKey = string.Format(CultureInfo.InvariantCulture,
"{0}/{1}{2}",
ArtifactsKeyPrefix,
Path.GetFileName(imageFilepath),
ManifestSuffix);
}
///
/// Serialize and upload the constructed import manifest using the supplied S3 client
/// with optional progress callback.
///
/// Optional callback to track upload progress.
void UploadManifest(ImportProgressCallback progressCallback = null)
{
if (string.IsNullOrEmpty(ManifestFileKey))
throw new InvalidOperationException("Expected ManifestFileKey to have been constructed");
using (var manifestStream = new MemoryStream())
{
if (progressCallback != null)
progressCallback("Creating import manifest...", null);
// Get as close to the XML sent by the existing EC2 CLI as possible: no namespaces,
// Unix linefeeds, a standalone instruction, plus indentation of 4 spaces. This makes
// comparison of the two manifests easier if needed for debugging.
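// For reference, the serialized manifest takes roughly this shape (abbreviated,
// with illustrative values; see the serialization classes later in this file):
//
//   <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
//   <manifest>
//       <version>2010-11-15</version>
//       <file-format>VMDK</file-format>
//       <importer>
//           <name>ec2-upload-disk-image</name>
//           <version>1.0.0</version>
//           <release>2010-11-15</release>
//       </importer>
//       <self-destruct-url>https://...</self-destruct-url>
//       <import>
//           <size>10485760</size>
//           <volume-size>8</volume-size>
//           <parts count="1">
//               <part index="0">
//                   <byte-range end="10485759" start="0"/>
//                   <key>...</key>
//                   <head-url>https://...</head-url>
//                   <get-url>https://...</get-url>
//                   <delete-url>https://...</delete-url>
//               </part>
//           </parts>
//       </import>
//   </manifest>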
var xmlContext = new XmlSerializer(typeof(ImportManifestRoot));
var namespaces = new XmlSerializerNamespaces();
namespaces.Add("", "");
var writerCfg = new XmlWriterSettings
{
Encoding = new UTF8Encoding(false),
NewLineChars = "\n",
NewLineHandling = NewLineHandling.Replace,
Indent = true,
IndentChars = "    "
};
using (var writer = XmlWriter.Create(manifestStream, writerCfg))
{
writer.WriteProcessingInstruction("xml", "version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"");
xmlContext.Serialize(writer, ImportManifest, namespaces);
}
if (progressCallback != null)
progressCallback("Creating and uploading import manifest...", null);
// Even though this is the first artifact to be uploaded (so if it fails there is
// nothing to clean up), catch any exception so we can tell the user that no
// clean-up is required (for users concerned about storage costs).
try
{
S3Client.UploadObjectFromStream(BucketName, ManifestFileKey, manifestStream, null);
}
catch (Exception e)
{
throw new DiskImageImporterException(DiskImportErrorStage.UploadingManifest,
"Upload of the image file manifest to Amazon S3 failed.\nThere are no orphaned objects requiring manual deletion.",
e);
}
}
}
///
/// Uploads the image file to S3 as a series of distinct 10MB objects, as required by EC2,
/// using the constructed import manifest as a guide. If any part fails to upload and
/// RollbackOnUploadError is set, successfully uploaded parts are cleaned up before the
/// error is returned to the caller; otherwise the uploaded content is retained so the
/// import can be resumed.
///
/// Optional callback to track upload progress.
void UploadImageParts(ImportProgressCallback progressCallback = null)
{
var imageFileinfo = new FileInfo(ImageFilePath);
var partsList = ImportManifest.ImportData.PartsList;
var activityMessage = string.Format(CultureInfo.InvariantCulture,
"Uploading image file ({0:N0} bytes across {1:N0} parts).",
imageFileinfo.Length,
partsList.Count);
if (progressCallback != null)
progressCallback(activityMessage, 0);
// spin up the threads to handle the parts
using (var fs = new FileStream(ImageFilePath, FileMode.Open, FileAccess.Read))
{
// CountdownEvent and CancellationToken would be ideal here but they
// are not in .net 3.5
var partUploadedEvent = new AutoResetEvent(false);
for (var i = 0; i < UploadThreads; i++)
{
var workerState = new ImagePartUploadState
{
S3Client = S3Client,
BucketName = BucketName,
PartsList = partsList,
ImageFileStream = fs,
PartProcessed = partUploadedEvent
};
ThreadPool.QueueUserWorkItem(UploadImageFilePart, workerState);
}
// Rather than rely on keeping a count of completed uploads to track progress,
// which could get out of sync if two threads fire the event at the same time,
// we scan and count progress on each event signal - that's been more reliable.
// Also, to allow for a loss of network connection, we use a timeout on
// the wait so we avoid any possibility of the event not being signalled due to
// the workers aborting without signalling.
while (true)
{
partUploadedEvent.WaitOne(5000);
if (partsList.HasFailedUploads)
break;
if (progressCallback != null)
{
var processedParts = partsList.PartInstances.Count(part => part.UploadCompleted);
progressCallback(activityMessage, (int)((double)processedParts / partsList.Count * 100));
}
if (_activeUploadWorkers == 0)
break;
}
}
if (!partsList.HasFailedUploads)
{
if (progressCallback != null)
progressCallback("Image file upload completed.", null);
}
else
{
if (progressCallback != null)
{
progressCallback("One or more image file parts failed to upload"
+ (RollbackOnUploadError ? ", rolling back bucket content..." : string.Empty), null);
}
// wait until all of the workers are done before we start any possible clean up
while (_activeUploadWorkers > 0)
{
Thread.Sleep(500);
}
var msg = new StringBuilder("Upload of the image file artifacts to Amazon S3 failed.\r\n");
if (RollbackOnUploadError)
{
var allRemoved = RemoveUploadedArtifacts(ManifestFileKey, partsList.PartInstances);
if (allRemoved)
msg.Append("All content that had been uploaded has been successfully removed."
+ "\r\n"
+ "No further clean-up is required.\r\n");
else
msg.AppendFormat("Some content that had been uploaded could not be successfully removed."
+ "\r\n"
+ "Inspect the bucket content for objects with keyprefix"
+ "\r\n"
+ "'{0}'\r\nand delete them.\r\n",
ArtifactsKeyPrefix);
}
else
{
msg.Append("All content that had been uploaded successfully has been retained; the import can be resumed.\r\n");
}
throw new DiskImageImporterException(DiskImportErrorStage.UploadingImageFile, msg.ToString());
}
}
///
/// Removes the manifest and then iterates through the parts list to determine which parts
/// had been uploaded when the failure occurred, removing those objects to avoid storage
/// costs to the user (if the user retries the command, a different root key guid will be
/// generated, leading to potential orphans).
///
/// The object key of the manifest file.
/// The set of parts that should have been uploaded
/// True if all objects were successfully deleted, false if objects remain that the user should manually clean up
bool RemoveUploadedArtifacts(string manifestFileKey, IEnumerable<ImageFilePart> partsList)
{
var allRemoved = true;
try
{
S3Client.Delete(this.BucketName, manifestFileKey, null);
}
catch (Exception)
{
allRemoved = false;
}
var keysToDelete = (from part in partsList where part.UploadCompleted select part.Key).ToList();
var keyIndex = 0;
while (keyIndex < keysToDelete.Count)
{
// batch deletions into groups of at most 1000 keys, the S3 multi-object delete limit
var batchOfKeys = new List<string>();
while (keyIndex < keysToDelete.Count && batchOfKeys.Count < 1000)
{
batchOfKeys.Add(keysToDelete[keyIndex++]);
}
try
{
S3Client.Deletes(this.BucketName, batchOfKeys, null);
}
catch
{
allRemoved = false;
}
}
return allRemoved;
}
///
/// Threadpool delegate to process image file parts one by one and upload to
/// Amazon S3. If an error occurs, we abandon the part and continue to
/// seek new ones (even though we won't process them, only signal). This
/// allows all threads to exit before we attempt error clean up.
///
/// ImagePartUploadState instance to process
static void UploadImageFilePart(object state)
{
Interlocked.Increment(ref _activeUploadWorkers);
var uploadStateInfo = state as ImagePartUploadState;
if (uploadStateInfo == null)
throw new ArgumentException("Expected ImagePartUploadState instance to process");
var buffer = new byte[DefaultPartSize];
var part = uploadStateInfo.PartsList.FetchNextPartForUpload(uploadStateInfo.ImageFileStream, ref buffer);
var uploadSucceeded = true;
while (part != null && uploadSucceeded)
{
try
{
if (!part.UploadCompleted) // if we're resuming, skip what was uploaded OK
{
using (var ms = new MemoryStream(buffer, 0, (int)part.ByteRange.Extent))
{
// Implement an additional retry mode above and beyond what the
// SDK's standard HTTP handling performs, to account for uploads
// on networks with considerable jitter.
const int maxRetries = 5;
var attempt = 1;
while (!part.UploadCompleted && attempt <= maxRetries)
{
try
{
uploadStateInfo.S3Client.UploadObjectFromStream(uploadStateInfo.BucketName, part.Key, ms,
new Dictionary<string, object> {{"AutoCloseStream", false}});
part.UploadCompleted = true;
}
catch (Exception)
{
attempt++;
}
}
if (!part.UploadCompleted)
throw new DiskImageImporterException(DiskImportErrorStage.UploadingImageFile,
"Failed to upload part " + part.Index + " after " + maxRetries + " retries.");
}
}
}
catch (Exception)
{
uploadSucceeded = false;
uploadStateInfo.PartsList.RegisterUploadFailure();
}
finally
{
if (uploadSucceeded)
{
uploadStateInfo.PartProcessed.Set();
part = uploadStateInfo.PartsList.FetchNextPartForUpload(uploadStateInfo.ImageFileStream, ref buffer);
}
}
}
Interlocked.Decrement(ref _activeUploadWorkers);
}
///
/// Constructs the object hierarchy that will be serialized to a single manifest
/// file describing the import.
///
///
/// The file format of the image file. If not specified, it will be inferred from the image
/// file extension. Valid values: VMDK | RAW | VHD.
///
///
/// The requested size, in GiB, of the resulting volume in EC2. If not specified a suitable
/// value will be used based on the size of the supplied image file.
///
/// Import manifest ready for serialization and upload.
ImportManifestRoot CreateImportManifest(string fileFormat,
long? volumeSize)
{
try
{
var urlExpiration = AWSSDKUtils.CorrectedUtcNow.AddDays(UrlExpiryPeriod);
ConstructManifestArtifactKey(ImageFilePath);
var format = fileFormat;
if (string.IsNullOrEmpty(format))
{
var ext = AWSSDKUtils.GetExtension(ImageFilePath);
if (string.IsNullOrEmpty(ext))
throw new ArgumentException("The image filename does not have an exception, so file format cannot be inferred.");
format = ext.TrimStart('.');
}
var manifest = new ImportManifestRoot
{
Version = ManifestFileVersion,
FileFormat = format.ToUpper(CultureInfo.InvariantCulture),
ImporterField = new ImporterInfo
{
Name = ManifestFileImporterName,
Version = ManifestFileImporterVersion,
Release = ManifestFileImporterRelease
},
SelfDestructUrl = S3Client.GeneratePreSignedURL(
BucketName,
ManifestFileKey,
urlExpiration,
new Dictionary{{"Verb", "DELETE"}}),
ImportData = ConstructImportPartsList(volumeSize, urlExpiration)
};
return manifest;
}
catch (Exception e)
{
throw new DiskImageImporterException(DiskImportErrorStage.GeneratingManifest, e);
}
}
///
/// Walks the byte ranges of the image file to construct the logical parts we'll use to perform
/// the upload
///
/// The requested size of the volume in EC2
/// The time at which the presigned urls for the parts should expire.
/// Manifest subcomponent describing the part structure.
Import ConstructImportPartsList(long? volumeSize, DateTime urlExpiration)
{
var imageFilename = Path.GetFileName(ImageFilePath);
var imageFileinfo = new FileInfo(ImageFilePath);
var diskImageSize = imageFileinfo.Length;
var partCount = (int)Math.Ceiling((double)diskImageSize / DefaultPartSize);
var parts = new ImageFileParts { Count = partCount };
var partKeyPrefix = string.Format(CultureInfo.InvariantCulture,
"{0}/{1}.",
ArtifactsKeyPrefix,
imageFilename);
long partStartOffset = 0;
for (var i = 0; i < partCount; i++)
{
var partEndOffset = partStartOffset + DefaultPartSize - 1;
if (partEndOffset >= diskImageSize)
partEndOffset = diskImageSize - 1;
var partKey = string.Concat(partKeyPrefix, PartSuffix, i);
var part = new ImageFilePart
{
Index = i,
ByteRange = new ImageFilePartByteRange { Start = partStartOffset, End = partEndOffset },
Key = partKey,
HeadUrl = S3Client.GeneratePreSignedURL(
BucketName,
partKey,
urlExpiration,
new Dictionary { { "Verb", "HEAD" } }),
GetUrl = S3Client.GeneratePreSignedURL(
BucketName,
partKey,
urlExpiration,
new Dictionary { { "Verb", "GET" } }),
DeleteUrl = S3Client.GeneratePreSignedURL(
BucketName,
partKey,
urlExpiration,
new Dictionary { { "Verb", "DELETE" } })
};
parts.PartInstances.Add(part);
partStartOffset += DefaultPartSize;
}
return new Import
{
Size = diskImageSize,
VolumeSize = VolumeSizeFor(diskImageSize, volumeSize),
PartsList = parts
};
}
///
/// Computes the size of the volume to hold the image in EC2, with optional override
/// by the user. Observation of the EC2 CLI shows a preferred minimum size for boot
/// volumes of 8GB.
///
/// The size of the image we're processing, in bytes
/// Optional size requested by the user
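/// Example (per the logic below): a 3 GiB image is clamped to the 8 GiB floor and
/// yields 8; a 20.5 GiB image yields ceil(20.5) = 21.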
static long VolumeSizeFor(long diskImageSize, long? requestedSize)
{
if (requestedSize.HasValue && requestedSize.Value > 0)
return requestedSize.Value;
var volumeSize = diskImageSize;
if (volumeSize < 8*OneGb)
volumeSize = 8*OneGb;
return (long)Math.Ceiling((double)volumeSize / (double)OneGb);
}
}
#region Manifest Serialization/Artifact Upload
///
///
/// Serialization class for processing the import and conversion of a virtual machine image to
/// a new Amazon EC2 instance, or a disk image to a new EBS volume.
///
///
/// This class represents the root of the data contained in the conversion manifest.
///
///
[XmlType("manifest")]
public class ImportManifestRoot
{
///
/// Gets and sets the Version property.
///
[XmlElement("version")]
public string Version { get; set; }
///
/// Gets and sets the FileFormat property.
///
[XmlElement("file-format")]
public string FileFormat { get; set; }
///
/// Gets and sets the ImporterField property.
///
[XmlElement("importer")]
public ImporterInfo ImporterField { get; set; }
///
/// Gets and sets the SelfDestructUrl property.
///
[XmlElement("self-destruct-url")]
public string SelfDestructUrl { get; set; }
///
/// Gets and sets the ImportData property.
///
[XmlElement("import")]
public Import ImportData { get; set; }
}
///
///
/// Serialization class for processing the import and conversion of a virtual machine image to
/// a new Amazon EC2 instance, or a disk image to a new EBS volume.
///
///
/// This class represents version and requester metadata for the conversion.
///
///
[XmlType("importer")]
public class ImporterInfo
{
///
/// Gets and sets the Name property.
///
[XmlElement("name")]
public string Name { get; set; }
///
/// Gets and sets the Version property.
///
[XmlElement("version")]
public string Version { get; set; }
///
/// Gets and sets the Release property.
///
[XmlElement("release")]
public string Release { get; set; }
}
///
///
/// Serialization class for processing the import and conversion of a virtual machine image to
/// a new Amazon EC2 instance, or a disk image to a new EBS volume.
///
///
/// This class represents the root of the data describing the data slices of the image file that are
/// to be uploaded to Amazon S3 and the requested size of the volume to hold the data after conversion.
///
///
public class Import
{
///
/// Disk image size
///
[XmlElement("size")]
public long Size { get; set; }
///
/// The EC2 volume size
///
[XmlElement("volume-size")]
public long VolumeSize { get; set; }
///
/// The list of parts.
///
[XmlElement("parts")]
public ImageFileParts PartsList { get; set; }
}
///
///
/// Serialization class for processing the import and conversion of a virtual machine image to
/// a new Amazon EC2 instance, or a disk image to a new EBS volume.
///
///
/// This class represents the list of 'slices' of the image file that is to be uploaded to
/// Amazon S3, together with helper to return the next slice for processing during the upload
/// process.
///
///
[XmlType("parts")]
public class ImageFileParts
{
///
/// The collection of parts making up the image.
///
[XmlElement(ElementName = "part")]
public List<ImageFilePart> PartInstances { get; set; }
///
/// The number of parts in the image.
///
[XmlAttribute("count")]
public int Count { get; set; }
///
/// Initializes an empty collection of parts
///
public ImageFileParts()
{
PartInstances = new List<ImageFilePart>();
}
///
/// Returns the next part to be uploaded, with its data loaded into the
/// supplied buffer.
///
///
/// We want to serialize read access to the image file stream so that we do not
/// 'thrash' or encounter problems with a non-seekable stream, but at the same
/// time parallelize the part uploads. Reading the data prior to returning the
/// part instance data satisfies this requirement.
///
/// The stream wrapping the image file.
/// Buffer into which the part data will be read.
///
/// The part to upload or null if the image file has now been consumed.
///
///
/// If an error occurs during a part upload, we currently cease yielding parts
/// to all workers, causing them to expire cleanly 'asap'. This strategy allows
/// us to change to a 'greedy' approach in future where we simply skip over the
/// part in error and do as much uploading as we can before exit.
///
internal ImageFilePart FetchNextPartForUpload(Stream imageFileStream, ref byte[] buffer)
{
ImageFilePart nextPart = null;
try
{
lock (_syncLock)
{
if (_failedUploads > 0) // ensures all workers quit asap
return null;
if (_nextPartForUpload < PartInstances.Count)
{
nextPart = PartInstances[_nextPartForUpload];
var offset = 0;
var bytesRemaining = (int)nextPart.ByteRange.Extent;
while (bytesRemaining > 0)
{
var bytesRead = imageFileStream.Read(buffer, offset, bytesRemaining);
if (bytesRead > 0)
{
offset += bytesRead;
bytesRemaining -= bytesRead;
}
else
{
throw new InvalidOperationException("Encountered unexpected end of stream");
}
}
_nextPartForUpload++;
}
}
}
catch
{
Interlocked.Increment(ref _failedUploads);
}
return nextPart;
}
///
/// Allows the upload threadpool workers to register that one or
/// more parts failed to upload successfully.
///
internal void RegisterUploadFailure()
{
Interlocked.Increment(ref _failedUploads);
}
///
/// True if any image file part failed to upload successfully.
///
internal bool HasFailedUploads
{
get
{
var ret = false;
lock (_syncLock)
{
ret = _failedUploads > 0;
}
return ret;
}
}
private readonly object _syncLock = new object();
private int _nextPartForUpload;
private int _failedUploads;
}
///
///
/// Serialization class for processing the import and conversion of a virtual machine image to
/// a new Amazon EC2 instance, or a disk image to a new EBS volume.
///
///
/// This class represents a single 'slice' of the image file that is to be uploaded to
/// Amazon S3.
///
///
[XmlType("part")]
public class ImageFilePart
{
///
/// The range of bytes representing the part.
///
[XmlElement("byte-range")]
public ImageFilePartByteRange ByteRange { get; set; }
///
/// The Amazon S3 object key of the part.
///
[XmlElement("key")]
public string Key { get; set; }
///
/// The Url for head requests.
///
[XmlElement("head-url")]
public string HeadUrl { get; set; }
///
/// The Url for get requests.
///
[XmlElement("get-url")]
public string GetUrl { get; set; }
///
/// The Url for delete requests.
///
[XmlElement("delete-url")]
public string DeleteUrl { get; set; }
///
/// The sequence number of the part within the whole image.
///
[XmlAttribute(AttributeName = "index")]
public int Index { get; set; }
///
/// Indicates whether the part upload completed successfully.
///
internal bool UploadCompleted { get; set; }
}
///
///
/// Serialization class for processing the import and conversion of a virtual machine image to
/// a new Amazon EC2 instance, or a disk image to a new EBS volume.
///
///
/// This class represents the offset and extent of a single 'slice' of the image file that is to
/// be uploaded to Amazon S3.
///
///
public class ImageFilePartByteRange
{
///
/// The byte offset indicating the end of the range.
///
[XmlAttribute("end")]
public long End { get; set; }
///
/// The byte offset indicating the start of the range.
///
[XmlAttribute("start")]
public long Start { get; set; }
///
/// The length of the byte range.
///
[XmlIgnore]
public long Extent { get { return End - Start + 1; } }
}
#endregion
///
/// State class used to pass data about the image part to be processed to the
/// thread pool worker.
///
internal class ImagePartUploadState
{
public ICoreAmazonS3 S3Client { get; set; }
public string BucketName { get; set; }
public ImageFileParts PartsList { get; set; }
public Stream ImageFileStream { get; set; }
public AutoResetEvent PartProcessed { get; set; }
}
///
/// Error enum for disk import
///
public enum DiskImportErrorStage
{
///
/// Error was detected during construction of the import
/// manifest/analysis of the image file
///
GeneratingManifest,
///
/// Error was detected during download/deserialization of
/// a previously uploaded manifest
///
ManifestInspection,
///
/// Error was detected during upload of the import manifest
/// to S3
///
UploadingManifest,
///
/// Error was detected during upload of the part artifacts making
/// up the image file
///
UploadingImageFile,
///
/// Error was detected during the sending of the request to EC2
/// to start conversion of the uploaded image file
///
SendingImportRequest
}
///
/// Wraps errors returned from the importer
///
[Serializable]
public class DiskImageImporterException : Exception
{
///
/// What stage the importer was at when the error occurred
///
public DiskImportErrorStage Stage { get; private set; }
internal DiskImageImporterException(DiskImportErrorStage stage, string message, Exception innerException)
: base(message, innerException)
{
Stage = stage;
}
internal DiskImageImporterException(DiskImportErrorStage stage, string message)
: this(stage, message, null)
{
}
internal DiskImageImporterException(DiskImportErrorStage stage, Exception innerException)
: this(stage, string.Empty, innerException)
{
}
///
/// Constructs a new instance of the DiskImageImporterException class with serialized data.
///
/// The SerializationInfo that holds the serialized object data about the exception being thrown.
/// The StreamingContext that contains contextual information about the source or destination.
/// The info parameter is null.
/// The class name is null or HResult is zero (0).
protected DiskImageImporterException(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context)
: base(info, context)
{
if (info != null)
{
this.Stage = (DiskImportErrorStage)info.GetValue("Stage", typeof(DiskImportErrorStage));
}
}
///
/// Sets the SerializationInfo with information about the exception.
///
/// The SerializationInfo that holds the serialized object data about the exception being thrown.
/// The StreamingContext that contains contextual information about the source or destination.
/// The info parameter is a null reference (Nothing in Visual Basic).
#if BCL35
[System.Security.Permissions.SecurityPermission(
System.Security.Permissions.SecurityAction.LinkDemand,
Flags = System.Security.Permissions.SecurityPermissionFlag.SerializationFormatter)]
#endif
[System.Security.SecurityCritical]
//// These FxCop rules are giving false-positives for this method
//[System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Security", "CA2123:OverrideLinkDemandsShouldBeIdenticalToBase")]
//[System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Security", "CA2134:MethodsMustOverrideWithConsistentTransparencyFxCopRule")]
public override void GetObjectData(System.Runtime.Serialization.SerializationInfo info, System.Runtime.Serialization.StreamingContext context)
{
base.GetObjectData(info, context);
if (info != null)
{
info.AddValue("Stage", this.Stage);
}
}
}
///
/// Settings configuring the EC2 instance launch phase after an image file has been
/// uploaded.
///
public class ImportLaunchConfiguration
{
///
/// The Amazon EC2 instance type to launch for the conversion
///
public InstanceType InstanceType { get; set; }
///
/// The instance operating system. Valid values: Windows | Linux
///
public PlatformValues Platform { get; set; }
///
/// The architecture of the instance. Valid values: i386 | x86_64
///
public ArchitectureValues Architecture { get; set; }
///
/// Optional description for the instance being imported.
///
public string Description { get; set; }
///
/// Optional availability zone to launch the instance into. If not specified one will be chosen for you
/// by EC2.
///
public string AvailabilityZone { get; set; }
///
/// One or more security group names. This is not supported for VMs imported into a VPC, which are
/// assigned the default security group. After a VM is imported into a VPC, you can change the instance
/// to use another security group.
///
public ICollection<string> SecurityGroupNames { get; set; }
///
/// [EC2-VPC] Optionally, you can use this parameter to assign the instance a specific available IP address
/// from the IP address range of the subnet.
///
public string PrivateIpAddress { get; set; }
///
/// [EC2-VPC] The ID of the subnet to launch the instance into.
///
public string SubnetId { get; set; }
///
/// Indicates whether to enable detailed monitoring for the instance.
///
public bool EnableMonitoring { get; set; }
///
/// Indicates whether an instance stops or terminates when you initiate shutdown from the instance (using
/// the operating system command for system shutdown). Valid values: stop | terminate.
///
public ShutdownBehavior InstanceInitiatedShutdownBehavior { get; set; }
///
/// Reserved for internal use.
///
public string AdditionalInfo { get; set; }
}
#endif
}