/*
 * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
 * the License. A copy of the License is located at
 *
 * http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
 * and limitations under the License.
 */
package com.amazonaws.services.fsx.model;

import java.io.Serializable;
import javax.annotation.Generated;

import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;

/**
 * Used to specify changes to the OpenZFS configuration for the volume that you are updating.
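 *
 * As an illustrative sketch only (the values below are assumptions, not service documentation), the fluent setters
 * defined on this class can be chained to build an update:
 *
 * <pre>
 * UpdateOpenZFSVolumeConfiguration config = new UpdateOpenZFSVolumeConfiguration()
 *         .withStorageCapacityReservationGiB(-1) // -1 removes an existing reservation
 *         .withStorageCapacityQuotaGiB(100)      // quota in GiB; may exceed the parent volume's storage
 *         .withRecordSizeKiB(128)                // the default record size; databases may prefer smaller values
 *         .withDataCompressionType("ZSTD")       // NONE, ZSTD, or LZ4
 *         .withReadOnly(false);
 * </pre>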
 *
 * @see AWS API Documentation
 */
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class UpdateOpenZFSVolumeConfiguration implements Serializable, Cloneable, StructuredPojo {

    /**
     * The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than
     * the parent volume has reserved. You can specify a value of <code>-1</code> to unset a volume's storage capacity
     * reservation.
     */
    private Integer storageCapacityReservationGiB;

    /**
     * The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a
     * quota larger than the storage on the parent volume. You can specify a value of <code>-1</code> to unset a
     * volume's storage capacity quota.
     */
    private Integer storageCapacityQuotaGiB;

    /**
* Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, * 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows * can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For * additional guidance on when to set a custom record size, see Tips for * maximizing performance in the Amazon FSx for OpenZFS User Guide. *
*/ private Integer recordSizeKiB; /** *
     * Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by
     * default.
     * <ul>
     * <li><code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</li>
     * <li><code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm.
     * Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</li>
     * <li><code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to
     * Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</li>
     * </ul>
     */
    private String dataCompressionType;

    /**
     * The configuration object for mounting a Network File System (NFS) file system.
     */
    private java.util.List<OpenZFSNfsExport> nfsExports;

    /**
     * An object specifying how much storage users or groups can use on the volume.
     */
    private java.util.List<OpenZFSUserOrGroupQuota> userAndGroupQuotas;

    /**
     * A Boolean value indicating whether the volume is read-only.
     */
    private Boolean readOnly;

    /**
     * The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than
     * the parent volume has reserved. You can specify a value of <code>-1</code> to unset a volume's storage capacity
     * reservation.
     *
     * @param storageCapacityReservationGiB
     *        The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more
     *        storage than the parent volume has reserved. You can specify a value of <code>-1</code> to unset a
     *        volume's storage capacity reservation.
*/
public void setStorageCapacityReservationGiB(Integer storageCapacityReservationGiB) {
this.storageCapacityReservationGiB = storageCapacityReservationGiB;
}
/**
     * The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than
     * the parent volume has reserved. You can specify a value of <code>-1</code> to unset a volume's storage capacity
     * reservation.
     *
     * @return The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more
     *         storage than the parent volume has reserved. You can specify a value of <code>-1</code> to unset a
     *         volume's storage capacity reservation.
*/
public Integer getStorageCapacityReservationGiB() {
return this.storageCapacityReservationGiB;
}
/**
     * The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than
     * the parent volume has reserved. You can specify a value of <code>-1</code> to unset a volume's storage capacity
     * reservation.
     *
     * @param storageCapacityReservationGiB
     *        The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more
     *        storage than the parent volume has reserved. You can specify a value of <code>-1</code> to unset a
     *        volume's storage capacity reservation.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public UpdateOpenZFSVolumeConfiguration withStorageCapacityReservationGiB(Integer storageCapacityReservationGiB) {
setStorageCapacityReservationGiB(storageCapacityReservationGiB);
return this;
}
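
    /*
     * Illustrative sketch (the values are assumptions): passing -1 through the fluent setter above removes an
     * existing reservation on the volume being updated.
     *
     *     UpdateOpenZFSVolumeConfiguration unsetReservation = new UpdateOpenZFSVolumeConfiguration()
     *             .withStorageCapacityReservationGiB(-1);
     */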
/**
     * The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a
     * quota larger than the storage on the parent volume. You can specify a value of <code>-1</code> to unset a
     * volume's storage capacity quota.
     *
     * @param storageCapacityQuotaGiB
     *        The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can
     *        specify a quota larger than the storage on the parent volume. You can specify a value of
     *        <code>-1</code> to unset a volume's storage capacity quota.
*/
public void setStorageCapacityQuotaGiB(Integer storageCapacityQuotaGiB) {
this.storageCapacityQuotaGiB = storageCapacityQuotaGiB;
}
/**
     * The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a
     * quota larger than the storage on the parent volume. You can specify a value of <code>-1</code> to unset a
     * volume's storage capacity quota.
     *
     * @return The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can
     *         specify a quota larger than the storage on the parent volume. You can specify a value of
     *         <code>-1</code> to unset a volume's storage capacity quota.
*/
public Integer getStorageCapacityQuotaGiB() {
return this.storageCapacityQuotaGiB;
}
/**
     * The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a
     * quota larger than the storage on the parent volume. You can specify a value of <code>-1</code> to unset a
     * volume's storage capacity quota.
     *
     * @param storageCapacityQuotaGiB
     *        The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can
     *        specify a quota larger than the storage on the parent volume. You can specify a value of
     *        <code>-1</code> to unset a volume's storage capacity quota.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public UpdateOpenZFSVolumeConfiguration withStorageCapacityQuotaGiB(Integer storageCapacityQuotaGiB) {
setStorageCapacityQuotaGiB(storageCapacityQuotaGiB);
return this;
}
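
    /*
     * Illustrative sketch (the values are assumptions): the quota may be set larger than the parent volume's storage,
     * and -1 removes a previously set quota.
     *
     *     UpdateOpenZFSVolumeConfiguration quotaUpdate = new UpdateOpenZFSVolumeConfiguration()
     *             .withStorageCapacityQuotaGiB(2048); // may exceed the parent volume's storage
     *     quotaUpdate.setStorageCapacityQuotaGiB(-1); // -1 unsets the quota
     */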
/**
* * Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, * 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows * can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For * additional guidance on when to set a custom record size, see Tips for * maximizing performance in the Amazon FSx for OpenZFS User Guide. *
* * @param recordSizeKiB * Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, * 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. * Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a * larger record size. For additional guidance on when to set a custom record size, see Tips for * maximizing performance in the Amazon FSx for OpenZFS User Guide. */ public void setRecordSizeKiB(Integer recordSizeKiB) { this.recordSizeKiB = recordSizeKiB; } /** ** Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, * 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows * can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For * additional guidance on when to set a custom record size, see Tips for * maximizing performance in the Amazon FSx for OpenZFS User Guide. *
* * @return Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, * 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. * Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a * larger record size. For additional guidance on when to set a custom record size, see Tips * for maximizing performance in the Amazon FSx for OpenZFS User Guide. */ public Integer getRecordSizeKiB() { return this.recordSizeKiB; } /** ** Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, 128, 256, * 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. Database workflows * can benefit from a smaller record size, while streaming workflows can benefit from a larger record size. For * additional guidance on when to set a custom record size, see Tips for * maximizing performance in the Amazon FSx for OpenZFS User Guide. *
* * @param recordSizeKiB * Specifies the record size of an OpenZFS volume, in kibibytes (KiB). Valid values are 4, 8, 16, 32, 64, * 128, 256, 512, or 1024 KiB. The default is 128 KiB. Most workloads should use the default record size. * Database workflows can benefit from a smaller record size, while streaming workflows can benefit from a * larger record size. For additional guidance on when to set a custom record size, see Tips for * maximizing performance in the Amazon FSx for OpenZFS User Guide. * @return Returns a reference to this object so that method calls can be chained together. */ public UpdateOpenZFSVolumeConfiguration withRecordSizeKiB(Integer recordSizeKiB) { setRecordSizeKiB(recordSizeKiB); return this; } /** *
     * Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by
     * default.
     * <ul>
     * <li><code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</li>
     * <li><code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm.
     * Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</li>
     * <li><code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to
     * Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</li>
     * </ul>
     *
     * @param dataCompressionType
     *        Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code>
     *        by default.
     *        <ul>
     *        <li><code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</li>
     *        <li><code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression
     *        algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage
     *        utilization.</li>
     *        <li><code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared
     *        to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</li>
     *        </ul>
     */
    public void setDataCompressionType(String dataCompressionType) {
        this.dataCompressionType = dataCompressionType;
    }

    /**
     * Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by
     * default.
     * <ul>
     * <li><code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</li>
     * <li><code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm.
     * Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</li>
     * <li><code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to
     * Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</li>
     * </ul>
     *
     * @return Specifies the method used to compress the data on the volume. The compression type is
     *         <code>NONE</code> by default.
     *         <ul>
     *         <li><code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</li>
     *         <li><code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression
     *         algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage
     *         utilization.</li>
     *         <li><code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared
     *         to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</li>
     *         </ul>
     */
    public String getDataCompressionType() {
        return this.dataCompressionType;
    }

    /**
     * Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by
     * default.
     * <ul>
     * <li><code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</li>
     * <li><code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm.
     * Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</li>
     * <li><code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to
     * Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</li>
     * </ul>
     *
     * @param dataCompressionType
     *        Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code>
     *        by default.
     *        <ul>
     *        <li><code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</li>
     *        <li><code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression
     *        algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage
     *        utilization.</li>
     *        <li><code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared
     *        to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</li>
     *        </ul>
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public UpdateOpenZFSVolumeConfiguration withDataCompressionType(String dataCompressionType) {
        setDataCompressionType(dataCompressionType);
        return this;
    }

    /**
     * Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code> by
     * default.
     * <ul>
     * <li><code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</li>
     * <li><code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm.
     * Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage utilization.</li>
     * <li><code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared to
     * Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</li>
     * </ul>
     *
     * @param dataCompressionType
     *        Specifies the method used to compress the data on the volume. The compression type is <code>NONE</code>
     *        by default.
     *        <ul>
     *        <li><code>NONE</code> - Doesn't compress the data on the volume. <code>NONE</code> is the default.</li>
     *        <li><code>ZSTD</code> - Compresses the data in the volume using the Zstandard (ZSTD) compression
     *        algorithm. Compared to LZ4, Z-Standard provides a better compression ratio to minimize on-disk storage
     *        utilization.</li>
     *        <li><code>LZ4</code> - Compresses the data in the volume using the LZ4 compression algorithm. Compared
     *        to Z-Standard, LZ4 is less compute-intensive and delivers higher write throughput speeds.</li>
     *        </ul>
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public UpdateOpenZFSVolumeConfiguration withDataCompressionType(OpenZFSDataCompressionType dataCompressionType) {
        this.dataCompressionType = dataCompressionType.toString();
        return this;
    }
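
    /*
     * Illustrative sketch (the values are assumptions): either the String form or the OpenZFSDataCompressionType
     * overload can be used; ZSTD favors compression ratio, LZ4 favors lower CPU use and higher write throughput.
     *
     *     new UpdateOpenZFSVolumeConfiguration().withDataCompressionType("LZ4");
     *     new UpdateOpenZFSVolumeConfiguration().withDataCompressionType(OpenZFSDataCompressionType.ZSTD);
     */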
    /**
     * The configuration object for mounting a Network File System (NFS) file system.
     *
     * @return The configuration object for mounting a Network File System (NFS) file system.
     */
    public java.util.List<OpenZFSNfsExport> getNfsExports() {
        return nfsExports;
    }

    /**
     * The configuration object for mounting a Network File System (NFS) file system.
     *
     * @param nfsExports
     *        The configuration object for mounting a Network File System (NFS) file system.
     */
    public void setNfsExports(java.util.Collection<OpenZFSNfsExport> nfsExports) {
        if (nfsExports == null) {
            this.nfsExports = null;
            return;
        }

        this.nfsExports = new java.util.ArrayList<OpenZFSNfsExport>(nfsExports);
    }

    /**
     * The configuration object for mounting a Network File System (NFS) file system.
     * <p>
     * NOTE: This method appends the values to the existing list (if any). Use
     * {@link #setNfsExports(java.util.Collection)} or {@link #withNfsExports(java.util.Collection)} if you want to
     * override the existing values.
     * </p>
     *
     * @param nfsExports
     *        The configuration object for mounting a Network File System (NFS) file system.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public UpdateOpenZFSVolumeConfiguration withNfsExports(OpenZFSNfsExport... nfsExports) {
        if (this.nfsExports == null) {
            setNfsExports(new java.util.ArrayList<OpenZFSNfsExport>(nfsExports.length));
        }
        for (OpenZFSNfsExport ele : nfsExports) {
            this.nfsExports.add(ele);
        }
        return this;
    }

    /**
     * The configuration object for mounting a Network File System (NFS) file system.
     *
     * @param nfsExports
     *        The configuration object for mounting a Network File System (NFS) file system.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public UpdateOpenZFSVolumeConfiguration withNfsExports(java.util.Collection<OpenZFSNfsExport> nfsExports) {
        setNfsExports(nfsExports);
        return this;
    }
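
    /*
     * Illustrative sketch (the export instances and the config variable are assumptions): the varargs form appends to
     * any existing list, so use the Collection form to replace it outright. The same applies to the
     * userAndGroupQuotas methods below.
     *
     *     OpenZFSNfsExport exportA = new OpenZFSNfsExport();
     *     OpenZFSNfsExport exportB = new OpenZFSNfsExport();
     *     config.withNfsExports(exportA).withNfsExports(exportB);             // list now holds both exports
     *     config.setNfsExports(java.util.Collections.singletonList(exportA)); // replaces the whole list
     */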
    /**
     * An object specifying how much storage users or groups can use on the volume.
     *
     * @return An object specifying how much storage users or groups can use on the volume.
     */
    public java.util.List<OpenZFSUserOrGroupQuota> getUserAndGroupQuotas() {
        return userAndGroupQuotas;
    }

    /**
     * An object specifying how much storage users or groups can use on the volume.
     *
     * @param userAndGroupQuotas
     *        An object specifying how much storage users or groups can use on the volume.
     */
    public void setUserAndGroupQuotas(java.util.Collection<OpenZFSUserOrGroupQuota> userAndGroupQuotas) {
        if (userAndGroupQuotas == null) {
            this.userAndGroupQuotas = null;
            return;
        }

        this.userAndGroupQuotas = new java.util.ArrayList<OpenZFSUserOrGroupQuota>(userAndGroupQuotas);
    }

    /**
     * An object specifying how much storage users or groups can use on the volume.
     * <p>
     * NOTE: This method appends the values to the existing list (if any). Use
     * {@link #setUserAndGroupQuotas(java.util.Collection)} or {@link #withUserAndGroupQuotas(java.util.Collection)} if
     * you want to override the existing values.
     * </p>
     *
     * @param userAndGroupQuotas
     *        An object specifying how much storage users or groups can use on the volume.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public UpdateOpenZFSVolumeConfiguration withUserAndGroupQuotas(OpenZFSUserOrGroupQuota... userAndGroupQuotas) {
        if (this.userAndGroupQuotas == null) {
            setUserAndGroupQuotas(new java.util.ArrayList<OpenZFSUserOrGroupQuota>(userAndGroupQuotas.length));
        }
        for (OpenZFSUserOrGroupQuota ele : userAndGroupQuotas) {
            this.userAndGroupQuotas.add(ele);
        }
        return this;
    }

    /**
     * An object specifying how much storage users or groups can use on the volume.
     *
     * @param userAndGroupQuotas
     *        An object specifying how much storage users or groups can use on the volume.
     * @return Returns a reference to this object so that method calls can be chained together.
     */
    public UpdateOpenZFSVolumeConfiguration withUserAndGroupQuotas(java.util.Collection<OpenZFSUserOrGroupQuota> userAndGroupQuotas) {
        setUserAndGroupQuotas(userAndGroupQuotas);
        return this;
    }

    /**
     * A Boolean value indicating whether the volume is read-only.
* * @param readOnly * A Boolean value indicating whether the volume is read-only. */ public void setReadOnly(Boolean readOnly) { this.readOnly = readOnly; } /** ** A Boolean value indicating whether the volume is read-only. *
* * @return A Boolean value indicating whether the volume is read-only. */ public Boolean getReadOnly() { return this.readOnly; } /** ** A Boolean value indicating whether the volume is read-only. *
* * @param readOnly * A Boolean value indicating whether the volume is read-only. * @return Returns a reference to this object so that method calls can be chained together. */ public UpdateOpenZFSVolumeConfiguration withReadOnly(Boolean readOnly) { setReadOnly(readOnly); return this; } /** ** A Boolean value indicating whether the volume is read-only. *
* * @return A Boolean value indicating whether the volume is read-only. */ public Boolean isReadOnly() { return this.readOnly; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. * * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getStorageCapacityReservationGiB() != null) sb.append("StorageCapacityReservationGiB: ").append(getStorageCapacityReservationGiB()).append(","); if (getStorageCapacityQuotaGiB() != null) sb.append("StorageCapacityQuotaGiB: ").append(getStorageCapacityQuotaGiB()).append(","); if (getRecordSizeKiB() != null) sb.append("RecordSizeKiB: ").append(getRecordSizeKiB()).append(","); if (getDataCompressionType() != null) sb.append("DataCompressionType: ").append(getDataCompressionType()).append(","); if (getNfsExports() != null) sb.append("NfsExports: ").append(getNfsExports()).append(","); if (getUserAndGroupQuotas() != null) sb.append("UserAndGroupQuotas: ").append(getUserAndGroupQuotas()).append(","); if (getReadOnly() != null) sb.append("ReadOnly: ").append(getReadOnly()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof UpdateOpenZFSVolumeConfiguration == false) return false; UpdateOpenZFSVolumeConfiguration other = (UpdateOpenZFSVolumeConfiguration) obj; if (other.getStorageCapacityReservationGiB() == null ^ this.getStorageCapacityReservationGiB() == null) return false; if (other.getStorageCapacityReservationGiB() != null && other.getStorageCapacityReservationGiB().equals(this.getStorageCapacityReservationGiB()) == false) return false; if (other.getStorageCapacityQuotaGiB() == null ^ this.getStorageCapacityQuotaGiB() == null) return false; if (other.getStorageCapacityQuotaGiB() != null && other.getStorageCapacityQuotaGiB().equals(this.getStorageCapacityQuotaGiB()) == false) return false; if (other.getRecordSizeKiB() == null ^ this.getRecordSizeKiB() == null) return false; if (other.getRecordSizeKiB() != null && other.getRecordSizeKiB().equals(this.getRecordSizeKiB()) == false) return false; if (other.getDataCompressionType() == null ^ this.getDataCompressionType() == null) return false; if (other.getDataCompressionType() != null && other.getDataCompressionType().equals(this.getDataCompressionType()) == false) return false; if (other.getNfsExports() == null ^ this.getNfsExports() == null) return false; if (other.getNfsExports() != null && other.getNfsExports().equals(this.getNfsExports()) == false) return false; if (other.getUserAndGroupQuotas() == null ^ this.getUserAndGroupQuotas() == null) return false; if (other.getUserAndGroupQuotas() != null && other.getUserAndGroupQuotas().equals(this.getUserAndGroupQuotas()) == false) return false; if (other.getReadOnly() == null ^ this.getReadOnly() == null) return false; if (other.getReadOnly() != null && other.getReadOnly().equals(this.getReadOnly()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getStorageCapacityReservationGiB() == null) ? 0 : getStorageCapacityReservationGiB().hashCode()); hashCode = prime * hashCode + ((getStorageCapacityQuotaGiB() == null) ? 
0 : getStorageCapacityQuotaGiB().hashCode()); hashCode = prime * hashCode + ((getRecordSizeKiB() == null) ? 0 : getRecordSizeKiB().hashCode()); hashCode = prime * hashCode + ((getDataCompressionType() == null) ? 0 : getDataCompressionType().hashCode()); hashCode = prime * hashCode + ((getNfsExports() == null) ? 0 : getNfsExports().hashCode()); hashCode = prime * hashCode + ((getUserAndGroupQuotas() == null) ? 0 : getUserAndGroupQuotas().hashCode()); hashCode = prime * hashCode + ((getReadOnly() == null) ? 0 : getReadOnly().hashCode()); return hashCode; } @Override public UpdateOpenZFSVolumeConfiguration clone() { try { return (UpdateOpenZFSVolumeConfiguration) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } @com.amazonaws.annotation.SdkInternalApi @Override public void marshall(ProtocolMarshaller protocolMarshaller) { com.amazonaws.services.fsx.model.transform.UpdateOpenZFSVolumeConfigurationMarshaller.getInstance().marshall(this, protocolMarshaller); } }