/* * Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.cloudwatchevidently.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.AmazonWebServiceRequest; /** * * @see AWS API * Documentation */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class GetExperimentResultsRequest extends com.amazonaws.AmazonWebServiceRequest implements Serializable, Cloneable { /** *
* The statistic used to calculate experiment results. Currently the only valid value is mean
, which
* uses the mean of the collected values as the statistic.
*
* The date and time that the experiment ended, if it is completed. This must be no longer than 30 days after the * experiment start time. *
*/ private java.util.Date endTime; /** ** The name of the experiment to retrieve the results of. *
*/ private String experiment; /** ** The names of the experiment metrics that you want to see the results of. *
*/ private java.util.List* In seconds, the amount of time to aggregate results together. *
*/ private Long period; /** ** The name or ARN of the project that contains the experiment that you want to see the results of. *
*/ private String project; /** *
* The names of the report types that you want to see. Currently, BayesianInference
is the only valid
* value.
*
* The statistics that you want to see in the returned results. *
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to measure
* how often you are willing to make a mistake in rejecting the null hypothesis. A general practice is to reject the
* null hypothesis and declare that the results are statistically significant when the p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between the
* baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
parameter
* between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The statistic
* uses the same statistic specified in the baseStat
parameter. Therefore, if baseStat
is
* mean
, this returns the mean of the values collected for each variation.
*
* The date and time that the experiment started. *
*/ private java.util.Date startTime; /** ** The names of the experiment treatments that you want to see the results for. *
*/ private java.util.List
* The statistic used to calculate experiment results. Currently the only valid value is mean
, which
* uses the mean of the collected values as the statistic.
*
mean
,
* which uses the mean of the collected values as the statistic.
* @see ExperimentBaseStat
*/
public void setBaseStat(String baseStat) {
this.baseStat = baseStat;
}
/**
 * <p>
 * The statistic used to calculate experiment results. Currently the only valid value is <code>mean</code>, which
 * uses the mean of the collected values as the statistic.
 * </p>
 * 
 * @return The statistic used to calculate experiment results. Currently the only valid value is <code>mean</code>,
 *         which uses the mean of the collected values as the statistic.
 * @see ExperimentBaseStat
 */
public String getBaseStat() {
    return this.baseStat;
}
/**
 * <p>
 * The statistic used to calculate experiment results. Currently the only valid value is <code>mean</code>, which
 * uses the mean of the collected values as the statistic.
 * </p>
 * 
 * @param baseStat
 *        The statistic used to calculate experiment results. Currently the only valid value is <code>mean</code>,
 *        which uses the mean of the collected values as the statistic.
 * @return Returns a reference to this object so that method calls can be chained together.
 * @see ExperimentBaseStat
 */
public GetExperimentResultsRequest withBaseStat(String baseStat) {
    setBaseStat(baseStat);
    return this;
}
/**
 * <p>
 * The statistic used to calculate experiment results. Currently the only valid value is <code>mean</code>, which
 * uses the mean of the collected values as the statistic.
 * </p>
 * 
 * @param baseStat
 *        The statistic used to calculate experiment results. Currently the only valid value is <code>mean</code>,
 *        which uses the mean of the collected values as the statistic.
 * @return Returns a reference to this object so that method calls can be chained together.
 * @see ExperimentBaseStat
 */
public GetExperimentResultsRequest withBaseStat(ExperimentBaseStat baseStat) {
    this.baseStat = baseStat.toString();
    return this;
}
/**
 * <p>
 * The date and time that the experiment ended, if it is completed. This must be no longer than 30 days after the
 * experiment start time.
 * </p>
 * 
 * @param endTime
 *        The date and time that the experiment ended, if it is completed. This must be no longer than 30 days
 *        after the experiment start time.
 */
public void setEndTime(java.util.Date endTime) {
    this.endTime = endTime;
}

/**
 * <p>
 * The date and time that the experiment ended, if it is completed. This must be no longer than 30 days after the
 * experiment start time.
 * </p>
 * 
 * @return The date and time that the experiment ended, if it is completed. This must be no longer than 30 days
 *         after the experiment start time.
 */
public java.util.Date getEndTime() {
    return this.endTime;
}

/**
 * <p>
 * The date and time that the experiment ended, if it is completed. This must be no longer than 30 days after the
 * experiment start time.
 * </p>
 * 
 * @param endTime
 *        The date and time that the experiment ended, if it is completed. This must be no longer than 30 days
 *        after the experiment start time.
 * @return Returns a reference to this object so that method calls can be chained together.
 */
public GetExperimentResultsRequest withEndTime(java.util.Date endTime) {
    setEndTime(endTime);
    return this;
}

/**
 * <p>
 * The name of the experiment to retrieve the results of.
* * @param experiment * The name of the experiment to retrieve the results of. */ public void setExperiment(String experiment) { this.experiment = experiment; } /** ** The name of the experiment to retrieve the results of. *
* * @return The name of the experiment to retrieve the results of. */ public String getExperiment() { return this.experiment; } /** ** The name of the experiment to retrieve the results of. *
* * @param experiment * The name of the experiment to retrieve the results of. * @return Returns a reference to this object so that method calls can be chained together. */ public GetExperimentResultsRequest withExperiment(String experiment) { setExperiment(experiment); return this; } /** ** The names of the experiment metrics that you want to see the results of. *
* * @return The names of the experiment metrics that you want to see the results of. */ public java.util.List* The names of the experiment metrics that you want to see the results of. *
* * @param metricNames * The names of the experiment metrics that you want to see the results of. */ public void setMetricNames(java.util.Collection* The names of the experiment metrics that you want to see the results of. *
** NOTE: This method appends the values to the existing list (if any). Use * {@link #setMetricNames(java.util.Collection)} or {@link #withMetricNames(java.util.Collection)} if you want to * override the existing values. *
* * @param metricNames * The names of the experiment metrics that you want to see the results of. * @return Returns a reference to this object so that method calls can be chained together. */ public GetExperimentResultsRequest withMetricNames(String... metricNames) { if (this.metricNames == null) { setMetricNames(new java.util.ArrayList* The names of the experiment metrics that you want to see the results of. *
* * @param metricNames * The names of the experiment metrics that you want to see the results of. * @return Returns a reference to this object so that method calls can be chained together. */ public GetExperimentResultsRequest withMetricNames(java.util.Collection* In seconds, the amount of time to aggregate results together. *
* * @param period * In seconds, the amount of time to aggregate results together. */ public void setPeriod(Long period) { this.period = period; } /** ** In seconds, the amount of time to aggregate results together. *
* * @return In seconds, the amount of time to aggregate results together. */ public Long getPeriod() { return this.period; } /** ** In seconds, the amount of time to aggregate results together. *
* * @param period * In seconds, the amount of time to aggregate results together. * @return Returns a reference to this object so that method calls can be chained together. */ public GetExperimentResultsRequest withPeriod(Long period) { setPeriod(period); return this; } /** ** The name or ARN of the project that contains the experiment that you want to see the results of. *
* * @param project * The name or ARN of the project that contains the experiment that you want to see the results of. */ public void setProject(String project) { this.project = project; } /** ** The name or ARN of the project that contains the experiment that you want to see the results of. *
* * @return The name or ARN of the project that contains the experiment that you want to see the results of. */ public String getProject() { return this.project; } /** ** The name or ARN of the project that contains the experiment that you want to see the results of. *
* * @param project * The name or ARN of the project that contains the experiment that you want to see the results of. * @return Returns a reference to this object so that method calls can be chained together. */ public GetExperimentResultsRequest withProject(String project) { setProject(project); return this; } /** *
* The names of the report types that you want to see. Currently, BayesianInference
is the only valid
* value.
*
BayesianInference
is the only
* valid value.
* @see ExperimentReportName
*/
public java.util.List
* The names of the report types that you want to see. Currently, BayesianInference
is the only valid
* value.
*
BayesianInference
is the only
* valid value.
* @see ExperimentReportName
*/
public void setReportNames(java.util.Collection
* The names of the report types that you want to see. Currently, BayesianInference
is the only valid
* value.
*
* NOTE: This method appends the values to the existing list (if any). Use * {@link #setReportNames(java.util.Collection)} or {@link #withReportNames(java.util.Collection)} if you want to * override the existing values. *
* * @param reportNames * The names of the report types that you want to see. Currently,BayesianInference
is the only
* valid value.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ExperimentReportName
*/
public GetExperimentResultsRequest withReportNames(String... reportNames) {
if (this.reportNames == null) {
setReportNames(new java.util.ArrayList
* The names of the report types that you want to see. Currently, BayesianInference
is the only valid
* value.
*
BayesianInference
is the only
* valid value.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ExperimentReportName
*/
public GetExperimentResultsRequest withReportNames(java.util.Collection
* The names of the report types that you want to see. Currently, BayesianInference
is the only valid
* value.
*
BayesianInference
is the only
* valid value.
* @return Returns a reference to this object so that method calls can be chained together.
* @see ExperimentReportName
*/
public GetExperimentResultsRequest withReportNames(ExperimentReportName... reportNames) {
java.util.ArrayList* The statistics that you want to see in the returned results. *
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to measure
* how often you are willing to make a mistake in rejecting the null hypothesis. A general practice is to reject the
* null hypothesis and declare that the results are statistically significant when the p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between the
* baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
parameter
* between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The statistic
* uses the same statistic specified in the baseStat
parameter. Therefore, if baseStat
is
* mean
, this returns the mean of the values collected for each variation.
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to
* measure how often you are willing to make a mistake in rejecting the null hypothesis. A general practice
* is to reject the null hypothesis and declare that the results are statistically significant when the
* p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference
* between the baseStat
of a variation and the baseline. Evidently returns the 95% confidence
* interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
* parameter between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The
* statistic uses the same statistic specified in the baseStat
parameter. Therefore, if
* baseStat
is mean
, this returns the mean of the values collected for each
* variation.
*
* The statistics that you want to see in the returned results. *
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to measure
* how often you are willing to make a mistake in rejecting the null hypothesis. A general practice is to reject the
* null hypothesis and declare that the results are statistically significant when the p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between the
* baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
parameter
* between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The statistic
* uses the same statistic specified in the baseStat
parameter. Therefore, if baseStat
is
* mean
, this returns the mean of the values collected for each variation.
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to
* measure how often you are willing to make a mistake in rejecting the null hypothesis. A general practice
* is to reject the null hypothesis and declare that the results are statistically significant when the
* p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between
* the baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
* parameter between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The
* statistic uses the same statistic specified in the baseStat
parameter. Therefore, if
* baseStat
is mean
, this returns the mean of the values collected for each
* variation.
*
* The statistics that you want to see in the returned results. *
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to measure
* how often you are willing to make a mistake in rejecting the null hypothesis. A general practice is to reject the
* null hypothesis and declare that the results are statistically significant when the p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between the
* baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
parameter
* between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The statistic
* uses the same statistic specified in the baseStat
parameter. Therefore, if baseStat
is
* mean
, this returns the mean of the values collected for each variation.
*
* NOTE: This method appends the values to the existing list (if any). Use * {@link #setResultStats(java.util.Collection)} or {@link #withResultStats(java.util.Collection)} if you want to * override the existing values. *
* * @param resultStats * The statistics that you want to see in the returned results. *
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to
* measure how often you are willing to make a mistake in rejecting the null hypothesis. A general practice
* is to reject the null hypothesis and declare that the results are statistically significant when the
* p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between
* the baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
* parameter between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The
* statistic uses the same statistic specified in the baseStat
parameter. Therefore, if
* baseStat
is mean
, this returns the mean of the values collected for each
* variation.
*
* The statistics that you want to see in the returned results. *
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to measure
* how often you are willing to make a mistake in rejecting the null hypothesis. A general practice is to reject the
* null hypothesis and declare that the results are statistically significant when the p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between the
* baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
parameter
* between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The statistic
* uses the same statistic specified in the baseStat
parameter. Therefore, if baseStat
is
* mean
, this returns the mean of the values collected for each variation.
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to
* measure how often you are willing to make a mistake in rejecting the null hypothesis. A general practice
* is to reject the null hypothesis and declare that the results are statistically significant when the
* p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between
* the baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
* parameter between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The
* statistic uses the same statistic specified in the baseStat
parameter. Therefore, if
* baseStat
is mean
, this returns the mean of the values collected for each
* variation.
*
* The statistics that you want to see in the returned results. *
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to measure
* how often you are willing to make a mistake in rejecting the null hypothesis. A general practice is to reject the
* null hypothesis and declare that the results are statistically significant when the p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between the
* baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
parameter
* between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The statistic
* uses the same statistic specified in the baseStat
parameter. Therefore, if baseStat
is
* mean
, this returns the mean of the values collected for each variation.
*
* PValue
specifies to use p-values for the results. A p-value is used in hypothesis testing to
* measure how often you are willing to make a mistake in rejecting the null hypothesis. A general practice
* is to reject the null hypothesis and declare that the results are statistically significant when the
* p-value is less than 0.05.
*
* ConfidenceInterval
specifies a confidence interval for the results. The confidence interval
* represents the range of values for the chosen metric that is likely to contain the true difference between
* the baseStat
of a variation and the baseline. Evidently returns the 95% confidence interval.
*
* TreatmentEffect
is the difference in the statistic specified by the baseStat
* parameter between each variation and the default variation.
*
* BaseStat
returns the statistical values collected for the metric for each variation. The
* statistic uses the same statistic specified in the baseStat
parameter. Therefore, if
* baseStat
is mean
, this returns the mean of the values collected for each
* variation.
*
* The date and time that the experiment started. *
* * @param startTime * The date and time that the experiment started. */ public void setStartTime(java.util.Date startTime) { this.startTime = startTime; } /** ** The date and time that the experiment started. *
* * @return The date and time that the experiment started. */ public java.util.Date getStartTime() { return this.startTime; } /** ** The date and time that the experiment started. *
* * @param startTime * The date and time that the experiment started. * @return Returns a reference to this object so that method calls can be chained together. */ public GetExperimentResultsRequest withStartTime(java.util.Date startTime) { setStartTime(startTime); return this; } /** ** The names of the experiment treatments that you want to see the results for. *
* * @return The names of the experiment treatments that you want to see the results for. */ public java.util.List* The names of the experiment treatments that you want to see the results for. *
* * @param treatmentNames * The names of the experiment treatments that you want to see the results for. */ public void setTreatmentNames(java.util.Collection* The names of the experiment treatments that you want to see the results for. *
** NOTE: This method appends the values to the existing list (if any). Use * {@link #setTreatmentNames(java.util.Collection)} or {@link #withTreatmentNames(java.util.Collection)} if you want * to override the existing values. *
* * @param treatmentNames * The names of the experiment treatments that you want to see the results for. * @return Returns a reference to this object so that method calls can be chained together. */ public GetExperimentResultsRequest withTreatmentNames(String... treatmentNames) { if (this.treatmentNames == null) { setTreatmentNames(new java.util.ArrayList* The names of the experiment treatments that you want to see the results for. *
* * @param treatmentNames * The names of the experiment treatments that you want to see the results for. * @return Returns a reference to this object so that method calls can be chained together. */ public GetExperimentResultsRequest withTreatmentNames(java.util.Collection