/*
 * SPDX-License-Identifier: Apache-2.0
 *
 * The OpenSearch Contributors require contributions made to
 * this file be licensed under the Apache-2.0 license or a
 * compatible open source license.
 */

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Modifications Copyright OpenSearch Contributors. See
 * GitHub history for details.
 */

package org.opensearch.index.translog;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.store.AlreadyClosedException;
import org.opensearch.Version;
import org.opensearch.common.Nullable;
import org.opensearch.common.UUIDs;
import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.common.io.stream.ReleasableBytesStreamOutput;
import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.common.lucene.uid.Versions;
import org.opensearch.common.util.BigArrays;
import org.opensearch.common.util.concurrent.ReleasableLock;
import org.opensearch.common.util.io.IOUtils;
import org.opensearch.common.lease.Releasable;
import org.opensearch.common.lease.Releasables;
import org.opensearch.core.common.Strings;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.VersionType;
import org.opensearch.index.engine.Engine;
import org.opensearch.index.engine.MissingHistoryOperationsException;
import org.opensearch.index.mapper.IdFieldMapper;
import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.mapper.Uid;
import org.opensearch.index.seqno.SequenceNumbers;
import org.opensearch.index.shard.AbstractIndexShardComponent;
import org.opensearch.index.shard.IndexShardComponent;
import org.opensearch.core.index.shard.ShardId;

import java.io.Closeable;
import java.io.EOFException;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.LongConsumer;
import java.util.function.LongSupplier;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import static org.opensearch.index.translog.TranslogConfig.EMPTY_TRANSLOG_BUFFER_SIZE;

/**
 * A Translog is a per index shard component that records all non-committed index operations in a durable manner.
 * In OpenSearch there is one Translog instance per {@link org.opensearch.index.engine.InternalEngine}.
 * Additionally, the engine also records a {@link #TRANSLOG_UUID_KEY} with each commit to ensure a strong
 * association between the lucene index and the transaction log file. This UUID is used to prevent accidental recovery from a transaction
 * log that belongs to a different engine.
 * <p>
 * Each Translog has only one translog file open for writes at any time referenced by a translog generation ID. This ID is written to a
 * {@code translog.ckp} file that is designed to fit in a single disk block such that a write of the file is atomic. The checkpoint file
 * is written on each fsync operation of the translog and records the number of operations written, the current translog's file generation,
 * its fsynced offset in bytes, and other important statistics.
 * <p>
 * When the current translog file reaches a certain size ({@link IndexSettings#INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING}), or when
 * a clear separation between old and new operations is needed (upon change in primary term), the current file is reopened for read only and a new
 * write only file is created. Any non-current, read only translog file always has a {@code translog-${gen}.ckp} associated with it
 * which is an fsynced copy of its last {@code translog.ckp} such that in disaster recovery last fsynced offsets, number of
 * operations etc. are still preserved.
 *
 * @opensearch.internal
 */
public abstract class Translog extends AbstractIndexShardComponent implements IndexShardComponent, Closeable {

    /*
     * TODO
     *  - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs needs this)
     *    but we can refactor as we go
     *  - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into its super class... the tricky bit is we
     *    need to be able to do random access reads even from the buffer
     *  - we need random exception on the FileSystem API tests for all this.
     *  - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already
     *    fsynced far enough
     */
    public static final String TRANSLOG_UUID_KEY = "translog_uuid";
    public static final String TRANSLOG_FILE_PREFIX = "translog-";
    public static final String TRANSLOG_FILE_SUFFIX = ".tlog";
    public static final String CHECKPOINT_SUFFIX = ".ckp";
    public static final String CHECKPOINT_FILE_NAME = "translog" + CHECKPOINT_SUFFIX;

    static final Pattern PARSE_STRICT_ID_PATTERN = Pattern.compile("^" + TRANSLOG_FILE_PREFIX + "(\\d+)(\\.tlog)$");

    public static final int DEFAULT_HEADER_SIZE_IN_BYTES = TranslogHeader.headerSizeInBytes(UUIDs.randomBase64UUID());

    // the list of translog readers is guaranteed to be in order of translog generation
    protected final List<TranslogReader> readers = new ArrayList<>();

    /**
     * Reads and returns the operation from the given location if the generation it references is still available.
     * Otherwise this method will return <code>null</code>.
     */
public Operation readOperation(Location location) throws IOException {
try (ReleasableLock ignored = readLock.acquire()) {
ensureOpen();
if (location.generation < getMinFileGeneration()) {
return null;
}
if (current.generation == location.generation) {
                // no need to fsync here; the read operation will ensure that buffers are written to disk
                // if they are still in RAM and we are reading into that position
return current.read(location);
} else {
                // read backwards - it's likely we need to read an operation that is recent
for (int i = readers.size() - 1; i >= 0; i--) {
TranslogReader translogReader = readers.get(i);
if (translogReader.generation == location.generation) {
return translogReader.read(location);
}
}
}
} catch (final Exception ex) {
closeOnTragicEvent(ex);
throw ex;
}
return null;
}
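
    // Illustrative sketch, not part of the original class: a caller that kept the Location returned when the
    // operation was written (e.g. by the full class's add method) can re-read it later; a null result means the
    // referenced generation is older than the minimum retained file generation and has been trimmed away.
    // The helper name and parameters are hypothetical.
    static Operation readOrNullExample(Translog translog, Location location) throws IOException {
        final Operation op = translog.readOperation(location);
        // op == null when location.generation < translog.getMinFileGeneration()
        return op;
    }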
private Snapshot newMultiSnapshot(TranslogSnapshot[] snapshots) throws IOException {
final Closeable onClose;
if (snapshots.length == 0) {
onClose = () -> {};
} else {
            assert Arrays.stream(snapshots).map(BaseTranslogReader::getGeneration).min(Long::compareTo).get() == snapshots[0].generation
                : "first reader generation of " + Arrays.toString(snapshots) + " is not the smallest";
onClose = acquireTranslogGenFromDeletionPolicy(snapshots[0].generation);
}
boolean success = false;
try {
Snapshot result = new MultiSnapshot(snapshots, onClose);
success = true;
return result;
} finally {
if (success == false) {
onClose.close();
}
}
}
    private Stream<? extends BaseTranslogReader> readersAboveMinSeqNo(long minSeqNo) {
        assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread()
            : "callers of readersAboveMinSeqNo must hold a lock: readLock ["
                + readLock.isHeldByCurrentThread()
                + "], writeLock ["
                + writeLock.isHeldByCurrentThread()
                + "]";
return Stream.concat(readers.stream(), Stream.of(current)).filter(reader -> minSeqNo <= reader.getCheckpoint().maxEffectiveSeqNo());
}
/**
* Acquires a lock on the translog files, preventing them from being trimmed
*/
public Closeable acquireRetentionLock() {
try (ReleasableLock lock = readLock.acquire()) {
ensureOpen();
final long viewGen = getMinFileGeneration();
return acquireTranslogGenFromDeletionPolicy(viewGen);
}
}
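
    // Illustrative sketch, not part of the original class: the retention lock is a plain Closeable, so a reader of
    // historical operations typically holds it in try-with-resources; closing it releases the acquired generation
    // and (per acquireTranslogGenFromDeletionPolicy below) triggers trimming of now-unreferenced files.
    // The helper name is hypothetical.
    static void retentionLockExample(Translog translog) throws IOException {
        try (Closeable ignored = translog.acquireRetentionLock()) {
            // while held, translog generations >= the minimum file generation at acquire time are kept
        }
    }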
private Closeable acquireTranslogGenFromDeletionPolicy(long viewGen) {
Releasable toClose = deletionPolicy.acquireTranslogGen(viewGen);
return () -> {
try {
toClose.close();
} finally {
trimUnreferencedReaders();
closeFilesIfNoPendingRetentionLocks();
}
};
}
/**
* Sync's the translog.
*/
public void sync() throws IOException {
try (ReleasableLock lock = readLock.acquire()) {
if (closed.get() == false) {
current.sync();
}
} catch (final Exception ex) {
closeOnTragicEvent(ex);
throw ex;
}
}
    /**
     * Returns <code>true</code> if an fsync is required to ensure durability of the translog's operations or its metadata.
     */
public boolean syncNeeded() {
try (ReleasableLock lock = readLock.acquire()) {
return current.syncNeeded();
}
}
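
    // Illustrative sketch, not part of the original class: under request durability a caller would typically check
    // syncNeeded() and fsync via sync() before acknowledging a request. The checkpoint is written on each fsync,
    // as described in the class javadoc. The helper name is hypothetical.
    static void syncIfNeededExample(Translog translog) throws IOException {
        if (translog.syncNeeded()) {
            translog.sync(); // fsyncs the current writer and persists an updated checkpoint
        }
    }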
/** package private for testing */
public static String getFilename(long generation) {
return TRANSLOG_FILE_PREFIX + generation + TRANSLOG_FILE_SUFFIX;
}
public static String getCommitCheckpointFileName(long generation) {
return TRANSLOG_FILE_PREFIX + generation + CHECKPOINT_SUFFIX;
}
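
    // Illustrative sketch, not part of the original class: for generation 42 the helpers above yield
    // "translog-42.tlog" and "translog-42.ckp", and PARSE_STRICT_ID_PATTERN recovers the generation from the
    // translog file name. The helper name is hypothetical.
    static long parseGenerationExample() {
        final String fileName = getFilename(42); // "translog-42.tlog"
        final Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(fileName);
        if (matcher.matches() == false) {
            throw new IllegalStateException("unexpected translog file name [" + fileName + "]");
        }
        return Long.parseLong(matcher.group(1)); // 42
    }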
    /**
     * Trims translog for terms of files below <code>belowTerm</code> and seq# above <code>aboveSeqNo</code>.
     * Effectively it moves the max visible seq# {@link Checkpoint#trimmedAboveSeqNo}, therefore {@link TranslogSnapshot} skips those operations.
     */
public void trimOperations(long belowTerm, long aboveSeqNo) throws IOException {
        assert aboveSeqNo >= SequenceNumbers.NO_OPS_PERFORMED : "aboveSeqNo has to be a valid sequence number";
try (ReleasableLock lock = writeLock.acquire()) {
ensureOpen();
if (current.getPrimaryTerm() < belowTerm) {
throw new IllegalArgumentException(
"Trimming the translog can only be done for terms lower than the current one. "
+ "Trim requested for term [ "
+ belowTerm
+ " ] , current is [ "
+ current.getPrimaryTerm()
+ " ]"
);
}
// we assume that the current translog generation doesn't have trimmable ops. Verify that.
assert current.assertNoSeqAbove(belowTerm, aboveSeqNo);
            // update all existing readers (if necessary) as checkpoint and reader are immutable
            final List<TranslogReader> newReaders = new ArrayList<>(readers.size());

    /**
     * Ensures that the given location has been synced / written to the underlying storage.
     *
     * @return Returns <code>true</code> iff this call caused an actual sync operation, otherwise <code>false</code>
     */
public abstract boolean ensureSynced(Location location) throws IOException;
/**
* Ensures that all locations in the given stream have been synced / written to the underlying storage.
* This method allows for internal optimization to minimize the amount of fsync operations if multiple
* locations must be synced.
*
     * @return Returns <code>true</code> iff this call caused an actual sync operation, otherwise <code>false</code>
*/
    public boolean ensureSynced(Stream<Location> locations) throws IOException {

        /**
         * Returns the next operation in the snapshot or <code>null</code> if we reached the end.
         */
Translog.Operation next() throws IOException;
}
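
    // Illustrative sketch, not part of the original class: a Snapshot is drained by calling next() until it
    // returns null and must be closed afterwards, since it pins the underlying translog generations while open.
    // The helper name is hypothetical.
    static int drainSnapshotExample(Snapshot snapshot) throws IOException {
        int count = 0;
        try {
            while (snapshot.next() != null) {
                count++;
            }
        } finally {
            snapshot.close();
        }
        return count;
    }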
/**
* A filtered snapshot consisting of only operations whose sequence numbers are in the given range
* between {@code fromSeqNo} (inclusive) and {@code toSeqNo} (inclusive). This filtered snapshot
* shares the same underlying resources with the {@code delegate} snapshot, therefore we should not
* use the {@code delegate} after passing it to this filtered snapshot.
*
* @opensearch.internal
*/
private static final class SeqNoFilterSnapshot implements Snapshot {
private final Snapshot delegate;
private int filteredOpsCount;
private int opsCount;
private boolean requiredFullRange;
private final long fromSeqNo; // inclusive
private final long toSeqNo; // inclusive
SeqNoFilterSnapshot(Snapshot delegate, long fromSeqNo, long toSeqNo, boolean requiredFullRange) {
assert fromSeqNo <= toSeqNo : "from_seq_no[" + fromSeqNo + "] > to_seq_no[" + toSeqNo + "]";
this.delegate = delegate;
this.fromSeqNo = fromSeqNo;
this.toSeqNo = toSeqNo;
this.requiredFullRange = requiredFullRange;
}
@Override
public int totalOperations() {
return delegate.totalOperations();
}
@Override
public int skippedOperations() {
return filteredOpsCount + delegate.skippedOperations();
}
@Override
public Operation next() throws IOException, MissingHistoryOperationsException {
Translog.Operation op;
while ((op = delegate.next()) != null) {
if (fromSeqNo <= op.seqNo() && op.seqNo() <= toSeqNo) {
opsCount++;
return op;
} else {
filteredOpsCount++;
}
}
if (requiredFullRange && (toSeqNo - fromSeqNo + 1) != opsCount) {
throw new MissingHistoryOperationsException(
"Not all operations between from_seqno [" + fromSeqNo + "] " + "and to_seqno [" + toSeqNo + "] found"
);
}
return null;
}
@Override
public void close() throws IOException {
delegate.close();
}
}
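
    // Illustrative sketch, not part of the original class: with requiredFullRange == true the filter above makes
    // next() throw MissingHistoryOperationsException unless every seq# in [fromSeqNo, toSeqNo] is eventually
    // returned. The helper name and the 0..9 range are hypothetical.
    private static Snapshot requireExactRangeExample(Snapshot delegate) {
        return new SeqNoFilterSnapshot(delegate, 0, 9, true); // expects all operations with seq# 0..9 to be present
    }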
/**
* A generic interface representing an operation performed on the transaction log.
* Each is associated with a type.
*
* @opensearch.internal
*/
public interface Operation {
/**
* The type of operation
*
* @opensearch.internal
*/
enum Type {
@Deprecated
CREATE((byte) 1),
INDEX((byte) 2),
DELETE((byte) 3),
NO_OP((byte) 4);
private final byte id;
Type(byte id) {
this.id = id;
}
public byte id() {
return this.id;
}
public static Type fromId(byte id) {
switch (id) {
case 1:
return CREATE;
case 2:
return INDEX;
case 3:
return DELETE;
case 4:
return NO_OP;
default:
throw new IllegalArgumentException("no type mapped for [" + id + "]");
}
}
}
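
        // Illustrative sketch, not part of the original interface: id() and fromId(byte) are inverses, which is
        // what the on-disk operation encoding relies on. The helper name is hypothetical.
        static Type typeRoundTripExample(Type type) {
            final byte wireId = type.id(); // the single byte written before each operation body
            return Type.fromId(wireId);    // resolves back to the same constant, or throws for unknown ids
        }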
Type opType();
long estimateSize();
Source getSource();
long seqNo();
long primaryTerm();
/**
* Reads the type and the operation from the given stream. The operation must be written with
* {@link Operation#writeOperation(StreamOutput, Operation)}
*/
static Operation readOperation(final StreamInput input) throws IOException {
final Translog.Operation.Type type = Translog.Operation.Type.fromId(input.readByte());
switch (type) {
case CREATE:
// the de-serialization logic in Index was identical to that of Create when create was deprecated
case INDEX:
return new Index(input);
case DELETE:
return new Delete(input);
case NO_OP:
return new NoOp(input);
default:
throw new AssertionError("no case for [" + type + "]");
}
}
/**
* Writes the type and translog operation to the given stream
*/
static void writeOperation(final StreamOutput output, final Operation operation) throws IOException {
output.writeByte(operation.opType().id());
switch (operation.opType()) {
case CREATE:
// the serialization logic in Index was identical to that of Create when create was deprecated
case INDEX:
((Index) operation).write(output);
break;
case DELETE:
((Delete) operation).write(output);
break;
case NO_OP:
((NoOp) operation).write(output);
break;
default:
throw new AssertionError("no case for [" + operation.opType() + "]");
}
}
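
        // Illustrative sketch, not part of the original interface: writeOperation and readOperation are symmetric,
        // so an operation written to a StreamOutput can be read back from a StreamInput positioned on the same
        // bytes. The helper name is hypothetical; the streams are supplied by the caller.
        static Operation roundTripExample(Operation op, StreamOutput out, StreamInput in) throws IOException {
            writeOperation(out, op);  // writes the type id byte followed by the operation body
            return readOperation(in); // reads the type id byte and dispatches to the matching constructor
        }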
}
/**
* The source in the translog
*
* @opensearch.internal
*/
public static class Source {
public final BytesReference source;
public final String routing;
public Source(BytesReference source, String routing) {
this.source = source;
this.routing = routing;
}
}
/**
* Indexing operation
*
* @opensearch.internal
*/
public static class Index implements Operation {
public static final int FORMAT_6_0 = 8; // since 6.0.0
public static final int FORMAT_NO_PARENT = FORMAT_6_0 + 1; // since 7.0
public static final int FORMAT_NO_VERSION_TYPE = FORMAT_NO_PARENT + 1;
public static final int FORMAT_NO_DOC_TYPE = FORMAT_NO_VERSION_TYPE + 1;
public static final int SERIALIZATION_FORMAT = FORMAT_NO_DOC_TYPE;
private final String id;
private final long autoGeneratedIdTimestamp;
private final long seqNo;
private final long primaryTerm;
private final long version;
private final BytesReference source;
private final String routing;
private Index(final StreamInput in) throws IOException {
final int format = in.readVInt(); // SERIALIZATION_FORMAT
assert format >= FORMAT_6_0 : "format was: " + format;
id = in.readString();
if (format < FORMAT_NO_DOC_TYPE) {
in.readString();
// can't assert that this is _doc because pre 2.0 indexes can have any name for a type
}
source = in.readBytesReference();
routing = in.readOptionalString();
if (format < FORMAT_NO_PARENT) {
in.readOptionalString(); // _parent
}
this.version = in.readLong();
if (format < FORMAT_NO_VERSION_TYPE) {
in.readByte(); // _version_type
}
this.autoGeneratedIdTimestamp = in.readLong();
seqNo = in.readLong();
primaryTerm = in.readLong();
}
public Index(Engine.Index index, Engine.IndexResult indexResult) {
this.id = index.id();
this.source = index.source();
this.routing = index.routing();
this.seqNo = indexResult.getSeqNo();
this.primaryTerm = index.primaryTerm();
this.version = indexResult.getVersion();
this.autoGeneratedIdTimestamp = index.getAutoGeneratedIdTimestamp();
}
public Index(String id, long seqNo, long primaryTerm, byte[] source) {
this(id, seqNo, primaryTerm, Versions.MATCH_ANY, source, null, -1);
}
public Index(String id, long seqNo, long primaryTerm, long version, byte[] source, String routing, long autoGeneratedIdTimestamp) {
this.id = id;
this.source = new BytesArray(source);
this.seqNo = seqNo;
this.primaryTerm = primaryTerm;
this.version = version;
this.routing = routing;
this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp;
}
@Override
public Type opType() {
return Type.INDEX;
}
@Override
public long estimateSize() {
            return (2 * id.length()) + source.length() + (routing != null ? 2 * routing.length() : 0) + (4 * Long.BYTES); // timestamp, seq_no, primary_term, and version
        }
public String id() {
return this.id;
}
public String routing() {
return this.routing;
}
public BytesReference source() {
return this.source;
}
@Override
public long seqNo() {
return seqNo;
}
@Override
public long primaryTerm() {
return primaryTerm;
}
public long version() {
return this.version;
}
@Override
public Source getSource() {
return new Source(source, routing);
}
private void write(final StreamOutput out) throws IOException {
final int format = out.getVersion().onOrAfter(Version.V_2_0_0) ? SERIALIZATION_FORMAT : FORMAT_NO_VERSION_TYPE;
out.writeVInt(format);
out.writeString(id);
if (format < FORMAT_NO_DOC_TYPE) {
out.writeString(MapperService.SINGLE_MAPPING_NAME);
}
out.writeBytesReference(source);
out.writeOptionalString(routing);
if (format < FORMAT_NO_PARENT) {
out.writeOptionalString(null); // _parent
}
out.writeLong(version);
if (format < FORMAT_NO_VERSION_TYPE) {
out.writeByte(VersionType.EXTERNAL.getValue());
}
out.writeLong(autoGeneratedIdTimestamp);
out.writeLong(seqNo);
out.writeLong(primaryTerm);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Index index = (Index) o;
if (version != index.version
|| seqNo != index.seqNo
|| primaryTerm != index.primaryTerm
|| id.equals(index.id) == false
|| autoGeneratedIdTimestamp != index.autoGeneratedIdTimestamp
|| source.equals(index.source) == false) {
return false;
}
if (routing != null ? !routing.equals(index.routing) : index.routing != null) {
return false;
}
return true;
}
@Override
public int hashCode() {
int result = id.hashCode();
result = 31 * result + Long.hashCode(seqNo);
result = 31 * result + Long.hashCode(primaryTerm);
result = 31 * result + Long.hashCode(version);
result = 31 * result + source.hashCode();
result = 31 * result + (routing != null ? routing.hashCode() : 0);
result = 31 * result + Long.hashCode(autoGeneratedIdTimestamp);
return result;
}
@Override
public String toString() {
return "Index{"
+ "id='"
+ id
+ '\''
+ ", seqNo="
+ seqNo
+ ", primaryTerm="
+ primaryTerm
+ ", version="
+ version
+ ", autoGeneratedIdTimestamp="
+ autoGeneratedIdTimestamp
+ '}';
}
public long getAutoGeneratedIdTimestamp() {
return autoGeneratedIdTimestamp;
}
}
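
    // Illustrative sketch, not part of the original class: the (id, seqNo, primaryTerm, source) constructor above
    // is the simplest way to build an Index operation (MATCH_ANY version, no routing, unset auto-generated id
    // timestamp), e.g. for tests. The helper name, id, and source are hypothetical.
    static Index exampleIndexOperation() {
        final byte[] source = "{\"field\":\"value\"}".getBytes(java.nio.charset.StandardCharsets.UTF_8);
        return new Index("1", 0, 1, source);
    }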
/**
* Delete operation
*
* @opensearch.internal
*/
public static class Delete implements Operation {
private static final int FORMAT_6_0 = 4; // 6.0 - *
public static final int FORMAT_NO_PARENT = FORMAT_6_0 + 1; // since 7.0
public static final int FORMAT_NO_VERSION_TYPE = FORMAT_NO_PARENT + 1;
public static final int FORMAT_NO_DOC_TYPE = FORMAT_NO_VERSION_TYPE + 1;
public static final int SERIALIZATION_FORMAT = FORMAT_NO_DOC_TYPE;
private final String id;
private final long seqNo;
private final long primaryTerm;
private final long version;
private Delete(final StreamInput in) throws IOException {
final int format = in.readVInt();// SERIALIZATION_FORMAT
assert format >= FORMAT_6_0 : "format was: " + format;
if (format < FORMAT_NO_DOC_TYPE) {
in.readString();
// Can't assert that this is _doc because pre 2.0 indexes can have any name for a type
}
id = in.readString();
if (format < FORMAT_NO_DOC_TYPE) {
final String docType = in.readString();
assert docType.equals(IdFieldMapper.NAME) : docType + " != " + IdFieldMapper.NAME;
in.readBytesRef(); // uid
}
this.version = in.readLong();
if (format < FORMAT_NO_VERSION_TYPE) {
in.readByte(); // versionType
}
seqNo = in.readLong();
primaryTerm = in.readLong();
}
public Delete(Engine.Delete delete, Engine.DeleteResult deleteResult) {
this(delete.id(), deleteResult.getSeqNo(), delete.primaryTerm(), deleteResult.getVersion());
}
/** utility for testing */
public Delete(String id, long seqNo, long primaryTerm) {
this(id, seqNo, primaryTerm, Versions.MATCH_ANY);
}
public Delete(String id, long seqNo, long primaryTerm, long version) {
this.id = Objects.requireNonNull(id);
this.seqNo = seqNo;
this.primaryTerm = primaryTerm;
this.version = version;
}
@Override
public Type opType() {
return Type.DELETE;
}
@Override
public long estimateSize() {
            return (id.length() * 2) + (3 * Long.BYTES); // seq_no, primary_term, and version
        }
public String id() {
return id;
}
@Override
public long seqNo() {
return seqNo;
}
@Override
public long primaryTerm() {
return primaryTerm;
}
public long version() {
return this.version;
}
@Override
public Source getSource() {
throw new IllegalStateException("trying to read doc source from delete operation");
}
private void write(final StreamOutput out) throws IOException {
final int format = out.getVersion().onOrAfter(Version.V_2_0_0) ? SERIALIZATION_FORMAT : FORMAT_NO_VERSION_TYPE;
out.writeVInt(format);
if (format < FORMAT_NO_DOC_TYPE) {
out.writeString(MapperService.SINGLE_MAPPING_NAME);
}
out.writeString(id);
if (format < FORMAT_NO_DOC_TYPE) {
out.writeString(IdFieldMapper.NAME);
out.writeBytesRef(Uid.encodeId(id));
}
out.writeLong(version);
if (format < FORMAT_NO_VERSION_TYPE) {
out.writeByte(VersionType.EXTERNAL.getValue());
}
out.writeLong(seqNo);
out.writeLong(primaryTerm);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Delete delete = (Delete) o;
return version == delete.version && seqNo == delete.seqNo && primaryTerm == delete.primaryTerm;
}
@Override
public int hashCode() {
int result = Long.hashCode(seqNo);
result = 31 * result + Long.hashCode(primaryTerm);
result = 31 * result + Long.hashCode(version);
return result;
}
@Override
public String toString() {
return "Delete{" + "seqNo=" + seqNo + ", primaryTerm=" + primaryTerm + ", version=" + version + '}';
}
}
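
    // Illustrative sketch, not part of the original class: the testing constructor above defaults the version to
    // Versions.MATCH_ANY, so a delete tombstone only needs an id, a seq#, and a primary term. The helper name and
    // values are hypothetical.
    static Delete exampleDeleteOperation() {
        return new Delete("1", 0, 1);
    }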
/**
* Translog no op
*
* @opensearch.internal
*/
public static class NoOp implements Operation {
private final long seqNo;
private final long primaryTerm;
private final String reason;
@Override
public long seqNo() {
return seqNo;
}
@Override
public long primaryTerm() {
return primaryTerm;
}
public String reason() {
return reason;
}
private NoOp(final StreamInput in) throws IOException {
seqNo = in.readLong();
primaryTerm = in.readLong();
reason = in.readString();
}
public NoOp(final long seqNo, final long primaryTerm, final String reason) {
assert seqNo > SequenceNumbers.NO_OPS_PERFORMED;
assert primaryTerm >= 0;
assert reason != null;
this.seqNo = seqNo;
this.primaryTerm = primaryTerm;
this.reason = reason;
}
private void write(final StreamOutput out) throws IOException {
out.writeLong(seqNo);
out.writeLong(primaryTerm);
out.writeString(reason);
}
@Override
public Type opType() {
return Type.NO_OP;
}
@Override
public long estimateSize() {
return 2 * reason.length() + 2 * Long.BYTES;
}
@Override
public Source getSource() {
throw new UnsupportedOperationException("source does not exist for a no-op");
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
final NoOp that = (NoOp) obj;
return seqNo == that.seqNo && primaryTerm == that.primaryTerm && reason.equals(that.reason);
}
@Override
public int hashCode() {
return 31 * 31 * Long.hashCode(seqNo) + 31 * Long.hashCode(primaryTerm) + reason().hashCode();
}
@Override
public String toString() {
return "NoOp{" + "seqNo=" + seqNo + ", primaryTerm=" + primaryTerm + ", reason='" + reason + '\'' + '}';
}
}
/**
* How to sync the translog
*
* @opensearch.internal
*/
public enum Durability {
/**
* Async durability - translogs are synced based on a time interval.
*/
ASYNC,
/**
* Request durability - translogs are synced for each high level request (bulk, index, delete)
*/
REQUEST
}
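
    // Illustrative sketch, not part of the original class: how the two modes are typically acted upon - REQUEST
    // syncs before a request is acknowledged, while ASYNC leaves fsync to a periodic background task (per the enum
    // javadocs above). The helper name is hypothetical.
    static void durabilityExample(Translog translog, Durability durability) throws IOException {
        if (durability == Durability.REQUEST) {
            translog.sync(); // per-request fsync
        }
        // under ASYNC, a scheduled task calls sync() on a time interval instead
    }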
static void verifyChecksum(BufferedChecksumStreamInput in) throws IOException {
// This absolutely must come first, or else reading the checksum becomes part of the checksum
long expectedChecksum = in.getChecksum();
long readChecksum = Integer.toUnsignedLong(in.readInt());
if (readChecksum != expectedChecksum) {
throw new TranslogCorruptedException(
in.getSource(),
"checksum verification failed - expected: 0x"
+ Long.toHexString(expectedChecksum)
+ ", got: 0x"
+ Long.toHexString(readChecksum)
);
}
}
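
    // Illustrative sketch, not part of the original class: the ordering constraint in verifyChecksum matters
    // because BufferedChecksumStreamInput keeps a running checksum of the bytes read so far; capturing
    // getChecksum() before readInt() excludes the trailing 4-byte stored checksum from the computation.
    // The helper name is hypothetical and mirrors verifyChecksum's read order.
    static boolean checksumMatchesExample(BufferedChecksumStreamInput in) throws IOException {
        final long expectedChecksum = in.getChecksum();                 // checksum of everything read so far
        final long readChecksum = Integer.toUnsignedLong(in.readInt()); // checksum stored at the end of the entry
        return readChecksum == expectedChecksum;
    }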
/**
* Reads a list of operations written with {@link #writeOperations(StreamOutput, List)}
*/
    public static List<Operation> readOperations(StreamInput input, String source) throws IOException {