[vfs] content writing moved out of lock

Since VFS content storage is based on an append-only log, writing and reading content don't need external locking, so both can be moved out of the locks used there. This makes the locked regions shorter and thus reduces contention.

GitOrigin-RevId: f5432f7d4039092b1c333fe21d7b327553668903
Author:       Ruslan Cheremin
Date:         2024-06-29 13:27:21 +02:00
Committed by: intellij-monorepo-bot
Parent:       eb2e311653
Commit:       bd2766f742

4 changed files with 74 additions and 65 deletions
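
The change boils down to one locking pattern: because the VFS content storage is append-only, the expensive part (appending the content bytes to the storage) can run without the shared lock, and only the cheap file-record update (content record id, length, flags) stays inside it. Below is a minimal, self-contained sketch of that pattern; the names (ContentStorage, FileRecords, CacheFileContentSketch, cacheContent) are illustrative stand-ins, not the real FSRecordsImpl/PersistentFSImpl APIs.

// A minimal sketch of the locking pattern this commit applies (not the actual IntelliJ code):
// an append-only content storage can be written to without the shared lock, so only the small
// file-record update remains in the locked region.
import java.util.concurrent.locks.ReentrantReadWriteLock;

final class CacheFileContentSketch {
  interface ContentStorage {            // append-only: storeRecord never rewrites existing data
    int storeRecord(byte[] content);    // returns the id of the newly appended content record
  }
  interface FileRecords {               // mutable per-file metadata, guarded by 'lock'
    void setContentRecordIdAndLength(int fileId, int contentRecordId, int length);
  }

  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final ContentStorage contentStorage;
  private final FileRecords records;

  CacheFileContentSketch(ContentStorage contentStorage, FileRecords records) {
    this.contentStorage = contentStorage;
    this.records = records;
  }

  /** Before the change the whole body ran under lock.writeLock(); now only the record update does. */
  void cacheContent(int fileId, byte[] content) {
    // 1. Long part: appending the bytes runs outside the lock (safe because the storage is append-only).
    int newContentRecordId = contentStorage.storeRecord(content);

    // 2. Short part: linking the new record to the file is the only section still under the lock.
    lock.writeLock().lock();
    try {
      records.setContentRecordIdAndLength(fileId, newContentRecordId, content.length);
    }
    finally {
      lock.writeLock().unlock();
    }
  }
}

In the diff below this is exactly the split applied in PersistentFSImpl: content is stored via vfsPeer.storeUnlinkedContent(...) outside myInputLock, and the new updateContentId(...) helper performs the short locked record update.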

FSRecordsImpl.java

@@ -1337,9 +1337,9 @@ public final class FSRecordsImpl implements Closeable {
     }
   }
 
-  int storeUnlinkedContent(byte[] bytes) {
+  int storeUnlinkedContent(@NotNull ByteArraySequence content) {
     try {
-      return contentAccessor.allocateContentRecordAndStore(bytes);
+      return contentAccessor.allocateContentRecordAndStore(content);
     }
     catch (IOException e) {
       throw handleError(e);

PersistentFSContentAccessor.java

@@ -1,4 +1,4 @@
-// Copyright 2000-2023 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
+// Copyright 2000-2024 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
 package com.intellij.openapi.vfs.newvfs.persistent;
 
 import com.intellij.openapi.util.io.BufferExposingByteArrayOutputStream;
@@ -43,14 +43,14 @@ public final class PersistentFSContentAccessor {
     PersistentFSConnection.ensureIdIsValid(fileId);
     PersistentFSRecordsStorage records = connection.getRecords();
-    int contentRecordId = connection.getContents().storeRecord(content);
+    int contentRecordId = allocateContentRecordAndStore(content);
     records.setContentRecordId(fileId, contentRecordId);
   }
 
-  int allocateContentRecordAndStore(byte @NotNull [] content) throws IOException {
-    return connection.getContents().storeRecord(new ByteArraySequence(content));
+  int allocateContentRecordAndStore(@NotNull ByteArraySequence content) throws IOException {
+    return connection.getContents().storeRecord(content);
   }
 
   @ApiStatus.Obsolete

PersistentFSImpl.java

@@ -486,13 +486,9 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     return vfsPeer.writeContent(getFileId(file), contentOfFixedSize);
   }
 
-  private void writeContent(@NotNull VirtualFile file, @NotNull ByteArraySequence content, boolean contentOfFixedSize) {
-    vfsPeer.writeContent(getFileId(file), content, contentOfFixedSize);
-  }
-
   @Override
   public int storeUnlinkedContent(byte @NotNull [] bytes) {
-    return vfsPeer.storeUnlinkedContent(bytes);
+    return vfsPeer.storeUnlinkedContent(new ByteArraySequence(bytes));
   }
 
   @SuppressWarnings("removal")
@@ -802,7 +798,6 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     int fileId = getFileId(file);
 
-    boolean outdated;
     long length;
     int contentRecordId;
@@ -813,8 +808,8 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
       boolean mustReloadLength = BitUtil.isSet(flags, Flags.MUST_RELOAD_LENGTH);
       boolean mustReloadContent = BitUtil.isSet(flags, Flags.MUST_RELOAD_CONTENT);
       length = mustReloadLength ? -1 : vfsPeer.getLength(fileId);
-      outdated = (length == -1) || mustReloadContent;
-      if (outdated) {
+      boolean contentOutdated = (length == -1) || mustReloadContent;
+      if (contentOutdated) {
         contentRecordId = -1;
       }
       else {
@@ -827,37 +822,26 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
       myInputLock.readLock().unlock();
     }
 
-    //VFS content storage is append-only, hence reading doesn't require locks:
-    InputStream contentStream = (contentRecordId > 0) ? vfsPeer.readContentById(contentRecordId) : null;
-    if (contentStream == null) {
+    if (contentRecordId <= 0) {
       NewVirtualFileSystem fs = getFileSystem(file);
-      byte[] content;
-      if (outdated) {
-        // In this case, the file can have an out-of-date length. So, update it first (needed for the correct work of `contentsToByteArray`)
-        // See IDEA-90813 for possible bugs.
-        //TODO RC: why fs.contentToByteArray() relies on VFS.length value? FS should be a source of truth here, not VFS.
-        //         LocalFileSystemBase/LocalFileSystemImpl both relies on VirtualFile.getLength() but only for sanity checks,
-        //         i.e. to check the length is not too big and not negative. This is strange, since LocalFS is the only FS
-        //         impl that relies on VFS length, instead of checking the actual file length.
-        //         Without that strange design choice the 2 branches here collapse into 1, and the following writeContent()
-        //         could be merged with storeContentToStorage() reducing code duplication
-        setLength(fileId, fs.getLength(file));
-        content = fs.contentsToByteArray(file);
-      }
-      else {
-        // a bit of optimization
-        content = fs.contentsToByteArray(file);
-        setLength(fileId, content.length);
-      }
+      byte[] content = fs.contentsToByteArray(file);
 
       if (mayCacheContent && shouldCache(content.length)) {
+        //VFS content storage is append-only, hence storing could be done outside the lock:
+        int newContentRecordId = vfsPeer.storeUnlinkedContent(new ByteArraySequence(content));
         myInputLock.writeLock().lock();
         try {
-          writeContent(file, ByteArraySequence.create(content), /*contentOfFixedSize: */ fs.isReadOnly());
-          setFlag(file, Flags.MUST_RELOAD_CONTENT, false);
+          updateContentId(fileId, newContentRecordId, content.length);
+        }
+        finally {
+          myInputLock.writeLock().unlock();
+        }
+      }
+      else {
+        myInputLock.writeLock().lock();
+        try {
+          setLength(fileId, content.length);
         }
         finally {
           myInputLock.writeLock().unlock();
@@ -867,6 +851,8 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
       return content;
     }
 
+    //VFS content storage is append-only, hence doesn't need lock for reading:
+    InputStream contentStream = vfsPeer.readContentById(contentRecordId);
     try {
       assert length >= 0 : file;
       return contentStream.readNBytes((int)length);
@@ -876,6 +862,27 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     }
   }
 
+  //@GuardedBy(myInputLock.writeLock)
+  private void updateContentId(int fileId,
+                               int newContentRecordId,
+                               int newContentLength) throws IOException {
+    PersistentFSConnection connection = vfsPeer.connection();
+    PersistentFSRecordsStorage records = connection.getRecords();
+    //TODO RC: ideally, this method shouldn't be protected by external myInputLock, but .updateRecord() should
+    //         involve VFS internal locking to protect it from concurrent writes
+    ((IPersistentFSRecordsStorage)records).updateRecord(fileId, record -> {
+      int flags = record.getFlags();
+      int newFlags = flags & ~(Flags.MUST_RELOAD_LENGTH | Flags.MUST_RELOAD_CONTENT);
+      if (newFlags != flags) {
+        record.setFlags(newFlags);
+      }
+      record.setContentRecordId(newContentRecordId);
+      record.setLength(newContentLength);
+      return true;
+    });
+    connection.markDirty();
+  }
+
   @Override
   public byte @NotNull [] contentsToByteArray(int contentId) throws IOException {
     //noinspection resource
@@ -887,7 +894,6 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     InputStream contentStream;
     boolean useReplicator = false;
     long len = 0L;
-    boolean readOnly = false;
 
     myInputLock.readLock().lock();
     try {
@@ -904,7 +910,6 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
         if (shouldCache(len)) {
           useReplicator = true;
-          readOnly = fs.isReadOnly();
         }
       }
     }
@@ -913,7 +918,7 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     }
 
     if (useReplicator) {
-      contentStream = createReplicatorAndStoreContent(file, contentStream, len, readOnly);
+      contentStream = createReplicatorAndStoreContent(file, contentStream, len);
     }
 
     return contentStream;
@@ -944,12 +949,11 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
   private @NotNull InputStream createReplicatorAndStoreContent(@NotNull VirtualFile file,
                                                                @NotNull InputStream nativeStream,
-                                                               long fileLength,
-                                                               boolean readOnly) {
+                                                               long fileLength) throws IOException {
     if (nativeStream instanceof BufferExposingByteArrayInputStream byteStream) {
       // optimization
       byte[] bytes = byteStream.getInternalBuffer();
-      storeContentToStorage(fileLength, file, readOnly, bytes, bytes.length);
+      storeContentToStorage(fileLength, file, bytes, bytes.length);
       return nativeStream;
     }
 
     BufferExposingByteArrayOutputStream cache = new BufferExposingByteArrayOutputStream((int)fileLength);
@@ -969,7 +973,7 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
         }
         super.close();
         if (isEndOfFileReached) {
-          storeContentToStorage(fileLength, file, readOnly, cache.getInternalBuffer(), cache.size());
+          storeContentToStorage(fileLength, file, cache.getInternalBuffer(), cache.size());
         }
       }
       finally {
@@ -982,25 +986,29 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
   private void storeContentToStorage(long fileLength,
                                      @NotNull VirtualFile file,
-                                     boolean readOnly,
                                      byte @NotNull [] bytes,
-                                     int byteLength) {
-    //TODO RC: we could reduce contention on this lock by extracting storing the content in VFSContentStorage outside the
-    //         lock, and only updating file-record fields inside the lock.
-    //         Even more reductions could be made by using (segmented) FileIdLock inside vfsPeer
-    myInputLock.writeLock().lock();
-    try {
-      if (byteLength == fileLength) {
-        writeContent(file, new ByteArraySequence(bytes, 0, byteLength), /*contentOfFixedSize: */ readOnly);
-        setFlag(file, Flags.MUST_RELOAD_CONTENT, false);
-        setFlag(file, Flags.MUST_RELOAD_LENGTH, false);
-      }
-      else {
-        doCleanPersistedContent(getFileId(file));
-      }
-    }
-    finally {
-      myInputLock.writeLock().unlock();
+                                     int byteLength) throws IOException {
+    int fileId = getFileId(file);
+
+    if (byteLength == fileLength) {
+      ByteArraySequence newContent = new ByteArraySequence(bytes, 0, byteLength);
+      int newContentId = vfsPeer.storeUnlinkedContent(newContent);
+      myInputLock.writeLock().lock();
+      try {
+        updateContentId(fileId, newContentId, byteLength);
+      }
+      finally {
+        myInputLock.writeLock().unlock();
+      }
+    }
+    else {
+      myInputLock.writeLock().lock();
+      try {
+        doCleanPersistedContent(fileId);
+      }
+      finally {
+        myInputLock.writeLock().unlock();
+      }
     }
   }
@@ -2372,11 +2380,11 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     }
   }
 
-  private void setFlag(@NotNull VirtualFile file, @PersistentFS.Attributes int mask, boolean value) {
+  private void setFlag(@NotNull VirtualFile file, @Attributes int mask, boolean value) {
     setFlag(getFileId(file), mask, value);
   }
 
-  private void setFlag(int id, @PersistentFS.Attributes int mask, boolean value) {
+  private void setFlag(int id, @Attributes int mask, boolean value) {
     int oldFlags = vfsPeer.getFlags(id);
     int flags = value ? oldFlags | mask : oldFlags & ~mask;

VfsRecoveryUtils.kt

@@ -1,4 +1,4 @@
-// Copyright 2000-2023 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
+// Copyright 2000-2024 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
 package com.intellij.openapi.vfs.newvfs.persistent
 
 import com.intellij.ide.IdeBundle
@@ -6,6 +6,7 @@ import com.intellij.openapi.application.ApplicationManager
 import com.intellij.openapi.diagnostic.Logger
 import com.intellij.openapi.util.Computable
 import com.intellij.openapi.util.io.BufferExposingByteArrayOutputStream
+import com.intellij.openapi.util.io.ByteArraySequence
 import com.intellij.openapi.util.io.DataInputOutputUtilRt
 import com.intellij.openapi.util.io.FileUtil
 import com.intellij.openapi.vfs.newvfs.FileAttribute
@@ -392,7 +393,7 @@ object VfsRecoveryUtils {
     when (val nextContent = snapshot.getContent(lastRecoveredContentId + 1)) {
       is NotAvailable -> break
       is Ready -> {
-        val result = newFsRecords.contentAccessor().allocateContentRecordAndStore(nextContent.value)
+        val result = newFsRecords.contentAccessor().allocateContentRecordAndStore(ByteArraySequence(nextContent.value))
         check(result == lastRecoveredContentId + 1) { "assumption failed: got $result, expected ${lastRecoveredContentId + 1}" }
         lastRecoveredContentId++
       }