Mirror of https://gitflic.ru/project/openide/openide.git (synced 2026-02-04 15:06:56 +07:00)
[vfs] content writing moved out of lock
Since VFS content storage is based on an append-only log, writing/reading of content doesn't need external locking, so it can be moved out of the locks used there. This makes the locked region shorter, thus reducing contention.

GitOrigin-RevId: f5432f7d4039092b1c333fe21d7b327553668903
Committed by: intellij-monorepo-bot
Parent: eb2e311653
Commit: bd2766f742
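The idea, sketched outside the IntelliJ codebase: because an append-only content store never mutates already-published records, the expensive store/read of the content bytes can run without holding the records lock, and only the small file-record update (content id, length, flags) stays inside the critical section. Below is a minimal, self-contained illustration of that pattern; all names here (AppendOnlyContentStore, FileRecords, recordsLock, cacheContent) are invented for the sketch and are not the actual VFS API.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/** Illustrative append-only content store: a record, once written, is never mutated. */
final class AppendOnlyContentStore {
  private final ConcurrentHashMap<Integer, byte[]> contents = new ConcurrentHashMap<>();
  private final AtomicInteger nextId = new AtomicInteger(1);

  /** Safe without external locking: each id is allocated and published exactly once. */
  int allocateAndStore(byte[] content) {
    int id = nextId.getAndIncrement();
    contents.put(id, content.clone());
    return id;
  }

  /** Reading is also lock-free: stored content is immutable. */
  byte[] read(int contentId) {
    return contents.get(contentId);
  }
}

final class FileRecords {
  private final ReentrantReadWriteLock recordsLock = new ReentrantReadWriteLock();
  private final ConcurrentHashMap<Integer, Integer> contentIdByFileId = new ConcurrentHashMap<>();
  private final AppendOnlyContentStore store = new AppendOnlyContentStore();

  void cacheContent(int fileId, byte[] content) {
    // Before the change (conceptually), the content write itself ran under the write
    // lock, holding it for the whole, potentially large, copy.
    // After: store the content OUTSIDE the lock...
    int newContentId = store.allocateAndStore(content);
    // ...and take the lock only for the short record-field update:
    recordsLock.writeLock().lock();
    try {
      contentIdByFileId.put(fileId, newContentId);
    }
    finally {
      recordsLock.writeLock().unlock();
    }
  }

  byte[] readContent(int fileId) {
    int contentId;
    recordsLock.readLock().lock();
    try {
      contentId = contentIdByFileId.getOrDefault(fileId, -1);
    }
    finally {
      recordsLock.readLock().unlock();
    }
    if (contentId < 0) return null;
    // Append-only store: reading the bytes needs no lock.
    return store.read(contentId);
  }
}

The trade-off is visible in the real API name storeUnlinkedContent: content stored outside the lock may remain unlinked if the subsequent record update loses a race or never happens, which an append-only store tolerates as a harmless orphan record.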
@@ -1337,9 +1337,9 @@ public final class FSRecordsImpl implements Closeable {
     }
   }
 
-  int storeUnlinkedContent(byte[] bytes) {
+  int storeUnlinkedContent(@NotNull ByteArraySequence content) {
     try {
-      return contentAccessor.allocateContentRecordAndStore(bytes);
+      return contentAccessor.allocateContentRecordAndStore(content);
     }
     catch (IOException e) {
       throw handleError(e);
@@ -1,4 +1,4 @@
-// Copyright 2000-2023 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
+// Copyright 2000-2024 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
 package com.intellij.openapi.vfs.newvfs.persistent;
 
 import com.intellij.openapi.util.io.BufferExposingByteArrayOutputStream;
@@ -43,14 +43,14 @@ public final class PersistentFSContentAccessor {
     PersistentFSConnection.ensureIdIsValid(fileId);
     PersistentFSRecordsStorage records = connection.getRecords();
 
-    int contentRecordId = connection.getContents().storeRecord(content);
+    int contentRecordId = allocateContentRecordAndStore(content);
 
     records.setContentRecordId(fileId, contentRecordId);
   }
 
-  int allocateContentRecordAndStore(byte @NotNull [] content) throws IOException {
-    return connection.getContents().storeRecord(new ByteArraySequence(content));
+  int allocateContentRecordAndStore(@NotNull ByteArraySequence content) throws IOException {
+    return connection.getContents().storeRecord(content);
   }
 
   @ApiStatus.Obsolete
@@ -486,13 +486,9 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     return vfsPeer.writeContent(getFileId(file), contentOfFixedSize);
   }
 
-  private void writeContent(@NotNull VirtualFile file, @NotNull ByteArraySequence content, boolean contentOfFixedSize) {
-    vfsPeer.writeContent(getFileId(file), content, contentOfFixedSize);
-  }
-
   @Override
   public int storeUnlinkedContent(byte @NotNull [] bytes) {
-    return vfsPeer.storeUnlinkedContent(bytes);
+    return vfsPeer.storeUnlinkedContent(new ByteArraySequence(bytes));
   }
 
   @SuppressWarnings("removal")
@@ -802,7 +798,6 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
 
     int fileId = getFileId(file);
 
-    boolean outdated;
     long length;
     int contentRecordId;
 
@@ -813,8 +808,8 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
       boolean mustReloadLength = BitUtil.isSet(flags, Flags.MUST_RELOAD_LENGTH);
       boolean mustReloadContent = BitUtil.isSet(flags, Flags.MUST_RELOAD_CONTENT);
       length = mustReloadLength ? -1 : vfsPeer.getLength(fileId);
-      outdated = (length == -1) || mustReloadContent;
-      if (outdated) {
+      boolean contentOutdated = (length == -1) || mustReloadContent;
+      if (contentOutdated) {
         contentRecordId = -1;
       }
       else {
@@ -827,37 +822,26 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
       myInputLock.readLock().unlock();
     }
 
-    //VFS content storage is append-only, hence reading doesn't require locks:
-    InputStream contentStream = (contentRecordId > 0) ? vfsPeer.readContentById(contentRecordId) : null;
-
-    if (contentStream == null) {
+    if (contentRecordId <= 0) {
       NewVirtualFileSystem fs = getFileSystem(file);
 
-      byte[] content;
-      if (outdated) {
-        // In this case, the file can have an out-of-date length. So, update it first (needed for the correct work of `contentsToByteArray`)
-        // See IDEA-90813 for possible bugs.
-        //TODO RC: why fs.contentToByteArray() relies on VFS.length value? FS should be a source of truth here, not VFS.
-        //         LocalFileSystemBase/LocalFileSystemImpl both relies on VirtualFile.getLength() but only for sanity checks,
-        //         i.e. to check the length is not too big and not negative. This is strange, since LocalFS is the only FS
-        //         impl that relies on VFS length, instead of checking the actual file length.
-        //         Without that strange design choice the 2 branches here collapse into 1, and the following writeContent()
-        //         could be merged with storeContentToStorage() reducing code duplication
-        setLength(fileId, fs.getLength(file));
-        content = fs.contentsToByteArray(file);
-      }
-      else {
-        // a bit of optimization
-        content = fs.contentsToByteArray(file);
-        setLength(fileId, content.length);
-      }
+      byte[] content = fs.contentsToByteArray(file);
 
       if (mayCacheContent && shouldCache(content.length)) {
+        //VFS content storage is append-only, hence storing could be done outside the lock:
+        int newContentRecordId = vfsPeer.storeUnlinkedContent(new ByteArraySequence(content));
         myInputLock.writeLock().lock();
         try {
-          writeContent(file, ByteArraySequence.create(content), /*contentOfFixedSize: */ fs.isReadOnly());
-          setFlag(file, Flags.MUST_RELOAD_CONTENT, false);
+          updateContentId(fileId, newContentRecordId, content.length);
         }
         finally {
           myInputLock.writeLock().unlock();
         }
       }
+      else {
+        myInputLock.writeLock().lock();
+        try {
+          setLength(fileId, content.length);
+        }
+        finally {
+          myInputLock.writeLock().unlock();
@@ -867,6 +851,8 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
       return content;
     }
 
+    //VFS content storage is append-only, hence doesn't need lock for reading:
+    InputStream contentStream = vfsPeer.readContentById(contentRecordId);
     try {
       assert length >= 0 : file;
       return contentStream.readNBytes((int)length);
@@ -876,6 +862,27 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     }
   }
 
+  //@GuardedBy(myInputLock.writeLock)
+  private void updateContentId(int fileId,
+                               int newContentRecordId,
+                               int newContentLength) throws IOException {
+    PersistentFSConnection connection = vfsPeer.connection();
+    PersistentFSRecordsStorage records = connection.getRecords();
+    //TODO RC: ideally, this method shouldn't be protected by external myInputLock, but .updateRecord() should
+    //         involve VFS internal locking to protect it from concurrent writes
+    ((IPersistentFSRecordsStorage)records).updateRecord(fileId, record -> {
+      int flags = record.getFlags();
+      int newFlags = flags & ~(Flags.MUST_RELOAD_LENGTH | Flags.MUST_RELOAD_CONTENT);
+      if (newFlags != flags) {
+        record.setFlags(newFlags);
+      }
+      record.setContentRecordId(newContentRecordId);
+      record.setLength(newContentLength);
+      return true;
+    });
+    connection.markDirty();
+  }
+
   @Override
   public byte @NotNull [] contentsToByteArray(int contentId) throws IOException {
     //noinspection resource
@@ -887,7 +894,6 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     InputStream contentStream;
     boolean useReplicator = false;
     long len = 0L;
-    boolean readOnly = false;
 
     myInputLock.readLock().lock();
    try {
@@ -904,7 +910,6 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
 
         if (shouldCache(len)) {
           useReplicator = true;
-          readOnly = fs.isReadOnly();
         }
       }
     }
@@ -913,7 +918,7 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     }
 
     if (useReplicator) {
-      contentStream = createReplicatorAndStoreContent(file, contentStream, len, readOnly);
+      contentStream = createReplicatorAndStoreContent(file, contentStream, len);
     }
 
     return contentStream;
@@ -944,12 +949,11 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
 
   private @NotNull InputStream createReplicatorAndStoreContent(@NotNull VirtualFile file,
                                                                @NotNull InputStream nativeStream,
-                                                               long fileLength,
-                                                               boolean readOnly) {
+                                                               long fileLength) throws IOException {
     if (nativeStream instanceof BufferExposingByteArrayInputStream byteStream) {
       // optimization
       byte[] bytes = byteStream.getInternalBuffer();
-      storeContentToStorage(fileLength, file, readOnly, bytes, bytes.length);
+      storeContentToStorage(fileLength, file, bytes, bytes.length);
       return nativeStream;
     }
     BufferExposingByteArrayOutputStream cache = new BufferExposingByteArrayOutputStream((int)fileLength);
@@ -969,7 +973,7 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
         }
         super.close();
         if (isEndOfFileReached) {
-          storeContentToStorage(fileLength, file, readOnly, cache.getInternalBuffer(), cache.size());
+          storeContentToStorage(fileLength, file, cache.getInternalBuffer(), cache.size());
         }
       }
       finally {
@@ -982,27 +986,31 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
 
   private void storeContentToStorage(long fileLength,
                                      @NotNull VirtualFile file,
-                                     boolean readOnly,
                                      byte @NotNull [] bytes,
-                                     int byteLength) {
-    //TODO RC: we could reduce contention on this lock by extracting storing the content in VFSContentStorage outside the
-    //         lock, and only updating file-record fields inside the lock.
-    //         Even more reductions could be made by using (segmented) FileIdLock inside vfsPeer
+                                     int byteLength) throws IOException {
+    int fileId = getFileId(file);
+
+    if (byteLength == fileLength) {
+      ByteArraySequence newContent = new ByteArraySequence(bytes, 0, byteLength);
+      int newContentId = vfsPeer.storeUnlinkedContent(newContent);
       myInputLock.writeLock().lock();
       try {
-        if (byteLength == fileLength) {
-          writeContent(file, new ByteArraySequence(bytes, 0, byteLength), /*contentOfFixedSize: */ readOnly);
-          setFlag(file, Flags.MUST_RELOAD_CONTENT, false);
-          setFlag(file, Flags.MUST_RELOAD_LENGTH, false);
-        }
-        else {
-          doCleanPersistedContent(getFileId(file));
-        }
+        updateContentId(fileId, newContentId, byteLength);
       }
       finally {
        myInputLock.writeLock().unlock();
       }
+    }
+    else {
+      myInputLock.writeLock().lock();
+      try {
+        doCleanPersistedContent(fileId);
+      }
+      finally {
+        myInputLock.writeLock().unlock();
+      }
+    }
   }
 
   /** Method is obsolete, migrate to {@link #contentHashIfStored(VirtualFile)} instance method */
   @TestOnly
@@ -2372,11 +2380,11 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
     }
   }
 
-  private void setFlag(@NotNull VirtualFile file, @PersistentFS.Attributes int mask, boolean value) {
+  private void setFlag(@NotNull VirtualFile file, @Attributes int mask, boolean value) {
     setFlag(getFileId(file), mask, value);
   }
 
-  private void setFlag(int id, @PersistentFS.Attributes int mask, boolean value) {
+  private void setFlag(int id, @Attributes int mask, boolean value) {
     int oldFlags = vfsPeer.getFlags(id);
     int flags = value ? oldFlags | mask : oldFlags & ~mask;
 
@@ -1,4 +1,4 @@
-// Copyright 2000-2023 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
+// Copyright 2000-2024 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
 package com.intellij.openapi.vfs.newvfs.persistent
 
 import com.intellij.ide.IdeBundle
@@ -6,6 +6,7 @@ import com.intellij.openapi.application.ApplicationManager
 import com.intellij.openapi.diagnostic.Logger
 import com.intellij.openapi.util.Computable
 import com.intellij.openapi.util.io.BufferExposingByteArrayOutputStream
+import com.intellij.openapi.util.io.ByteArraySequence
 import com.intellij.openapi.util.io.DataInputOutputUtilRt
 import com.intellij.openapi.util.io.FileUtil
 import com.intellij.openapi.vfs.newvfs.FileAttribute
@@ -392,7 +393,7 @@ object VfsRecoveryUtils {
     when (val nextContent = snapshot.getContent(lastRecoveredContentId + 1)) {
       is NotAvailable -> break
       is Ready -> {
-        val result = newFsRecords.contentAccessor().allocateContentRecordAndStore(nextContent.value)
+        val result = newFsRecords.contentAccessor().allocateContentRecordAndStore(ByteArraySequence(nextContent.value))
         check(result == lastRecoveredContentId + 1) { "assumption failed: got $result, expected ${lastRecoveredContentId + 1}" }
         lastRecoveredContentId++
       }