Mirror of https://gitflic.ru/project/openide/openide.git, synced 2026-02-04 15:06:56 +07:00
[vfs] moved IO outside of lock in .contentsToByteArray()
GitOrigin-RevId: cdad04875185c165cefb4a79d18e0879b14b1693
committed by intellij-monorepo-bot
parent e2447d3dba
commit 237319782b
@@ -1,4 +1,4 @@
-// Copyright 2000-2023 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
+// Copyright 2000-2024 JetBrains s.r.o. and contributors. Use of this source code is governed by the Apache 2.0 license.
 package com.intellij.openapi.vfs.newvfs.persistent;
 
 import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
@@ -15,6 +15,18 @@ final class FileRecordLock {
 
   //Ideally, each fileId should have its own lock, but this is too expensive, so we use a segmented lock
+
+  //TODO RC: Currently we use FileRecordLock for updating the file hierarchy -- which is a relatively long process, since it
+  //         involves children modification. This is why the 'ReentrantLock with a fileId list per segment' approach was used --
+  //         it allows keeping a particular fileId locked for some time without locking other fileIds, even those falling
+  //         into the same segment.
+  //         In a bright future, we should use per-file-record locks for almost every file-record modification involving >1 field.
+  //         This is _required_ for correctness in a multithreaded env -- we are currently able to bypass that requirement
+  //         only because there are usually locks already acquired higher up the stack, and VFS implicitly piggybacks on that.
+  //         Such per-file-record locks are short most of the time, and for-read most of the time. For that kind of locking a
+  //         segmented StampedLock seems much more suitable -- the contention is low by itself (because most locks are short),
+  //         and segmentation makes it even lower, so keeping a list of fileIds is not needed -- we could just lock the whole
+  //         segment.
 
   private final SegmentLock[] segments;
 
   FileRecordLock() {
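The TODO above proposes replacing the per-segment 'ReentrantLock plus fileId list' with a plain segmented StampedLock. Below is a minimal sketch of that idea; the class and method names (SegmentedFileRecordLock, read, update) are hypothetical illustrations, not the actual IntelliJ Platform API.

import java.util.concurrent.locks.StampedLock;
import java.util.function.Supplier;

// Sketch only: segmented StampedLock, locking a whole segment per fileId.
final class SegmentedFileRecordLock {
  private static final int SEGMENTS_COUNT = 64; // power of two -> cheap bit-masking

  private final StampedLock[] segments = new StampedLock[SEGMENTS_COUNT];

  SegmentedFileRecordLock() {
    for (int i = 0; i < SEGMENTS_COUNT; i++) {
      segments[i] = new StampedLock();
    }
  }

  private StampedLock segmentFor(int fileId) {
    int h = fileId * 0x9E3779B9;              // Fibonacci hashing: spread the bits
    return segments[(h >>> 26) & (SEGMENTS_COUNT - 1)];
  }

  // Short, mostly-read critical section: lock the whole segment; since the lock
  // is held only briefly, no per-segment list of locked fileIds is needed.
  <T> T read(int fileId, Supplier<T> readFields) {
    StampedLock lock = segmentFor(fileId);
    long stamp = lock.readLock();
    try {
      return readFields.get();
    }
    finally {
      lock.unlockRead(stamp);
    }
  }

  void update(int fileId, Runnable updateFields) {
    StampedLock lock = segmentFor(fileId);
    long stamp = lock.writeLock();
    try {
      updateFields.run();
    }
    finally {
      lock.unlockWrite(stamp);
    }
  }
}

The trade-off versus the current scheme: a reader here can block on an unrelated fileId that hashes into the same segment, which is acceptable only while critical sections stay short -- exactly the condition the TODO spells out.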
@@ -799,22 +799,37 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
 
   @Override
   public byte @NotNull [] contentsToByteArray(@NotNull VirtualFile file, boolean mayCacheContent) throws IOException {
-    InputStream contentStream;
-    boolean outdated;
-    int fileId;
-    long length;
+    int fileId = getFileId(file);
+
+    boolean outdated;
+    long length;
+    int contentRecordId;
 
+    //MAYBE RC: we could reduce contention on this lock by utilising FileIdLock inside the vfsPeer.
     myInputLock.readLock().lock();
     try {
-      fileId = getFileId(file);
-      length = getLengthIfUpToDate(file);
-      outdated = length == -1 || mustReloadContent(file);
-      contentStream = outdated ? null : readContent(file);
+      int flags = vfsPeer.getFlags(fileId);
+      boolean mustReloadLength = BitUtil.isSet(flags, Flags.MUST_RELOAD_LENGTH);
+      boolean mustReloadContent = BitUtil.isSet(flags, Flags.MUST_RELOAD_CONTENT);
+      length = mustReloadLength ? -1 : vfsPeer.getLength(fileId);
+      outdated = (length == -1) || mustReloadContent;
+      if (outdated) {
+        contentRecordId = -1;
+      }
+      else {
+        // Once we've got a contentId, there is no need for locking anymore,
+        // since VFSContentStorage is a thread-safe, append-only storage
+        contentRecordId = vfsPeer.getContentRecordId(fileId);
+      }
     }
     finally {
       myInputLock.readLock().unlock();
     }
 
+    InputStream contentStream = (contentRecordId > 0) ? vfsPeer.readContentById(contentRecordId) : null;
+
     if (contentStream == null) {
       NewVirtualFileSystem fs = getFileSystem(file);
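This hunk is the heart of the commit: the read lock now covers only cheap in-memory reads of the file record, snapshotting contentRecordId, while the actual content IO (vfsPeer.readContentById) runs after the lock is released -- safe because VFSContentStorage is append-only. Below is a self-contained sketch of the same pattern; SnapshotThenReadSketch and AppendOnlyContentStore are hypothetical stand-ins, not PersistentFSImpl code.

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of "snapshot under the lock, do IO outside the lock".
final class SnapshotThenReadSketch {
  private final ReentrantReadWriteLock inputLock = new ReentrantReadWriteLock();
  private final AppendOnlyContentStore store = new AppendOnlyContentStore();
  private int contentRecordId = -1; // file-record field, guarded by inputLock

  byte[] contentsToByteArray() {
    // 1. Under the read lock: only a cheap, in-memory snapshot of the record id.
    int recordId;
    inputLock.readLock().lock();
    try {
      recordId = contentRecordId;
    }
    finally {
      inputLock.readLock().unlock();
    }

    // 2. Outside the lock: the potentially slow read. Safe because the store is
    //    append-only -- a record, once written, is immutable, so the snapshotted
    //    id keeps referring to the same bytes no matter what writers do now.
    return recordId > 0 ? store.read(recordId) : null;
  }

  // Toy append-only store: appends assign fresh ids; existing records never change.
  static final class AppendOnlyContentStore {
    private final List<byte[]> records = new CopyOnWriteArrayList<>();

    synchronized int append(byte[] bytes) {
      records.add(bytes.clone());
      return records.size(); // ids start at 1, so 0 and -1 can mean "no content"
    }

    byte[] read(int recordId) {
      return records.get(recordId - 1).clone();
    }
  }
}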
@@ -963,6 +978,9 @@ public final class PersistentFSImpl extends PersistentFS implements Disposable {
                              boolean readOnly,
                              byte @NotNull [] bytes,
                              int byteLength) {
+    //TODO RC: we could reduce contention on this lock by moving the storing of the content into VFSContentStorage outside
+    //         the lock, and only updating the file-record fields inside the lock.
+    //         Even more reduction could be gained by using a (segmented) FileIdLock inside vfsPeer
     myInputLock.writeLock().lock();
     try {
       if (byteLength == fileLength) {
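The writer-side TODO above is the mirror image of the read-path change: append the bytes to the append-only storage before taking the write lock, and hold the lock only to publish the new record id. Continuing the hypothetical SnapshotThenReadSketch from the previous sketch, that could look like:

  // Writer-side counterpart (same hypothetical names as SnapshotThenReadSketch):
  // the slow append runs without the lock; the write lock covers only the cheap
  // field update that publishes the new record id.
  void writeContent(byte[] bytes) {
    int newRecordId = store.append(bytes); // potentially slow IO, done before locking

    inputLock.writeLock().lock();
    try {
      contentRecordId = newRecordId;       // cheap in-memory publish
    }
    finally {
      inputLock.writeLock().unlock();
    }
  }

One consequence worth noting: two concurrent writers may append in one order and publish in the other, so the last publish wins; a writer that fails between append and publish merely leaves an orphaned record in the append-only store, which is harmless.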