# HG changeset patch
# User Vladimir Voskresensky
# Date 1300971564 -10800
# Node ID 8c0fcbd1f6294a96b3b8b83b9ccc9007ce08cea0
# Parent  504f24cbe535b9e467b2d11349220ca654d864ea
fixing #196841 - remote FS doesn't work if any part of the path to a file does not have read permission

diff --git a/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/DirectoryReaderSftp.java b/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/DirectoryReaderSftp.java
--- a/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/DirectoryReaderSftp.java
+++ b/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/DirectoryReaderSftp.java
@@ -42,7 +42,6 @@
 
 package org.netbeans.modules.remote.impl.fs;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
@@ -82,7 +81,7 @@
         }
     }
 
-    public void readDirectory() throws IOException, InterruptedException, CancellationException, ExecutionException {
+    public void readDirectory() throws InterruptedException, CancellationException, ExecutionException {
         Future<StatInfo[]> res = FileInfoProvider.ls(execEnv, remotePath);
         StatInfo[] infos = res.get();
         List<DirEntry> newEntries = new ArrayList<DirEntry>(infos.length);
diff --git a/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/RemoteDirectory.java b/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/RemoteDirectory.java
--- a/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/RemoteDirectory.java
+++ b/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/RemoteDirectory.java
@@ -86,7 +86,7 @@
     private static final boolean trace = RemoteLogger.getInstance().isLoggable(Level.FINEST);
     private static boolean LS_VIA_SFTP = ! Boolean.getBoolean("remote.parse.ls");
 
-    private Reference<DirectoryStorage> storageRef;
+    private Reference<DirectoryStorage> storageRef = new SoftReference<DirectoryStorage>(null);
 
     private static final class RefLock {}
     private final Object refLock = new RefLock();
@@ -407,10 +407,10 @@
         }
     }
 
-    private DirectoryStorage getDirectoryStorageImpl(boolean force, String expectedName, String childName) throws
+    private DirectoryStorage getDirectoryStorageImpl(boolean forceRefresh, String expectedName, String childName) throws
             ConnectException, IOException, InterruptedException, CancellationException, ExecutionException {
-        if (force && ! ConnectionManager.getInstance().isConnectedTo(getExecutionEnvironment())) {
+        if (forceRefresh && ! ConnectionManager.getInstance().isConnectedTo(getExecutionEnvironment())) {
            //RemoteLogger.getInstance().warning("refreshDirectoryStorage is called while host is not connected");
            //force = false;
            throw new ConnectException();
        }
@@ -422,27 +422,33 @@
 
         // check whether it is cached in memory
         synchronized (refLock) {
-            if (storageRef != null) {
-                storage = storageRef.get();
-            }
+            storage = storageRef.get();
         }
 
-        if (! force && storage != null) {
-            return storage;
-        }
+        boolean fromMemOrDiskCache;
 
-        boolean loaded;
         if (storage == null) { // try loading from disk
-            loaded = false;
+            fromMemOrDiskCache = false;
             storage = new DirectoryStorage(storageFile);
             if (storageFile.exists()) {
-                Lock lock = RemoteFileSystem.getLock(getCache()).readLock();
+                Lock readLock = RemoteFileSystem.getLock(getCache()).readLock();
                 try {
-                    lock.lock();
+                    readLock.lock();
                     try {
                         storage.load();
-                        loaded = true;
+                        fromMemOrDiskCache = true;
+                        // try to keep the loaded cache in memory
+                        synchronized (refLock) {
+                            DirectoryStorage s = storageRef.get();
+                            // it could be a cache put into memory by a writer (the freshest content)
+                            // or by a previous reader => the same as what we have just loaded
+                            if (s != null) {
+                                if (trace) { trace("using storage that was kept by other thread"); } // NOI18N
+                                storage = s;
+                            } else {
+                                storageRef = new SoftReference<DirectoryStorage>(storage);
+                            }
+                        }
                     } catch (FormatException e) {
                         Level level = e.isExpexted() ? Level.FINE : Level.WARNING;
                         RemoteLogger.getInstance().log(level, "Error reading directory cache", e); // NOI18N
@@ -456,46 +462,39 @@
                         Exceptions.printStackTrace(e);
                     }
                 } finally {
-                    lock.unlock();
+                    readLock.unlock();
                 }
             }
         } else {
-            loaded = true;
+            if (trace) { trace("use memory cached storage"); } // NOI18N
+            fromMemOrDiskCache = true;
        }
 
-        if (loaded && !force) {
-            synchronized (refLock) {
-                if (storageRef != null) {
+        if (fromMemOrDiskCache && !forceRefresh) {
+            RemoteLogger.assertTrue(storage != null);
+            if (trace) { trace("returning cached storage"); } // NOI18N
+            return storage;
+        }
+        // neither memory nor disk cache helped, or a refresh was forced -
+        // proceed with reading the remote content
+
+        checkConnection(this, true);
+
+        Lock writeLock = RemoteFileSystem.getLock(getCache()).writeLock();
+        if (trace) { trace("waiting for lock"); } // NOI18N
+        writeLock.lock();
+        try {
+            if (!forceRefresh) {
+                // it means we didn't have any cached storage
+                RemoteLogger.assertFalse(fromMemOrDiskCache);
+                // in case another writer thread has already synchronized the content while we were waiting for the lock
+                synchronized (refLock) {
                     DirectoryStorage s = storageRef.get();
                     if (s != null) {
-                        if (trace) { trace("returning storage that was loaded by other thread"); } // NOI18N
+                        if (trace) { trace("got storage from mem cache after waiting on writeLock: {0} expectedName={1}", getPath(), expectedName); } // NOI18N
                         return s;
                     }
                 }
-                storageRef = new SoftReference<DirectoryStorage>(storage);
-            }
-            if (trace) { trace("returning just loaded storage"); } // NOI18N
-            return storage;
-        }
-
-        // neither memory nor disk cache helped
-        checkConnection(this, true);
-
-        Lock lock = RemoteFileSystem.getLock(getCache()).writeLock();
-        if (trace) { trace("waiting for lock"); } // NOI18N
-        lock.lock();
-        try {
-            if (!force) {
-                // in case another thread synchronized content while we were waiting for lock
-                synchronized (refLock) {
-                    if (storageRef != null) {
-                        DirectoryStorage stor = storageRef.get();
-                        if (trace) { trace("got storage: {0} -> {1}", storageRef, stor); } // NOI18N
-                        if (stor != null) {
-                            return stor;
-                        }
-                    }
-                }
             }
             if (!getCache().exists()) {
                 getCache().mkdirs();
@@ -506,33 +505,24 @@
             DirectoryReader directoryReader = getLsViaSftp() ?
                 new DirectoryReaderSftp(getExecutionEnvironment(), getPath()) : new DirectoryReaderLs(getExecutionEnvironment(), getPath());
             if (trace) { trace("synchronizing"); } // NOI18N
+            Exception problem = null;
             try {
                 directoryReader.readDirectory();
             } catch (FileNotFoundException ex) {
                 throw ex;
             } catch (IOException ex) {
+                problem = ex;
+            } catch (ExecutionException ex) {
+                problem = ex;
+            }
+            if (problem != null) {
                 if (!ConnectionManager.getInstance().isConnectedTo(getExecutionEnvironment())) {
-                    // connection was broken while we read directory content -
-                    // add notification and return cache if available
+                    // connection was broken while we were reading the directory content - add a notification
                     getFileSystem().getRemoteFileSupport().addPendingFile(this);
-                    if (loaded && !force && storage != null) {
-                        return storage;
-                    } else {
-                        throw new ConnectException(ex.getMessage());
-                    }
+                    // a valid cache cannot be available here
+                    RemoteLogger.assertFalse(fromMemOrDiskCache && !forceRefresh && storage != null);
+                    throw new ConnectException(problem.getMessage());
                 }
-
-            } catch (ExecutionException ex) {
-                if (!ConnectionManager.getInstance().isConnectedTo(getExecutionEnvironment())) {
-                    // connection was broken while we read directory content -
-                    // add notification and return cache if available
-                    getFileSystem().getRemoteFileSupport().addPendingFile(this);
-                    if (loaded && !force && storage != null) {
-                        return storage;
-                    } else {
-                        throw ex;
-                    }
-                }
             }
             getFileSystem().incrementDirSyncCount();
             Map<String, List<DirEntry>> dupLowerNames = new HashMap<String, List<DirEntry>>();
@@ -545,13 +535,14 @@
             Set<DirEntry> keepCacheNames = new HashSet<DirEntry>();
             List<DirEntry> entriesToFireChanged = new ArrayList<DirEntry>();
             List<DirEntry> entriesToFireCreated = new ArrayList<DirEntry>();
+            List<FileObject> filesToFireDeleted = new ArrayList<FileObject>();
             for (DirEntry newEntry : newEntries.values()) {
                 String cacheName;
                 DirEntry oldEntry = storage.getEntry(newEntry.getName());
                 if (oldEntry == null) {
                     changed = true;
                     cacheName = RemoteFileSystemUtils.escapeFileName(newEntry.getName());
-                    if (loaded || newEntry.getName().equals(expectedName)) {
+                    if (fromMemOrDiskCache || newEntry.getName().equals(expectedName)) {
                         entriesToFireCreated.add(newEntry);
                     }
                 } else {
@@ -565,11 +556,11 @@
                             File entryCache = new File(getCache(), oldEntry.getCache());
                             if (entryCache.exists()) {
                                 if (trace) { trace("removing cache for updated file {0}", entryCache.getAbsolutePath()); } // NOI18N
-                                entryCache.delete();
+                                entryCache.delete(); // TODO: just mark it invalid instead of physically deleting the cache file
                             }
                         }
                     } else if (!equals(newEntry.getLinkTarget(), oldEntry.getLinkTarget())) {
-                        changed = fire = true;
+                        changed = fire = true; // TODO: the old link path is lost; it should probably be passed to the change event
                         getFileSystem().getFactory().setLink(this, getPath() + '/' + newEntry.getName(), newEntry.getLinkTarget());
                     } else if (!newEntry.getAccessAsString().equals(oldEntry.getAccessAsString())) {
                         changed = fire = true;
@@ -578,14 +569,20 @@
                     } else if (!newEntry.isSameGroup(oldEntry)) {
                         changed = fire = true;
                     } else if (newEntry.getSize() != oldEntry.getSize()) {
-                        changed = fire = true;
+                        changed = fire = true; // TODO: shouldn't this be treated the same as a time stamp change?
                     }
                     if (fire) {
                         entriesToFireChanged.add(newEntry);
                     }
                 } else {
                     changed = true;
-                    invalidate(oldEntry);
+                    FileObject removedFO = invalidate(oldEntry);
+                    // remove old
+                    if (removedFO != null) {
+                        filesToFireDeleted.add(removedFO);
+                    }
+                    // add new
+                    entriesToFireCreated.add(newEntry);
                     cacheName = RemoteFileSystemUtils.escapeFileName(newEntry.getName());
                 }
             }
@@ -606,13 +603,12 @@
 
             // Check for removal
             for (DirEntry oldEntry : storage.list()) {
                 if (!newEntries.containsKey(oldEntry.getName())) {
-                    changed = true;
-                    invalidate(oldEntry);
+                    FileObject removedFO = invalidate(oldEntry);
+                    if (removedFO != null) {
+                        filesToFireDeleted.add(removedFO);
+                    }
                 }
             }
-            }
-
-            if (changed) {
             if (hasDups) {
                 for (Map.Entry<String, List<DirEntry>> mapEntry : new ArrayList<Map.Entry<String, List<DirEntry>>>(dupLowerNames.entrySet())) {
@@ -621,9 +617,10 @@
                     if (dupEntries.size() > 1) {
                         for (int i = 0; i < dupEntries.size(); i++) {
                             DirEntry entry = dupEntries.get(i);
-                            if (keepCacheNames.contains(entry) || i == 0) {
-                                continue; // keep the one that already exists or otherwise 0-th one
+                            if (keepCacheNames.contains(entry)) {
+                                continue; // keep the one that already exists
                             }
+                            // all remaining duplicates get a numeric suffix
                             for (int j = 0; j < Integer.MAX_VALUE; j++) {
                                 String cacheName = mapEntry.getKey() + '_' + j;
                                 String lowerCacheName = cacheName.toLowerCase();
@@ -644,12 +641,18 @@
             } else {
                 storage.touch();
             }
+            // always put the new content into the memory cache;
+            // do it before firing events, to give listeners the real content
             synchronized (refLock) {
                 storageRef = new SoftReference<DirectoryStorage>(storage);
             }
             storageFile.setLastModified(System.currentTimeMillis());
             if (trace) { trace("set lastModified to {0}", storageFile.lastModified()); } // NOI18N
+            // fire all events under the lock
             if (changed) {
+                for (FileObject deleted : filesToFireDeleted) {
+                    fireFileDeletedEvent(getListeners(), new FileEvent(deleted));
+                }
                 for (DirEntry entry : entriesToFireCreated) {
                     RemoteFileObjectBase fo = createFileObject(entry);
                     fireRemoteFileObjectCreated(fo);
@@ -662,7 +665,7 @@
                 }
             }
         } finally {
-            lock.unlock();
+            writeLock.unlock();
         }
         return storage;
     }
@@ -752,13 +755,11 @@
         throw new IOException(getPath());
     }
 
-    private void invalidate(DirEntry oldEntry) {
+    private FileObject invalidate(DirEntry oldEntry) {
         FileObject fo = getFileSystem().getFactory().invalidate(getPath() + '/' + oldEntry.getName());
         File oldEntryCache = new File(getCache(), oldEntry.getCache());
         removeFile(oldEntryCache);
-        if (fo != null) {
-            fireFileDeletedEvent(getListeners(), new FileEvent(fo));
-        }
+        return fo;
     }
 
     private void removeFile(File cache) {
diff --git a/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/RemoteFileObjectBase.java b/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/RemoteFileObjectBase.java
--- a/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/RemoteFileObjectBase.java
+++ b/dlight.remote.impl/src/org/netbeans/modules/remote/impl/fs/RemoteFileObjectBase.java
@@ -110,7 +110,11 @@
         return fileSystem.getExecutionEnvironment();
     }
 
-    protected File getCache() {
+    /**
+     * Local cache of this FileObject (for a directory - a local dir; for a plain file - a local file with its content).
+     * @return the local cache file or directory
+     */
+    protected final File getCache() {
         return cache;
     }
 
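Illustrative sketch (not part of the changeset): the refresh logic this patch introduces in RemoteDirectory.getDirectoryStorageImpl follows a double-checked caching pattern - check a SoftReference under a small private lock, fall back to the on-disk cache under the read lock, and only read the remote directory under the write lock, re-checking the memory cache after the lock is acquired. A minimal standalone Java sketch of that pattern is shown below; all names here (CachedDirectory, loadFromDiskCache, readRemoteDirectory) are hypothetical stand-ins, not the actual NetBeans API.

    import java.lang.ref.Reference;
    import java.lang.ref.SoftReference;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    /** Hypothetical stand-in that only illustrates the caching pattern; not the NetBeans class. */
    final class CachedDirectory {

        /** Stand-in for the real DirectoryStorage (a snapshot of a directory listing). */
        static final class DirectoryStorage {
        }

        private final Object refLock = new Object();
        // never null: starts out as a cleared SoftReference, as the patch does for storageRef
        private Reference<DirectoryStorage> storageRef = new SoftReference<DirectoryStorage>(null);
        private final ReadWriteLock cacheLock = new ReentrantReadWriteLock();

        DirectoryStorage getStorage(boolean forceRefresh) {
            DirectoryStorage storage;
            // 1) memory cache: cheap check under a small private lock
            synchronized (refLock) {
                storage = storageRef.get();
            }
            if (storage == null) {
                // 2) disk cache: load under the read lock
                cacheLock.readLock().lock();
                try {
                    storage = loadFromDiskCache();
                } finally {
                    cacheLock.readLock().unlock();
                }
                if (storage != null) {
                    synchronized (refLock) {
                        DirectoryStorage s = storageRef.get();
                        if (s != null) {
                            storage = s; // another thread published a (possibly fresher) copy first
                        } else {
                            storageRef = new SoftReference<DirectoryStorage>(storage);
                        }
                    }
                }
            }
            if (storage != null && !forceRefresh) {
                return storage; // either cache satisfied the request
            }
            // 3) remote read: exclusive, with a re-check after waiting for the write lock
            cacheLock.writeLock().lock();
            try {
                if (!forceRefresh) {
                    synchronized (refLock) {
                        DirectoryStorage s = storageRef.get();
                        if (s != null) {
                            return s; // a concurrent writer refreshed the directory while we waited
                        }
                    }
                }
                storage = readRemoteDirectory();
                synchronized (refLock) {
                    storageRef = new SoftReference<DirectoryStorage>(storage);
                }
                return storage;
            } finally {
                cacheLock.writeLock().unlock();
            }
        }

        private DirectoryStorage loadFromDiskCache() {
            return null; // stub: would deserialize the persisted listing if the cache file exists
        }

        private DirectoryStorage readRemoteDirectory() {
            return new DirectoryStorage(); // stub: would list the directory on the remote host
        }
    }

A detail worth noting, which the patch relies on as well: storageRef itself is never null (it starts as a cleared SoftReference), so callers only ever have to handle a null referent, never a null Reference.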