summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJoeyHess <>2018-03-16 16:57:00 (GMT)
committerhdiff <hdiff@hdiff.luite.com>2018-03-16 16:57:00 (GMT)
commit90764122af46e5a332d755c168e153d26b4d0c91 (patch)
treeed94e5d472d6cbb4b730ed15bc80e18348b2facb
parentd20600ff250cc93120206f10744b5eb91e6e1d59 (diff)
version 6.201803166.20180316
-rw-r--r--Annex/Content.hs49
-rw-r--r--Annex/Perms.hs1
-rw-r--r--Annex/Ssh.hs62
-rw-r--r--Annex/Transfer.hs15
-rw-r--r--Backend/Hash.hs50
-rw-r--r--CHANGELOG51
-rw-r--r--COPYRIGHT4
-rw-r--r--CmdLine/GitAnnexShell.hs17
-rw-r--r--CmdLine/GitAnnexShell/Checks.hs23
-rw-r--r--CmdLine/GitRemoteTorAnnex.hs3
-rw-r--r--Command/EnableTor.hs4
-rw-r--r--Command/Export.hs4
-rw-r--r--Command/LockContent.hs18
-rw-r--r--Command/Multicast.hs2
-rw-r--r--Command/P2P.hs4
-rw-r--r--Command/P2PStdIO.hs41
-rw-r--r--Command/ReKey.hs4
-rw-r--r--Command/TransferInfo.hs3
-rw-r--r--Command/Unlock.hs1
-rw-r--r--Logs/Transfer.hs13
-rw-r--r--Messages/Progress.hs61
-rw-r--r--NEWS22
-rw-r--r--P2P/Annex.hs106
-rw-r--r--P2P/IO.hs56
-rw-r--r--P2P/Protocol.hs222
-rw-r--r--Remote/GCrypt.hs12
-rw-r--r--Remote/Git.hs146
-rw-r--r--Remote/Helper/P2P.hs67
-rw-r--r--Remote/Helper/Special.hs2
-rw-r--r--Remote/Helper/Ssh.hs176
-rw-r--r--Remote/P2P.hs91
-rw-r--r--Remote/Rsync.hs128
-rw-r--r--Remote/Rsync/RsyncUrl.hs5
-rw-r--r--RemoteDaemon/Transport/Tor.hs40
-rw-r--r--Test.hs2
-rw-r--r--Types/Key.hs93
-rw-r--r--Types/Remote.hs13
-rw-r--r--Utility/Hash.hs63
-rw-r--r--Utility/Metered.hs62
-rw-r--r--Utility/Process.hs6
-rw-r--r--Utility/Process/Transcript.hs48
-rw-r--r--doc/git-annex-export.mdwn2
-rw-r--r--doc/git-annex-shell.mdwn9
-rw-r--r--doc/git-annex.mdwn6
-rw-r--r--git-annex.cabal14
45 files changed, 1287 insertions, 534 deletions
diff --git a/Annex/Content.hs b/Annex/Content.hs
index 768b2a9..627e3e5 100644
--- a/Annex/Content.hs
+++ b/Annex/Content.hs
@@ -16,7 +16,7 @@ module Annex.Content (
lockContentForRemoval,
ContentRemovalLock,
getViaTmp,
- getViaTmp',
+ getViaTmpFromDisk,
checkDiskSpaceToGet,
prepTmp,
withTmp,
@@ -196,19 +196,19 @@ contentLockFile key = Just <$> calcRepo (gitAnnexContentLock key)
{- Prevents the content from being removed while the action is running.
- Uses a shared lock.
-
- - Does not actually check if the content is present. Use inAnnex for that.
- - However, since the contentLockFile is the content file in indirect mode,
- - if the content is not present, locking it will fail.
- -
- - If locking fails, throws an exception rather than running the action.
+ - If locking fails, or the content is not present, throws an exception
+ - rather than running the action.
-
- Note that, in direct mode, nothing prevents the user from directly
- editing or removing the content, even while it's locked by this.
-}
lockContentShared :: Key -> (VerifiedCopy -> Annex a) -> Annex a
-lockContentShared key a = lockContentUsing lock key $ do
- u <- getUUID
- withVerifiedCopy LockedCopy u (return True) a
+lockContentShared key a = lockContentUsing lock key $ ifM (inAnnex key)
+ ( do
+ u <- getUUID
+ withVerifiedCopy LockedCopy u (return True) a
+ , giveup $ "failed to lock content: not present"
+ )
where
#ifndef mingw32_HOST_OS
lock contentfile Nothing = tryLockShared Nothing contentfile
@@ -295,17 +295,27 @@ lockContentUsing locker key a = do
- the key and moves the file into the annex as a key's content. -}
getViaTmp :: VerifyConfig -> Key -> (FilePath -> Annex (Bool, Verification)) -> Annex Bool
getViaTmp v key action = checkDiskSpaceToGet key False $
- getViaTmp' v key action
+ getViaTmpFromDisk v key action
{- Like getViaTmp, but does not check that there is enough disk space
- for the incoming key. For use when the key content is already on disk
- and not being copied into place. -}
-getViaTmp' :: VerifyConfig -> Key -> (FilePath -> Annex (Bool, Verification)) -> Annex Bool
-getViaTmp' v key action = do
+getViaTmpFromDisk :: VerifyConfig -> Key -> (FilePath -> Annex (Bool, Verification)) -> Annex Bool
+getViaTmpFromDisk v key action = do
tmpfile <- prepTmp key
+ resuming <- liftIO $ doesFileExist tmpfile
(ok, verification) <- action tmpfile
+ -- When the temp file already had content, we don't know if
+	-- that content is good or not, so only trust it if the action
+	-- Verified it in passing. Otherwise, force verification even
+ -- if the VerifyConfig normally disables it.
+ let verification' = if resuming
+ then case verification of
+ Verified -> Verified
+ _ -> MustVerify
+ else verification
if ok
- then ifM (verifyKeyContent v verification key tmpfile)
+ then ifM (verifyKeyContent v verification' key tmpfile)
( ifM (pruneTmpWorkDirBefore tmpfile (moveAnnex key))
( do
logStatus key InfoPresent
@@ -331,12 +341,15 @@ getViaTmp' v key action = do
- it is checked.
-}
verifyKeyContent :: VerifyConfig -> Verification -> Key -> FilePath -> Annex Bool
-verifyKeyContent _ Verified _ _ = return True
-verifyKeyContent v UnVerified k f = ifM (shouldVerify v)
- ( verifysize <&&> verifycontent
- , return True
- )
+verifyKeyContent v verification k f = case verification of
+ Verified -> return True
+ UnVerified -> ifM (shouldVerify v)
+ ( verify
+ , return True
+ )
+ MustVerify -> verify
where
+ verify = verifysize <&&> verifycontent
verifysize = case keySize k of
Nothing -> return True
Just size -> do
diff --git a/Annex/Perms.hs b/Annex/Perms.hs
index 93919af..b467d4a 100644
--- a/Annex/Perms.hs
+++ b/Annex/Perms.hs
@@ -6,6 +6,7 @@
-}
module Annex.Perms (
+ FileMode,
setAnnexFilePerm,
setAnnexDirPerm,
annexFileMode,
diff --git a/Annex/Ssh.hs b/Annex/Ssh.hs
index 7280b58..7bb9ad0 100644
--- a/Annex/Ssh.hs
+++ b/Annex/Ssh.hs
@@ -34,7 +34,6 @@ import Annex.Path
import Utility.Env
import Utility.FileSystemEncoding
import Utility.Hash
-import Utility.Process.Transcript
import Types.CleanupActions
import Types.Concurrency
import Git.Env
@@ -202,36 +201,37 @@ prepSocket socketfile gc sshhost sshparams = do
-- the connection has already been started. Otherwise,
-- get the connection started now.
makeconnection socketlock =
- whenM (isNothing <$> fromLockCache socketlock) $ do
- let startps = Param (fromSshHost sshhost) :
- sshparams ++ startSshConnection gc
- -- When we can start the connection in batch mode,
- -- ssh won't prompt to the console.
- (_, connected) <- liftIO $ processTranscript "ssh"
- (["-o", "BatchMode=true"]
- ++ toCommand startps)
- Nothing
- unless connected $ do
- ok <- prompt $ liftIO $
- boolSystem "ssh" startps
- unless ok $
- warning $ "Unable to run git-annex-shell on remote " ++
- Git.repoDescribe (gitConfigRepo (remoteGitConfig gc))
-
--- Parameters to get ssh connected to the remote host,
--- by asking it to run a no-op command.
---
--- Could simply run "true", but the remote host may only
--- allow git-annex-shell to run. So, run git-annex-shell inannex
--- with the path to the remote repository and no other parameters,
--- which is a no-op supported by all versions of git-annex-shell.
-startSshConnection :: RemoteGitConfig -> [CommandParam]
-startSshConnection gc =
- [ Param "git-annex-shell"
- , Param "inannex"
- , File $ Git.repoPath $ gitConfigRepo $
- remoteGitConfig gc
- ]
+ whenM (isNothing <$> fromLockCache socketlock) $
+ -- See if ssh can connect in batch mode,
+ -- if so there's no need to block for a password
+ -- prompt.
+ unlessM (tryssh ["-o", "BatchMode=true"]) $
+ -- ssh needs to prompt (probably)
+ -- If the user enters the wrong password,
+ -- ssh will tell them, so we can ignore
+ -- failure.
+ void $ prompt $ tryssh []
+ -- Try to ssh to the host quietly. Returns True if ssh apparently
+ -- connected to the host successfully. If ssh failed to connect,
+ -- returns False.
+ -- Even if ssh is forced to run some specific command, this will
+ -- return True.
+ -- (Except there's an unlikely false positive where a forced
+ -- ssh command exits 255.)
+ tryssh extraps = liftIO $ withNullHandle $ \nullh -> do
+ let p = proc "ssh" $ concat
+ [ extraps
+ , toCommand sshparams
+ , [fromSshHost sshhost, "true"]
+ ]
+ (Nothing, Nothing, Nothing, pid) <- createProcess $ p
+ { std_out = UseHandle nullh
+ , std_err = UseHandle nullh
+ }
+ exitcode <- waitForProcess pid
+ return $ case exitcode of
+ ExitFailure 255 -> False
+ _ -> True
{- Find ssh socket files.
-
diff --git a/Annex/Transfer.hs b/Annex/Transfer.hs
index 0d013d4..b12b227 100644
--- a/Annex/Transfer.hs
+++ b/Annex/Transfer.hs
@@ -73,9 +73,9 @@ alwaysRunTransfer = runTransfer' True
runTransfer' :: Observable v => Bool -> Transfer -> AssociatedFile -> RetryDecider -> (MeterUpdate -> Annex v) -> Annex v
runTransfer' ignorelock t afile shouldretry transferaction = checkSecureHashes t $ do
info <- liftIO $ startTransferInfo afile
- (meter, tfile, metervar) <- mkProgressUpdater t info
+ (meter, tfile, createtfile, metervar) <- mkProgressUpdater t info
mode <- annexFileMode
- (lck, inprogress) <- prep tfile mode info
+ (lck, inprogress) <- prep tfile createtfile mode
if inprogress && not ignorelock
then do
showNote "transfer already in progress, or unable to take transfer lock"
@@ -88,31 +88,30 @@ runTransfer' ignorelock t afile shouldretry transferaction = checkSecureHashes t
else recordFailedTransfer t info
return v
where
+ prep :: FilePath -> Annex () -> FileMode -> Annex (Maybe LockHandle, Bool)
#ifndef mingw32_HOST_OS
- prep tfile mode info = catchPermissionDenied (const prepfailed) $ do
+ prep tfile createtfile mode = catchPermissionDenied (const prepfailed) $ do
let lck = transferLockFile tfile
createAnnexDirectory $ takeDirectory lck
tryLockExclusive (Just mode) lck >>= \case
Nothing -> return (Nothing, True)
Just lockhandle -> ifM (checkSaneLock lck lockhandle)
( do
- void $ tryIO $
- writeTransferInfoFile info tfile
+ createtfile
return (Just lockhandle, False)
, do
liftIO $ dropLock lockhandle
return (Nothing, True)
)
#else
- prep tfile _mode info = catchPermissionDenied (const prepfailed) $ do
+ prep tfile createtfile _mode = catchPermissionDenied (const prepfailed) $ do
let lck = transferLockFile tfile
createAnnexDirectory $ takeDirectory lck
catchMaybeIO (liftIO $ lockExclusive lck) >>= \case
Nothing -> return (Nothing, False)
Just Nothing -> return (Nothing, True)
Just (Just lockhandle) -> do
- void $ tryIO $
- writeTransferInfoFile info tfile
+ createtfile
return (Just lockhandle, False)
#endif
prepfailed = return (Nothing, False)
diff --git a/Backend/Hash.hs b/Backend/Hash.hs
index da0f7df..e7cf0f9 100644
--- a/Backend/Hash.hs
+++ b/Backend/Hash.hs
@@ -1,10 +1,12 @@
{- git-annex hashing backends
-
- - Copyright 2011-2017 Joey Hess <id@joeyh.name>
+ - Copyright 2011-2018 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
+{-# LANGUAGE CPP #-}
+
module Backend.Hash (
backends,
testKeyBackend,
@@ -28,6 +30,11 @@ data Hash
| SHA2Hash HashSize
| SHA3Hash HashSize
| SkeinHash HashSize
+#if MIN_VERSION_cryptonite(0,23,0)
+ | Blake2bHash HashSize
+ | Blake2sHash HashSize
+ | Blake2spHash HashSize
+#endif
{- Order is slightly significant; want SHA256 first, and more general
- sizes earlier. -}
@@ -36,6 +43,11 @@ hashes = concat
[ map (SHA2Hash . HashSize) [256, 512, 224, 384]
, map (SHA3Hash . HashSize) [256, 512, 224, 384]
, map (SkeinHash . HashSize) [256, 512]
+#if MIN_VERSION_cryptonite(0,23,0)
+ , map (Blake2bHash . HashSize) [256, 512, 160, 224, 384]
+ , map (Blake2sHash . HashSize) [256, 160, 224]
+ , map (Blake2spHash . HashSize) [256, 224]
+#endif
, [SHA1Hash]
, [MD5Hash]
]
@@ -66,6 +78,11 @@ hashKeyVariety SHA1Hash = SHA1Key
hashKeyVariety (SHA2Hash size) = SHA2Key size
hashKeyVariety (SHA3Hash size) = SHA3Key size
hashKeyVariety (SkeinHash size) = SKEINKey size
+#if MIN_VERSION_cryptonite(0,23,0)
+hashKeyVariety (Blake2bHash size) = Blake2bKey size
+hashKeyVariety (Blake2sHash size) = Blake2sKey size
+hashKeyVariety (Blake2spHash size) = Blake2spKey size
+#endif
{- A key is a hash of its contents. -}
keyValue :: Hash -> KeySource -> Annex (Maybe Key)
@@ -94,7 +111,7 @@ selectExtension f
| otherwise = intercalate "." ("":es)
where
es = filter (not . null) $ reverse $
- take 2 $ map (filter validInExtension) $
+ take 2 $ filter (all validInExtension) $
takeWhile shortenough $
reverse $ splitc '.' $ takeExtensions f
shortenough e = length e <= 4 -- long enough for "jpeg"
@@ -168,6 +185,11 @@ hashFile hash file filesize = go hash
go (SHA2Hash hashsize) = usehasher hashsize
go (SHA3Hash hashsize) = use (sha3Hasher hashsize)
go (SkeinHash hashsize) = use (skeinHasher hashsize)
+#if MIN_VERSION_cryptonite(0,23,0)
+ go (Blake2bHash hashsize) = use (blake2bHasher hashsize)
+ go (Blake2sHash hashsize) = use (blake2sHasher hashsize)
+ go (Blake2spHash hashsize) = use (blake2spHasher hashsize)
+#endif
use hasher = liftIO $ do
h <- hasher <$> L.readFile file
@@ -219,6 +241,30 @@ skeinHasher (HashSize hashsize)
| hashsize == 512 = show . skein512
| otherwise = error $ "unsupported SKEIN size " ++ show hashsize
+#if MIN_VERSION_cryptonite(0,23,0)
+blake2bHasher :: HashSize -> (L.ByteString -> String)
+blake2bHasher (HashSize hashsize)
+ | hashsize == 256 = show . blake2b_256
+ | hashsize == 512 = show . blake2b_512
+ | hashsize == 160 = show . blake2b_160
+ | hashsize == 224 = show . blake2b_224
+ | hashsize == 384 = show . blake2b_384
+ | otherwise = error $ "unsupported BLAKE2B size " ++ show hashsize
+
+blake2sHasher :: HashSize -> (L.ByteString -> String)
+blake2sHasher (HashSize hashsize)
+ | hashsize == 256 = show . blake2s_256
+ | hashsize == 160 = show . blake2s_160
+ | hashsize == 224 = show . blake2s_224
+ | otherwise = error $ "unsupported BLAKE2S size " ++ show hashsize
+
+blake2spHasher :: HashSize -> (L.ByteString -> String)
+blake2spHasher (HashSize hashsize)
+ | hashsize == 256 = show . blake2sp_256
+ | hashsize == 224 = show . blake2sp_224
+ | otherwise = error $ "unsupported BLAKE2SP size " ++ show hashsize
+#endif
+
md5Hasher :: L.ByteString -> String
md5Hasher = show . md5
diff --git a/CHANGELOG b/CHANGELOG
index 861db44..16612cb 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,54 @@
+git-annex (6.20180316) upstream; urgency=medium
+
+ * New protocol for communicating with git-annex-shell increases speed
+ of operations involving ssh remotes. When not transferring large files,
+ git-annex is between 200% and 400% faster using the new protocol,
+ and it's just as fast as before when transferring large files.
+ (When the remote has an old git-annex-shell, git-annex falls back
+ to the old slower code. This fallback is planned to be removed
+ after 5 years or so.)
+ * Note that, due to not using rsync to transfer files over ssh
+ any longer, permissions and other file metadata of annexed files
+ will no longer be preserved when copying them to and from ssh remotes.
+ Other remotes never supported preserving that information, so
+ this is not considered a regression.
+ * Fix data loss bug in content locking over tor, when the remote
+ repository is in direct mode, it neglected to check that the content
+ was actually present when locking it. This could cause git annex drop
+ to remove the only copy of a file when it thought the tor remote had
+ a copy.
+ * Fix data loss bug when the local repository uses direct mode, and a
+ locally modified file is dropped from a remote repository. The bug
+ caused the modified file to be counted as a copy of the original file.
+ (This is not a severe bug because in such a situation, dropping
+ from the remote and then modifying the file is allowed and has the same
+ end result.)
+ * Some downloads will be verified, even when annex.verify=false.
+ This is done in some edge cases where there's a likelihood that an
+ object was downloaded incorrectly.
+ * Support exporttree=yes for rsync special remotes.
+ * Added backends for the BLAKE2 family of hashes, when built with
+ a new enough version of cryptonite.
+ * Improve SHA*E extension extraction code to not treat parts of the
+ filename that contain punctuation or other non-alphanumeric characters
+ as extensions. Before, such characters were filtered out.
+ * Better ssh connection warmup when using -J for concurrency.
+ Avoids ugly messages when forced ssh command is not git-annex-shell.
+ * Fix race condition in ssh warmup that caused git-annex to get
+ stuck and never process some files when run with high levels of
+ concurrency.
+ * Fix reversion introduced in 6.20171214 that caused concurrent
+ transfers to incorrectly fail with "transfer already in progress".
+ * Note that Remote/Git.hs now contains AGPL licensed code,
+ thus the license of git-annex as a whole is AGPL. This was already
+ the case when git-annex was built with the webapp enabled.
+ * Include amount of data transferred in progress display.
+ * Dial back optimisation when building on arm, which prevents
+ ghc and llc from running out of memory when optimising some files.
+ (Unfortunately this fix is incomplete due to a ghc bug.)
+
+ -- Joey Hess <id@joeyh.name> Fri, 16 Mar 2018 12:10:40 -0400
+
git-annex (6.20180227) upstream; urgency=medium
* inprogress: Avoid showing failures for files not in progress.
diff --git a/COPYRIGHT b/COPYRIGHT
index 4ac78af..966891a 100644
--- a/COPYRIGHT
+++ b/COPYRIGHT
@@ -10,6 +10,10 @@ Copyright: © 2012-2017 Joey Hess <id@joeyh.name>
© 2014 Sören Brunk
License: AGPL-3+
+Files: Remote/Git.hs Remote/Helper/Ssh.hs
+Copyright: © 2011-2018 Joey Hess <id@joeyh.name>
+License: AGPL-3+
+
Files: Remote/Ddar.hs
Copyright: © 2011 Joey Hess <id@joeyh.name>
© 2014 Robie Basak <robie@justgohome.co.uk>
diff --git a/CmdLine/GitAnnexShell.hs b/CmdLine/GitAnnexShell.hs
index 154bfeb..3dc31e6 100644
--- a/CmdLine/GitAnnexShell.hs
+++ b/CmdLine/GitAnnexShell.hs
@@ -1,6 +1,6 @@
{- git-annex-shell main program
-
- - Copyright 2010-2012 Joey Hess <id@joeyh.name>
+ - Copyright 2010-2018 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@@ -28,6 +28,7 @@ import qualified Command.TransferInfo
import qualified Command.Commit
import qualified Command.NotifyChanges
import qualified Command.GCryptSetup
+import qualified Command.P2PStdIO
cmds_readonly :: [Command]
cmds_readonly =
@@ -47,8 +48,18 @@ cmds_notreadonly =
, Command.GCryptSetup.cmd
]
+-- Commands that can operate readonly or not; they use checkNotReadOnly.
+cmds_readonly_capable :: [Command]
+cmds_readonly_capable =
+ [ gitAnnexShellCheck Command.P2PStdIO.cmd
+ ]
+
+cmds_readonly_safe :: [Command]
+cmds_readonly_safe = cmds_readonly ++ cmds_readonly_capable
+
cmds :: [Command]
-cmds = map (adddirparam . noMessages) (cmds_readonly ++ cmds_notreadonly)
+cmds = map (adddirparam . noMessages)
+ (cmds_readonly ++ cmds_notreadonly ++ cmds_readonly_capable)
where
adddirparam c = c { cmdparamdesc = "DIRECTORY " ++ cmdparamdesc c }
@@ -94,7 +105,7 @@ builtins = map cmdname cmds
builtin :: String -> String -> [String] -> IO ()
builtin cmd dir params = do
- unless (cmd `elem` map cmdname cmds_readonly)
+ unless (cmd `elem` map cmdname cmds_readonly_safe)
checkNotReadOnly
checkDirectory $ Just dir
let (params', fieldparams, opts) = partitionParams params
diff --git a/CmdLine/GitAnnexShell/Checks.hs b/CmdLine/GitAnnexShell/Checks.hs
index fcbf14b..3409884 100644
--- a/CmdLine/GitAnnexShell/Checks.hs
+++ b/CmdLine/GitAnnexShell/Checks.hs
@@ -14,17 +14,28 @@ import Annex.Init
import Utility.UserInfo
import Utility.Env
+limitedEnv :: String
+limitedEnv = "GIT_ANNEX_SHELL_LIMITED"
+
checkNotLimited :: IO ()
-checkNotLimited = checkEnv "GIT_ANNEX_SHELL_LIMITED"
+checkNotLimited = checkEnv limitedEnv
+
+readOnlyEnv :: String
+readOnlyEnv = "GIT_ANNEX_SHELL_READONLY"
checkNotReadOnly :: IO ()
-checkNotReadOnly = checkEnv "GIT_ANNEX_SHELL_READONLY"
+checkNotReadOnly = checkEnv readOnlyEnv
checkEnv :: String -> IO ()
-checkEnv var = getEnv var >>= \case
- Nothing -> noop
- Just "" -> noop
- Just _ -> giveup $ "Action blocked by " ++ var
+checkEnv var = checkEnvSet var >>= \case
+ False -> noop
+ True -> giveup $ "Action blocked by " ++ var
+
+checkEnvSet :: String -> IO Bool
+checkEnvSet var = getEnv var >>= return . \case
+ Nothing -> False
+ Just "" -> False
+ Just _ -> True
checkDirectory :: Maybe FilePath -> IO ()
checkDirectory mdir = do
diff --git a/CmdLine/GitRemoteTorAnnex.hs b/CmdLine/GitRemoteTorAnnex.hs
index 8a87797..b2b9b78 100644
--- a/CmdLine/GitRemoteTorAnnex.hs
+++ b/CmdLine/GitRemoteTorAnnex.hs
@@ -57,6 +57,7 @@ connectService address port service = do
myuuid <- getUUID
g <- Annex.gitRepo
conn <- liftIO $ connectPeer g (TorAnnex address port)
- liftIO $ runNetProto conn $ auth myuuid authtoken >>= \case
+ runst <- liftIO $ mkRunState Client
+ liftIO $ runNetProto runst conn $ auth myuuid authtoken noop >>= \case
Just _theiruuid -> connect service stdin stdout
Nothing -> giveup $ "authentication failed, perhaps you need to set " ++ p2pAuthTokenEnv
diff --git a/Command/EnableTor.hs b/Command/EnableTor.hs
index b73d002..85e2000 100644
--- a/Command/EnableTor.hs
+++ b/Command/EnableTor.hs
@@ -21,6 +21,7 @@ import Config.Files
import P2P.IO
import qualified P2P.Protocol as P2P
import Utility.ThreadScheduler
+import RemoteDaemon.Transport.Tor
import Control.Concurrent.Async
import qualified Network.Socket as S
@@ -122,7 +123,8 @@ checkHiddenService = bracket setup cleanup go
, connIhdl = h
, connOhdl = h
}
- void $ runNetProto conn $ P2P.serveAuth u
+ runst <- mkRunState Client
+ void $ runNetProto runst conn $ P2P.serveAuth u
hClose h
haslistener sockfile = catchBoolIO $ do
diff --git a/Command/Export.hs b/Command/Export.hs
index 4e1880c..cb3943e 100644
--- a/Command/Export.hs
+++ b/Command/Export.hs
@@ -220,7 +220,7 @@ performExport r ea db ek af contentsha loc = do
let rollback = void $
performUnexport r ea db [ek] loc
sendAnnex k rollback $ \f ->
- metered Nothing k (return $ Just f) $ \m -> do
+ metered Nothing k (return $ Just f) $ \_ m -> do
let m' = combineMeterUpdate pm m
storer f k loc m'
, do
@@ -228,7 +228,7 @@ performExport r ea db ek af contentsha loc = do
return False
)
-- Sending a non-annexed file.
- GitKey sha1k -> metered Nothing sha1k (return Nothing) $ \m ->
+ GitKey sha1k -> metered Nothing sha1k (return Nothing) $ \_ m ->
withTmpFile "export" $ \tmp h -> do
b <- catObject contentsha
liftIO $ L.hPut h b
diff --git a/Command/LockContent.hs b/Command/LockContent.hs
index 202ba20..1ed8cdf 100644
--- a/Command/LockContent.hs
+++ b/Command/LockContent.hs
@@ -22,9 +22,8 @@ cmd = noCommit $
seek :: CmdParams -> CommandSeek
seek = withWords start
--- First, lock the content. Then, make sure the content is actually
--- present, and print out "OK". Wait for the caller to send a line before
--- dropping the lock.
+-- First, lock the content, then print out "OK".
+-- Wait for the caller to send a line before dropping the lock.
start :: [String] -> CommandStart
start [ks] = do
ok <- lockContentShared k (const locksuccess)
@@ -34,12 +33,9 @@ start [ks] = do
else exitFailure
where
k = fromMaybe (giveup "bad key") (file2key ks)
- locksuccess = ifM (inAnnex k)
- ( liftIO $ do
- putStrLn contentLockedMarker
- hFlush stdout
- _ <- getProtocolLine stdin
- return True
- , return False
- )
+ locksuccess = liftIO $ do
+ putStrLn contentLockedMarker
+ hFlush stdout
+ _ <- getProtocolLine stdin
+ return True
start _ = giveup "Specify exactly 1 key."
diff --git a/Command/Multicast.hs b/Command/Multicast.hs
index 55792a2..e2f6870 100644
--- a/Command/Multicast.hs
+++ b/Command/Multicast.hs
@@ -213,7 +213,7 @@ storeReceived f = do
warning $ "Received a file " ++ f ++ " that is not a git-annex key. Deleting this file."
liftIO $ nukeFile f
Just k -> void $
- getViaTmp' AlwaysVerify k $ \dest -> unVerified $
+ getViaTmpFromDisk AlwaysVerify k $ \dest -> unVerified $
liftIO $ catchBoolIO $ do
rename f dest
return True
diff --git a/Command/P2P.hs b/Command/P2P.hs
index 65a2a67..fecd48d 100644
--- a/Command/P2P.hs
+++ b/Command/P2P.hs
@@ -309,7 +309,9 @@ setupLink remotename (P2PAddressAuth addr authtoken) = do
Left e -> return $ ConnectionError $ "Unable to connect with peer. Please check that the peer is connected to the network, and try again. (" ++ show e ++ ")"
Right conn -> do
u <- getUUID
- go =<< liftIO (runNetProto conn $ P2P.auth u authtoken)
+ let proto = P2P.auth u authtoken noop
+ runst <- liftIO $ mkRunState Client
+ go =<< liftIO (runNetProto runst conn proto)
where
go (Right (Just theiruuid)) = do
ok <- inRepo $ Git.Command.runBool
diff --git a/Command/P2PStdIO.hs b/Command/P2PStdIO.hs
new file mode 100644
index 0000000..38a3eb0
--- /dev/null
+++ b/Command/P2PStdIO.hs
@@ -0,0 +1,41 @@
+{- git-annex command
+ -
+ - Copyright 2018 Joey Hess <id@joeyh.name>
+ -
+ - Licensed under the GNU GPL version 3 or higher.
+ -}
+
+module Command.P2PStdIO where
+
+import Command
+import P2P.IO
+import P2P.Annex
+import qualified P2P.Protocol as P2P
+import qualified Annex
+import Annex.UUID
+import qualified CmdLine.GitAnnexShell.Checks as Checks
+
+cmd :: Command
+cmd = noMessages $ command "p2pstdio" SectionPlumbing
+ "communicate in P2P protocol over stdio"
+ paramUUID (withParams seek)
+
+seek :: CmdParams -> CommandSeek
+seek [u] = commandAction $ start $ toUUID u
+seek _ = giveup "missing UUID parameter"
+
+start :: UUID -> CommandStart
+start theiruuid = do
+ servermode <- liftIO $
+ Checks.checkEnvSet Checks.readOnlyEnv >>= return . \case
+ True -> P2P.ServeReadOnly
+ False -> P2P.ServeReadWrite
+ myuuid <- getUUID
+ conn <- stdioP2PConnection <$> Annex.gitRepo
+ let server = do
+ P2P.net $ P2P.sendMessage (P2P.AUTH_SUCCESS myuuid)
+ P2P.serveAuthed servermode myuuid
+ runst <- liftIO $ mkRunState $ Serving theiruuid Nothing
+ runFullProto runst conn server >>= \case
+ Right () -> next $ next $ return True
+ Left e -> giveup e
diff --git a/Command/ReKey.hs b/Command/ReKey.hs
index aaaaf7e..be67a25 100644
--- a/Command/ReKey.hs
+++ b/Command/ReKey.hs
@@ -79,11 +79,11 @@ perform file oldkey newkey = do
linkKey :: FilePath -> Key -> Key -> Annex Bool
linkKey file oldkey newkey = ifM (isJust <$> isAnnexLink file)
{- If the object file is already hardlinked to elsewhere, a hard
- - link won't be made by getViaTmp', but a copy instead.
+ - link won't be made by getViaTmpFromDisk, but a copy instead.
- This avoids hard linking to content linked to an
- unlocked file, which would leave the new key unlocked
- and vulnerable to corruption. -}
- ( getViaTmp' DefaultVerify newkey $ \tmp -> unVerified $ do
+ ( getViaTmpFromDisk DefaultVerify newkey $ \tmp -> unVerified $ do
oldobj <- calcRepo (gitAnnexLocation oldkey)
linkOrCopy' (return True) newkey oldobj tmp Nothing
, do
diff --git a/Command/TransferInfo.hs b/Command/TransferInfo.hs
index 3f352a8..d93cedd 100644
--- a/Command/TransferInfo.hs
+++ b/Command/TransferInfo.hs
@@ -50,7 +50,8 @@ start (k:[]) = do
, transferKey = key
}
tinfo <- liftIO $ startTransferInfo afile
- (update, tfile, _) <- mkProgressUpdater t tinfo
+ (update, tfile, createtfile, _) <- mkProgressUpdater t tinfo
+ createtfile
liftIO $ mapM_ void
[ tryIO $ forever $ do
bytes <- readUpdate
diff --git a/Command/Unlock.hs b/Command/Unlock.hs
index 221c9f0..eb3942f 100644
--- a/Command/Unlock.hs
+++ b/Command/Unlock.hs
@@ -15,7 +15,6 @@ import Annex.Version
import Annex.Link
import Annex.ReplaceFile
import Utility.CopyFile
-import Utility.FileMode
import Git.FilePath
import qualified Database.Keys
diff --git a/Logs/Transfer.hs b/Logs/Transfer.hs
index 9413f70..aebb249 100644
--- a/Logs/Transfer.hs
+++ b/Logs/Transfer.hs
@@ -47,14 +47,17 @@ percentComplete (Transfer { transferKey = key }) info =
percentage <$> keySize key <*> Just (fromMaybe 0 $ bytesComplete info)
{- Generates a callback that can be called as transfer progresses to update
- - the transfer info file. Also returns the file it'll be updating, and a
- - MVar that can be used to read the number of bytesComplete. -}
-mkProgressUpdater :: Transfer -> TransferInfo -> Annex (MeterUpdate, FilePath, MVar Integer)
+ - the transfer info file. Also returns the file it'll be updating,
+ - an action that sets up the file with appropriate permissions,
+ - which should be run after locking the transfer lock file, but
+ - before using the callback, and a MVar that can be used to read
+ - the number of bytesComplete. -}
+mkProgressUpdater :: Transfer -> TransferInfo -> Annex (MeterUpdate, FilePath, Annex (), MVar Integer)
mkProgressUpdater t info = do
tfile <- fromRepo $ transferFile t
- _ <- tryNonAsync $ writeTransferInfoFile info tfile
+ let createtfile = void $ tryNonAsync $ writeTransferInfoFile info tfile
mvar <- liftIO $ newMVar 0
- return (liftIO . updater tfile mvar, tfile, mvar)
+ return (liftIO . updater tfile mvar, tfile, createtfile, mvar)
where
updater tfile mvar b = modifyMVar_ mvar $ \oldbytes -> do
let newbytes = fromBytesProcessed b
diff --git a/Messages/Progress.hs b/Messages/Progress.hs
index cb924ee..bc7d1a0 100644
--- a/Messages/Progress.hs
+++ b/Messages/Progress.hs
@@ -24,46 +24,52 @@ import qualified System.Console.Concurrent as Console
#endif
{- Shows a progress meter while performing a transfer of a key.
- - The action is passed a callback to use to update the meter.
+ - The action is passed the meter and a callback to use to update the meter.
-
- When the key's size is not known, the srcfile is statted to get the size.
- This allows uploads of keys without size to still have progress
- displayed.
--}
-metered :: Maybe MeterUpdate -> Key -> Annex (Maybe FilePath) -> (MeterUpdate -> Annex a) -> Annex a
+metered :: Maybe MeterUpdate -> Key -> Annex (Maybe FilePath) -> (Meter -> MeterUpdate -> Annex a) -> Annex a
metered othermeter key getsrcfile a = withMessageState $ \st ->
flip go st =<< getsz
where
go _ (MessageState { outputType = QuietOutput }) = nometer
go msize (MessageState { outputType = NormalOutput, concurrentOutputEnabled = False }) = do
showOutput
- meter <- liftIO $ mkMeter msize bandwidthMeter $
- displayMeterHandle stdout
- m <- liftIO $ rateLimitMeterUpdate 0.1 msize $
+ meter <- liftIO $ mkMeter msize $
+ displayMeterHandle stdout bandwidthMeter
+ m <- liftIO $ rateLimitMeterUpdate 0.2 meter $
updateMeter meter
- r <- a (combinemeter m)
+ r <- a meter (combinemeter m)
liftIO $ clearMeterHandle meter stdout
return r
go msize (MessageState { outputType = NormalOutput, concurrentOutputEnabled = True }) =
#if WITH_CONCURRENTOUTPUT
withProgressRegion $ \r -> do
- meter <- liftIO $ mkMeter msize bandwidthMeter $ \_ s ->
- Regions.setConsoleRegion r ('\n' : s)
- m <- liftIO $ rateLimitMeterUpdate 0.1 msize $
+ meter <- liftIO $ mkMeter msize $ \_ msize' old new ->
+ let s = bandwidthMeter msize' old new
+ in Regions.setConsoleRegion r ('\n' : s)
+ m <- liftIO $ rateLimitMeterUpdate 0.2 meter $
updateMeter meter
- a (combinemeter m)
+ a meter (combinemeter m)
#else
nometer
#endif
go msize (MessageState { outputType = JSONOutput jsonoptions })
| jsonProgress jsonoptions = do
buf <- withMessageState $ return . jsonBuffer
- m <- liftIO $ rateLimitMeterUpdate 0.1 msize $
- JSON.progress buf msize
- a (combinemeter m)
+ meter <- liftIO $ mkMeter msize $ \_ msize' _old (new, _now) ->
+ JSON.progress buf msize' new
+ m <- liftIO $ rateLimitMeterUpdate 0.1 meter $
+ updateMeter meter
+ a meter (combinemeter m)
| otherwise = nometer
- nometer = a $ combinemeter (const noop)
+ nometer = do
+ dummymeter <- liftIO $ mkMeter Nothing $
+ \_ _ _ _ -> return ()
+ a dummymeter (combinemeter (const noop))
combinemeter m = case othermeter of
Nothing -> m
@@ -77,29 +83,19 @@ metered othermeter key getsrcfile a = withMessageState $ \st ->
Nothing -> return Nothing
Just f -> catchMaybeIO $ liftIO $ getFileSize f
-{- Use when the command's own progress output is preferred.
- - The command's output will be suppressed and git-annex's progress meter
- - used for concurrent output, and json progress. -}
-commandMetered :: Maybe MeterUpdate -> Key -> Annex (Maybe FilePath) -> (MeterUpdate -> Annex a) -> Annex a
-commandMetered combinemeterupdate key getsrcfile a =
- withMessageState $ \s -> if needOutputMeter s
- then metered combinemeterupdate key getsrcfile a
- else a (fromMaybe nullMeterUpdate combinemeterupdate)
-
{- Poll file size to display meter, but only when concurrent output or
- json progress needs the information. -}
meteredFile :: FilePath -> Maybe MeterUpdate -> Key -> Annex a -> Annex a
meteredFile file combinemeterupdate key a =
withMessageState $ \s -> if needOutputMeter s
- then metered combinemeterupdate key (return Nothing) $ \p ->
+ then metered combinemeterupdate key (return Nothing) $ \_ p ->
watchFileSize file p a
else a
-
-needOutputMeter :: MessageState -> Bool
-needOutputMeter s = case outputType s of
- JSONOutput jsonoptions -> jsonProgress jsonoptions
- NormalOutput | concurrentOutputEnabled s -> True
- _ -> False
+ where
+ needOutputMeter s = case outputType s of
+ JSONOutput jsonoptions -> jsonProgress jsonoptions
+ NormalOutput | concurrentOutputEnabled s -> True
+ _ -> False
{- Progress dots. -}
showProgressDots :: Annex ()
@@ -126,6 +122,11 @@ mkOutputHandler = OutputHandler
<$> commandProgressDisabled
<*> mkStderrEmitter
+mkOutputHandlerQuiet :: Annex OutputHandler
+mkOutputHandlerQuiet = OutputHandler
+ <$> pure True
+ <*> mkStderrEmitter
+
mkStderrRelayer :: Annex (Handle -> IO ())
mkStderrRelayer = do
quiet <- commandProgressDisabled
diff --git a/NEWS b/NEWS
index 484f7db..a127219 100644
--- a/NEWS
+++ b/NEWS
@@ -1,4 +1,14 @@
-git-annex (6.20170228) unstable; urgency=medium
+git-annex (6.20180309) upstream; urgency=medium
+
+ Note that, due to not using rsync to transfer files over ssh
+ any longer, permissions and other file metadata of annexed files
+ will no longer be preserved when copying them to and from ssh remotes.
+ Other remotes never supported preserving that information, so
+ this is not considered a regression.
+
+ -- Joey Hess <id@joeyh.name> Fri, 09 Mar 2018 13:22:47 -0400
+
+git-annex (6.20170228) upstream; urgency=medium
This version of git-annex has mitigations for SHA1 hash collision
problems.
@@ -10,7 +20,7 @@ git-annex (6.20170228) unstable; urgency=medium
-- Joey Hess <id@joeyh.name> Tue, 28 Feb 2017 13:28:50 -0400
-git-annex (6.20170101) unstable; urgency=medium
+git-annex (6.20170101) upstream; urgency=medium
XMPP support has been removed from the assistant in this release.
@@ -20,7 +30,7 @@ git-annex (6.20170101) unstable; urgency=medium
-- Joey Hess <id@joeyh.name> Tue, 27 Dec 2016 16:37:46 -0400
-git-annex (4.20131002) unstable; urgency=low
+git-annex (4.20131002) upstream; urgency=low
The layout of gcrypt repositories has changed, and
if you created one you must manually upgrade it.
@@ -28,7 +38,7 @@ git-annex (4.20131002) unstable; urgency=low
-- Joey Hess <joeyh@debian.org> Tue, 24 Sep 2013 13:55:23 -0400
-git-annex (3.20120123) unstable; urgency=low
+git-annex (3.20120123) upstream; urgency=low
There was a bug in the handling of directory special remotes that
could cause partial file contents to be stored in them. If you use
@@ -39,7 +49,7 @@ git-annex (3.20120123) unstable; urgency=low
-- Joey Hess <joeyh@debian.org> Thu, 19 Jan 2012 15:24:23 -0400
-git-annex (3.20110624) experimental; urgency=low
+git-annex (3.20110624) upstream; urgency=low
There has been another change to the git-annex data store.
Use `git annex upgrade` to migrate your repositories to the new
@@ -56,7 +66,7 @@ git-annex (3.20110624) experimental; urgency=low
-- Joey Hess <joeyh@debian.org> Tue, 21 Jun 2011 20:18:00 -0400
-git-annex (0.20110316) experimental; urgency=low
+git-annex (0.20110316) upstream; urgency=low
This version reorganises the layout of git-annex's files in your repository.
There is an upgrade process to convert a repository from the old git-annex
diff --git a/P2P/Annex.hs b/P2P/Annex.hs
index 82f669e..8c6bebf 100644
--- a/P2P/Annex.hs
+++ b/P2P/Annex.hs
@@ -1,52 +1,42 @@
{- P2P protocol, Annex implementation
-
- - Copyright 2016 Joey Hess <id@joeyh.name>
+ - Copyright 2016-2018 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
-{-# LANGUAGE RankNTypes, FlexibleContexts, CPP #-}
+{-# LANGUAGE RankNTypes, FlexibleContexts #-}
module P2P.Annex
- ( RunMode(..)
+ ( RunState(..)
+ , mkRunState
, P2PConnection(..)
, runFullProto
- , torSocketFile
) where
import Annex.Common
import Annex.Content
import Annex.Transfer
import Annex.ChangedRefs
-import P2P.Address
import P2P.Protocol
import P2P.IO
import Logs.Location
import Types.NumCopies
import Utility.Metered
-import Utility.Tor
-import Annex.UUID
import Control.Monad.Free
-#ifndef mingw32_HOST_OS
-import System.Posix.User
-#endif
-
-data RunMode
- = Serving UUID (Maybe ChangedRefsHandle)
- | Client
-- Full interpreter for Proto, that can receive and send objects.
-runFullProto :: RunMode -> P2PConnection -> Proto a -> Annex (Either String a)
-runFullProto runmode conn = go
+runFullProto :: RunState -> P2PConnection -> Proto a -> Annex (Either String a)
+runFullProto runst conn = go
where
go :: RunProto Annex
go (Pure v) = return (Right v)
- go (Free (Net n)) = runNet conn go n
- go (Free (Local l)) = runLocal runmode go l
+ go (Free (Net n)) = runNet runst conn go n
+ go (Free (Local l)) = runLocal runst go l
-runLocal :: RunMode -> RunProto Annex -> LocalF (Proto a) -> Annex (Either String a)
-runLocal runmode runner a = case a of
+runLocal :: RunState -> RunProto Annex -> LocalF (Proto a) -> Annex (Either String a)
+runLocal runst runner a = case a of
TmpContentSize k next -> do
tmp <- fromRepo $ gitAnnexTmpObjectLocation k
size <- liftIO $ catchDefaultIO 0 $ getFileSize tmp
@@ -61,14 +51,10 @@ runLocal runmode runner a = case a of
ReadContent k af o sender next -> do
v <- tryNonAsync $ prepSendAnnex k
case v of
- -- The check can detect if the file
- -- changed while it was transferred, but we don't
- -- use it. Instead, the receiving peer must
- -- AlwaysVerify the content it receives.
- Right (Just (f, _check)) -> do
+ Right (Just (f, checkchanged)) -> do
v' <- tryNonAsync $
transfer upload k af $
- sinkfile f o sender
+ sinkfile f o checkchanged sender
case v' of
Left e -> return (Left (show e))
Right (Left e) -> return (Left (show e))
@@ -76,16 +62,16 @@ runLocal runmode runner a = case a of
-- content not available
Right Nothing -> runner (next False)
Left e -> return (Left (show e))
- StoreContent k af o l getb next -> do
+ StoreContent k af o l getb validitycheck next -> do
ok <- flip catchNonAsync (const $ return False) $
transfer download k af $ \p ->
- getViaTmp AlwaysVerify k $ \tmp ->
- unVerified $ storefile tmp o l getb p
- runner (next ok)
- StoreContentTo dest o l getb next -> do
- ok <- flip catchNonAsync (const $ return False) $
- storefile dest o l getb nullMeterUpdate
+ getViaTmp DefaultVerify k $ \tmp -> do
+ storefile tmp o l getb validitycheck p
runner (next ok)
+ StoreContentTo dest o l getb validitycheck next -> do
+ res <- flip catchNonAsync (const $ return (False, UnVerified)) $
+ storefile dest o l getb validitycheck nullMeterUpdate
+ runner (next res)
SetPresent k u next -> do
v <- tryNonAsync $ logChange k u InfoPresent
case v of
@@ -120,36 +106,51 @@ runLocal runmode runner a = case a of
protoaction False
next
Right _ -> runner next
- WaitRefChange next -> case runmode of
- Serving _ (Just h) -> do
+ WaitRefChange next -> case runst of
+ Serving _ (Just h) _ -> do
v <- tryNonAsync $ liftIO $ waitChangedRefs h
case v of
Left e -> return (Left (show e))
Right changedrefs -> runner (next changedrefs)
_ -> return $ Left "change notification not available"
+ UpdateMeterTotalSize m sz next -> do
+ liftIO $ setMeterTotalSize m sz
+ runner next
+ RunValidityCheck check next -> runner . next =<< check
where
- transfer mk k af ta = case runmode of
+ transfer mk k af ta = case runst of
-- Update transfer logs when serving.
- Serving theiruuid _ ->
+ Serving theiruuid _ _ ->
mk theiruuid k af noRetry ta noNotification
-- Transfer logs are updated higher in the stack when
-- a client.
- Client -> ta nullMeterUpdate
+ Client _ -> ta nullMeterUpdate
- storefile dest (Offset o) (Len l) getb p = do
+ storefile dest (Offset o) (Len l) getb validitycheck p = do
let p' = offsetMeterUpdate p (toBytesProcessed o)
v <- runner getb
case v of
- Right b -> liftIO $ do
- withBinaryFile dest ReadWriteMode $ \h -> do
+ Right b -> do
+ liftIO $ withBinaryFile dest ReadWriteMode $ \h -> do
when (o /= 0) $
hSeek h AbsoluteSeek o
meteredWrite p' h b
- sz <- getFileSize dest
- return (toInteger sz == l + o)
+ rightsize <- do
+ sz <- liftIO $ getFileSize dest
+ return (toInteger sz == l + o)
+
+ runner validitycheck >>= \case
+ Right (Just Valid) ->
+ return (rightsize, UnVerified)
+ _ -> do
+ -- Invalid, or old protocol
+ -- version. Validity is not
+ -- known. Force content
+ -- verification.
+ return (rightsize, MustVerify)
Left e -> error e
- sinkfile f (Offset o) sender p = bracket setup cleanup go
+ sinkfile f (Offset o) checkchanged sender p = bracket setup cleanup go
where
setup = liftIO $ openBinaryFile f ReadMode
cleanup = liftIO . hClose
@@ -158,15 +159,8 @@ runLocal runmode runner a = case a of
when (o /= 0) $
liftIO $ hSeek h AbsoluteSeek o
b <- liftIO $ hGetContentsMetered h p'
- runner (sender b)
-
-torSocketFile :: Annex (Maybe FilePath)
-torSocketFile = do
- u <- getUUID
- let ident = fromUUID u
-#ifndef mingw32_HOST_OS
- uid <- liftIO getRealUserID
-#else
- let uid = 0
-#endif
- liftIO $ getHiddenServiceSocketFile torAppName uid ident
+ let validitycheck = local $ runValidityCheck $
+ checkchanged >>= return . \case
+ False -> Invalid
+ True -> Valid
+ runner (sender b validitycheck)
diff --git a/P2P/IO.hs b/P2P/IO.hs
index 9ebb102..b89f84d 100644
--- a/P2P/IO.hs
+++ b/P2P/IO.hs
@@ -9,7 +9,11 @@
module P2P.IO
( RunProto
+ , RunState(..)
+ , mkRunState
, P2PConnection(..)
+ , ClosableConnection(..)
+ , stdioP2PConnection
, connectPeer
, closeConnection
, serveUnixSocket
@@ -28,6 +32,8 @@ import Utility.SimpleProtocol
import Utility.Metered
import Utility.Tor
import Utility.FileMode
+import Types.UUID
+import Annex.ChangedRefs
import Control.Monad.Free
import Control.Monad.IO.Class
@@ -35,6 +41,7 @@ import System.Exit (ExitCode(..))
import Network.Socket
import Control.Concurrent
import Control.Concurrent.Async
+import Control.Concurrent.STM
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as L
import System.Log.Logger (debugM)
@@ -43,6 +50,15 @@ import qualified Network.Socket as S
-- Type of interpreters of the Proto free monad.
type RunProto m = forall a. (MonadIO m, MonadMask m) => Proto a -> m (Either String a)
+data RunState
+ = Serving UUID (Maybe ChangedRefsHandle) (TVar ProtocolVersion)
+ | Client (TVar ProtocolVersion)
+
+mkRunState :: (TVar ProtocolVersion -> RunState) -> IO RunState
+mkRunState mk = do
+ tvar <- newTVarIO defaultProtocolVersion
+ return (mk tvar)
+
data P2PConnection = P2PConnection
{ connRepo :: Repo
, connCheckAuth :: (AuthToken -> Bool)
@@ -50,6 +66,19 @@ data P2PConnection = P2PConnection
, connOhdl :: Handle
}
+data ClosableConnection conn
+ = OpenConnection conn
+ | ClosedConnection
+
+-- P2PConnection using stdio.
+stdioP2PConnection :: Git.Repo -> P2PConnection
+stdioP2PConnection g = P2PConnection
+ { connRepo = g
+ , connCheckAuth = const False
+ , connIhdl = stdin
+ , connOhdl = stdout
+ }
+
-- Opens a connection to a peer. Does not authenticate with it.
connectPeer :: Git.Repo -> P2PAddress -> IO P2PConnection
connectPeer g (TorAnnex onionaddress onionport) = do
@@ -106,20 +135,20 @@ setupHandle s = do
-- This only runs Net actions. No Local actions will be run
-- (those need the Annex monad) -- if the interpreter reaches any,
-- it returns Nothing.
-runNetProto :: P2PConnection -> Proto a -> IO (Either String a)
-runNetProto conn = go
+runNetProto :: RunState -> P2PConnection -> Proto a -> IO (Either String a)
+runNetProto runst conn = go
where
go :: RunProto IO
go (Pure v) = return (Right v)
- go (Free (Net n)) = runNet conn go n
+ go (Free (Net n)) = runNet runst conn go n
go (Free (Local _)) = return (Left "unexpected annex operation attempted")
-- Interpreter of the Net part of Proto.
--
-- An interpreter of Proto has to be provided, to handle the rest of Proto
-- actions.
-runNet :: (MonadIO m, MonadMask m) => P2PConnection -> RunProto m -> NetF (Proto a) -> m (Either String a)
-runNet conn runner f = case f of
+runNet :: (MonadIO m, MonadMask m) => RunState -> P2PConnection -> RunProto m -> NetF (Proto a) -> m (Either String a)
+runNet runst conn runner f = case f of
SendMessage m next -> do
v <- liftIO $ tryNonAsync $ do
let l = unwords (formatMessage m)
@@ -137,11 +166,8 @@ runNet conn runner f = case f of
Right (Just l) -> case parseMessage l of
Just m -> do
liftIO $ debugMessage "P2P <" m
- runner (next m)
- Nothing -> runner $ do
- let e = ERROR $ "protocol parse error: " ++ show l
- net $ sendMessage e
- next e
+ runner (next (Just m))
+ Nothing -> runner (next Nothing)
SendBytes len b p next -> do
v <- liftIO $ tryNonAsync $ do
ok <- sendExactly len b (connOhdl conn) p
@@ -169,11 +195,19 @@ runNet conn runner f = case f of
case v of
Left e -> return (Left e)
Right () -> runner next
+ SetProtocolVersion v next -> do
+ liftIO $ atomically $ writeTVar versiontvar v
+ runner next
+ GetProtocolVersion next ->
+ liftIO (readTVarIO versiontvar) >>= runner . next
where
-- This is only used for running Net actions when relaying,
-- so it's ok to use runNetProto, despite it not supporting
-- all Proto actions.
- runnerio = runNetProto conn
+ runnerio = runNetProto runst conn
+ versiontvar = case runst of
+ Serving _ _ tv -> tv
+ Client tv -> tv
debugMessage :: String -> Message -> IO ()
debugMessage prefix m = debugM "p2p" $
diff --git a/P2P/Protocol.hs b/P2P/Protocol.hs
index e749791..944c819 100644
--- a/P2P/Protocol.hs
+++ b/P2P/Protocol.hs
@@ -1,6 +1,8 @@
{- P2P protocol
-
- - Copyright 2016 Joey Hess <id@joeyh.name>
+ - See doc/design/p2p_protocol.mdwn
+ -
+ - Copyright 2016-2018 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@@ -12,8 +14,10 @@
module P2P.Protocol where
import qualified Utility.SimpleProtocol as Proto
+import Types (Annex)
import Types.Key
import Types.UUID
+import Types.Remote (Verification(..), unVerified)
import Utility.AuthToken
import Utility.Applicative
import Utility.PartialPrelude
@@ -39,16 +43,29 @@ newtype Offset = Offset Integer
newtype Len = Len Integer
deriving (Show)
--- | Service as used by the connect message is gitremote-helpers(1)
+newtype ProtocolVersion = ProtocolVersion Integer
+ deriving (Show, Eq, Ord)
+
+defaultProtocolVersion :: ProtocolVersion
+defaultProtocolVersion = ProtocolVersion 0
+
+maxProtocolVersion :: ProtocolVersion
+maxProtocolVersion = ProtocolVersion 1
+
+-- | Service as used by the connect message in gitremote-helpers(1)
data Service = UploadPack | ReceivePack
deriving (Show)
+data Validity = Valid | Invalid
+ deriving (Show)
+
-- | Messages in the protocol. The peer that makes the connection
-- always initiates requests, and the other peer makes responses to them.
data Message
= AUTH UUID AuthToken -- uuid of the peer that is authenticating
| AUTH_SUCCESS UUID -- uuid of the remote peer
| AUTH_FAILURE
+ | VERSION ProtocolVersion
| CONNECT Service
| CONNECTDONE ExitCode
| NOTIFYCHANGE
@@ -64,6 +81,7 @@ data Message
| SUCCESS
| FAILURE
| DATA Len -- followed by bytes of data
+ | VALIDITY Validity
| ERROR String
deriving (Show)
@@ -71,6 +89,7 @@ instance Proto.Sendable Message where
formatMessage (AUTH uuid authtoken) = ["AUTH", Proto.serialize uuid, Proto.serialize authtoken]
formatMessage (AUTH_SUCCESS uuid) = ["AUTH-SUCCESS", Proto.serialize uuid]
formatMessage AUTH_FAILURE = ["AUTH-FAILURE"]
+ formatMessage (VERSION v) = ["VERSION", Proto.serialize v]
formatMessage (CONNECT service) = ["CONNECT", Proto.serialize service]
formatMessage (CONNECTDONE exitcode) = ["CONNECTDONE", Proto.serialize exitcode]
formatMessage NOTIFYCHANGE = ["NOTIFYCHANGE"]
@@ -85,6 +104,8 @@ instance Proto.Sendable Message where
formatMessage ALREADY_HAVE = ["ALREADY-HAVE"]
formatMessage SUCCESS = ["SUCCESS"]
formatMessage FAILURE = ["FAILURE"]
+ formatMessage (VALIDITY Valid) = ["VALID"]
+ formatMessage (VALIDITY Invalid) = ["INVALID"]
formatMessage (DATA len) = ["DATA", Proto.serialize len]
formatMessage (ERROR err) = ["ERROR", Proto.serialize err]
@@ -92,6 +113,7 @@ instance Proto.Receivable Message where
parseCommand "AUTH" = Proto.parse2 AUTH
parseCommand "AUTH-SUCCESS" = Proto.parse1 AUTH_SUCCESS
parseCommand "AUTH-FAILURE" = Proto.parse0 AUTH_FAILURE
+ parseCommand "VERSION" = Proto.parse1 VERSION
parseCommand "CONNECT" = Proto.parse1 CONNECT
parseCommand "CONNECTDONE" = Proto.parse1 CONNECTDONE
parseCommand "NOTIFYCHANGE" = Proto.parse0 NOTIFYCHANGE
@@ -108,8 +130,14 @@ instance Proto.Receivable Message where
parseCommand "FAILURE" = Proto.parse0 FAILURE
parseCommand "DATA" = Proto.parse1 DATA
parseCommand "ERROR" = Proto.parse1 ERROR
+ parseCommand "VALID" = Proto.parse0 (VALIDITY Valid)
+ parseCommand "INVALID" = Proto.parse0 (VALIDITY Invalid)
parseCommand _ = Proto.parseFail
+instance Proto.Serializable ProtocolVersion where
+ serialize (ProtocolVersion n) = show n
+ deserialize = ProtocolVersion <$$> readish
+
instance Proto.Serializable Offset where
serialize (Offset n) = show n
deserialize = Offset <$$> readish
@@ -173,7 +201,7 @@ local = hoistFree Local
data NetF c
= SendMessage Message c
- | ReceiveMessage (Message -> c)
+ | ReceiveMessage (Maybe Message -> c)
| SendBytes Len L.ByteString MeterUpdate c
-- ^ Sends exactly Len bytes of data. (Any more or less will
-- confuse the receiver.)
@@ -190,6 +218,9 @@ data NetF c
-- peer, while at the same time accepting input from the peer
 -- which is sent to the second RelayHandle. Continues until
-- the peer sends an ExitCode.
+ | SetProtocolVersion ProtocolVersion c
+ -- ^ Called when a new protocol version has been negotiated.
+ | GetProtocolVersion (ProtocolVersion -> c)
deriving (Functor)
type Net = Free NetF
@@ -205,25 +236,26 @@ data LocalF c
| ContentSize Key (Maybe Len -> c)
-- ^ Gets size of the content of a key, when the full content is
-- present.
- | ReadContent Key AssociatedFile Offset (L.ByteString -> Proto Bool) (Bool -> c)
+ | ReadContent Key AssociatedFile Offset (L.ByteString -> Proto Validity -> Proto Bool) (Bool -> c)
-- ^ Reads the content of a key and sends it to the callback.
- -- Note that the content may change while it's being sent.
-- If the content is not available, sends L.empty to the callback.
- | StoreContent Key AssociatedFile Offset Len (Proto L.ByteString) (Bool -> c)
+ -- Note that the content may change while it's being sent.
+ -- The callback is passed a validity check that it can run after
+ -- sending the content to detect when this happened.
+ | StoreContent Key AssociatedFile Offset Len (Proto L.ByteString) (Proto (Maybe Validity)) (Bool -> c)
-- ^ Stores content to the key's temp file starting at an offset.
-- Once the whole content of the key has been stored, moves the
-- temp file into place as the content of the key, and returns True.
--
- -- Note: The ByteString may not contain the entire remaining content
- -- of the key. Only once the temp file size == Len has the whole
- -- content been transferred.
- | StoreContentTo FilePath Offset Len (Proto L.ByteString) (Bool -> c)
- -- ^ Stores the content to a temp file starting at an offset.
- -- Once the whole content of the key has been stored, returns True.
+ -- If the validity check is provided and fails, the content was
+ -- changed while it was being sent, so verification of the
+ -- received content should be forced.
--
-- Note: The ByteString may not contain the entire remaining content
-- of the key. Only once the temp file size == Len has the whole
-- content been transferred.
+ | StoreContentTo FilePath Offset Len (Proto L.ByteString) (Proto (Maybe Validity)) ((Bool, Verification) -> c)
+ -- ^ Like StoreContent, but stores the content to a temp file.
| SetPresent Key UUID c
| CheckContentPresent Key (Bool -> c)
-- ^ Checks if the whole content of the key is locally present.
@@ -233,10 +265,15 @@ data LocalF c
| TryLockContent Key (Bool -> Proto ()) c
-- ^ Try to lock the content of a key, preventing it
-- from being deleted, while running the provided protocol
- -- action. If unable to lock the content, runs the protocol action
- -- with False.
+ -- action. If unable to lock the content, or the content is not
+ -- present, runs the protocol action with False.
| WaitRefChange (ChangedRefs -> c)
- -- ^ Waits for one or more git refs to change and returns them.
+ -- ^ Waits for one or more git refs to change and returns them.
+ | UpdateMeterTotalSize Meter Integer c
+ -- ^ Updates the total size of a Meter, for cases where the size is
+ -- not known until the data is being received.
+ | RunValidityCheck (Annex Validity) (Validity -> c)
+ -- ^ Runs a deferred validity check.
deriving (Functor)
type Local = Free LocalF
@@ -245,17 +282,33 @@ type Local = Free LocalF
$(makeFree ''NetF)
$(makeFree ''LocalF)
-auth :: UUID -> AuthToken -> Proto (Maybe UUID)
-auth myuuid t = do
+auth :: UUID -> AuthToken -> Proto () -> Proto (Maybe UUID)
+auth myuuid t a = do
net $ sendMessage (AUTH myuuid t)
+ postAuth a
+
+postAuth :: Proto () -> Proto (Maybe UUID)
+postAuth a = do
r <- net receiveMessage
case r of
- AUTH_SUCCESS theiruuid -> return $ Just theiruuid
- AUTH_FAILURE -> return Nothing
+ Just (AUTH_SUCCESS theiruuid) -> do
+ a
+ return $ Just theiruuid
+ Just AUTH_FAILURE -> return Nothing
_ -> do
net $ sendMessage (ERROR "auth failed")
return Nothing
+negotiateProtocolVersion :: ProtocolVersion -> Proto ()
+negotiateProtocolVersion preferredversion = do
+ net $ sendMessage (VERSION preferredversion)
+ r <- net receiveMessage
+ case r of
+ Just (VERSION v) -> net $ setProtocolVersion v
+ -- Old server doesn't know about the VERSION command.
+ Just (ERROR _) -> return ()
+ _ -> net $ sendMessage (ERROR "expected VERSION")
+
checkPresent :: Key -> Proto Bool
checkPresent key = do
net $ sendMessage (CHECKPRESENT key)
@@ -286,8 +339,9 @@ remove key = do
net $ sendMessage (REMOVE key)
checkSuccess
-get :: FilePath -> Key -> AssociatedFile -> MeterUpdate -> Proto Bool
-get dest key af p = receiveContent p sizer storer (\offset -> GET offset af key)
+get :: FilePath -> Key -> AssociatedFile -> Meter -> MeterUpdate -> Proto (Bool, Verification)
+get dest key af m p =
+ receiveContent (Just m) p sizer storer (\offset -> GET offset af key)
where
sizer = fileSize dest
storer = storeContentTo dest
@@ -297,10 +351,10 @@ put key af p = do
net $ sendMessage (PUT af key)
r <- net receiveMessage
case r of
- PUT_FROM offset -> sendContent key af offset p
- ALREADY_HAVE -> return True
+ Just (PUT_FROM offset) -> sendContent key af offset p
+ Just ALREADY_HAVE -> return True
_ -> do
- net $ sendMessage (ERROR "expected PUT_FROM")
+ net $ sendMessage (ERROR "expected PUT_FROM or ALREADY_HAVE")
return False
data ServerHandler a
@@ -311,13 +365,19 @@ data ServerHandler a
-- Server loop, getting messages from the client and handling them
serverLoop :: (Message -> Proto (ServerHandler a)) -> Proto (Maybe a)
serverLoop a = do
- cmd <- net receiveMessage
- case cmd of
+ mcmd <- net receiveMessage
+ case mcmd of
-- When the client sends ERROR to the server, the server
-- gives up, since it's not clear what state the client
-- is in, and so not possible to recover.
- ERROR _ -> return Nothing
- _ -> do
+ Just (ERROR _) -> return Nothing
+ -- When the client sends an unparseable message, the server
+ -- responds with an error message, and loops. This allows
+ -- expanding the protocol with new messages.
+ Nothing -> do
+ net $ sendMessage (ERROR "unknown command")
+ serverLoop a
+ Just cmd -> do
v <- a cmd
case v of
ServerGot r -> return (Just r)
@@ -351,43 +411,64 @@ serveAuth myuuid = serverLoop handler
return ServerContinue
handler _ = return ServerUnexpected
+data ServerMode = ServeReadOnly | ServeReadWrite
+
-- | Serve the protocol, with a peer that has authenticated.
-serveAuthed :: UUID -> Proto ()
-serveAuthed myuuid = void $ serverLoop handler
+serveAuthed :: ServerMode -> UUID -> Proto ()
+serveAuthed servermode myuuid = void $ serverLoop handler
where
+ readonlyerror = net $ sendMessage (ERROR "this repository is read-only; write access denied")
+ handler (VERSION theirversion) = do
+ let v = min theirversion maxProtocolVersion
+ net $ setProtocolVersion v
+ net $ sendMessage (VERSION v)
+ return ServerContinue
handler (LOCKCONTENT key) = do
local $ tryLockContent key $ \locked -> do
sendSuccess locked
when locked $ do
r' <- net receiveMessage
case r' of
- UNLOCKCONTENT -> return ()
+ Just UNLOCKCONTENT -> return ()
_ -> net $ sendMessage (ERROR "expected UNLOCKCONTENT")
return ServerContinue
handler (CHECKPRESENT key) = do
sendSuccess =<< local (checkContentPresent key)
return ServerContinue
- handler (REMOVE key) = do
- sendSuccess =<< local (removeContent key)
- return ServerContinue
- handler (PUT af key) = do
- have <- local $ checkContentPresent key
- if have
- then net $ sendMessage ALREADY_HAVE
- else do
- let sizer = tmpContentSize key
- let storer = storeContent key af
- ok <- receiveContent nullMeterUpdate sizer storer PUT_FROM
- when ok $
- local $ setPresent key myuuid
- return ServerContinue
+ handler (REMOVE key) = case servermode of
+ ServeReadWrite -> do
+ sendSuccess =<< local (removeContent key)
+ return ServerContinue
+ ServeReadOnly -> do
+ readonlyerror
+ return ServerContinue
+ handler (PUT af key) = case servermode of
+ ServeReadWrite -> do
+ have <- local $ checkContentPresent key
+ if have
+ then net $ sendMessage ALREADY_HAVE
+ else do
+ let sizer = tmpContentSize key
+ let storer = \o l b v -> unVerified $
+ storeContent key af o l b v
+ (ok, _v) <- receiveContent Nothing nullMeterUpdate sizer storer PUT_FROM
+ when ok $
+ local $ setPresent key myuuid
+ return ServerContinue
+ ServeReadOnly -> do
+ readonlyerror
+ return ServerContinue
handler (GET offset key af) = do
void $ sendContent af key offset nullMeterUpdate
-- setPresent not called because the peer may have
-- requested the data but not permanently stored it.
return ServerContinue
handler (CONNECT service) = do
- net $ relayService service
+ let goahead = net $ relayService service
+ case (servermode, service) of
+ (ServeReadWrite, _) -> goahead
+ (ServeReadOnly, UploadPack) -> goahead
+ (ServeReadOnly, ReceivePack) -> readonlyerror
-- After connecting to git, there may be unconsumed data
-- from the git processes hanging around (even if they
-- exited successfully), so stop serving this connection.
@@ -401,42 +482,63 @@ serveAuthed myuuid = void $ serverLoop handler
sendContent :: Key -> AssociatedFile -> Offset -> MeterUpdate -> Proto Bool
sendContent key af offset@(Offset n) p = go =<< local (contentSize key)
where
- go Nothing = sender (Len 0) L.empty
+ go Nothing = sender (Len 0) L.empty (return Valid)
go (Just (Len totallen)) = do
let len = totallen - n
if len <= 0
- then sender (Len 0) L.empty
+ then sender (Len 0) L.empty (return Valid)
else local $ readContent key af offset $
sender (Len len)
- sender len content = do
+ sender len content validitycheck = do
let p' = offsetMeterUpdate p (toBytesProcessed n)
net $ sendMessage (DATA len)
net $ sendBytes len content p'
+ ver <- net getProtocolVersion
+ when (ver >= ProtocolVersion 1) $
+ net . sendMessage . VALIDITY =<< validitycheck
checkSuccess
-receiveContent :: MeterUpdate -> Local Len -> (Offset -> Len -> Proto L.ByteString -> Local Bool) -> (Offset -> Message) -> Proto Bool
-receiveContent p sizer storer mkmsg = do
+receiveContent
+ :: Maybe Meter
+ -> MeterUpdate
+ -> Local Len
+ -> (Offset -> Len -> Proto L.ByteString -> Proto (Maybe Validity) -> Local (Bool, Verification))
+ -> (Offset -> Message)
+ -> Proto (Bool, Verification)
+receiveContent mm p sizer storer mkmsg = do
Len n <- local sizer
let p' = offsetMeterUpdate p (toBytesProcessed n)
let offset = Offset n
net $ sendMessage (mkmsg offset)
r <- net receiveMessage
case r of
- DATA len -> do
- ok <- local $ storer offset len
+ Just (DATA len@(Len l)) -> do
+ local $ case mm of
+ Nothing -> return ()
+ Just m -> updateMeterTotalSize m (n+l)
+ ver <- net getProtocolVersion
+ let validitycheck = if ver >= ProtocolVersion 1
+ then net receiveMessage >>= \case
+ Just (VALIDITY v) -> return (Just v)
+ _ -> do
+ net $ sendMessage (ERROR "expected VALID or INVALID")
+ return Nothing
+ else return Nothing
+ (ok, v) <- local $ storer offset len
(net (receiveBytes len p'))
+ validitycheck
sendSuccess ok
- return ok
+ return (ok, v)
_ -> do
net $ sendMessage (ERROR "expected DATA")
- return False
+ return (False, UnVerified)
checkSuccess :: Proto Bool
checkSuccess = do
ack <- net receiveMessage
case ack of
- SUCCESS -> return True
- FAILURE -> return False
+ Just SUCCESS -> return True
+ Just FAILURE -> return False
_ -> do
net $ sendMessage (ERROR "expected SUCCESS or FAILURE")
return False
@@ -450,7 +552,7 @@ notifyChange = do
net $ sendMessage NOTIFYCHANGE
ack <- net receiveMessage
case ack of
- CHANGED rs -> return (Just rs)
+ Just (CHANGED rs) -> return (Just rs)
_ -> do
net $ sendMessage (ERROR "expected CHANGED")
return Nothing
@@ -470,8 +572,8 @@ relayFromPeer :: Net RelayData
relayFromPeer = do
r <- receiveMessage
case r of
- CONNECTDONE exitcode -> return $ RelayDone exitcode
- DATA len -> RelayFromPeer <$> receiveBytes len nullMeterUpdate
+ Just (CONNECTDONE exitcode) -> return $ RelayDone exitcode
+ Just (DATA len) -> RelayFromPeer <$> receiveBytes len nullMeterUpdate
_ -> do
sendMessage $ ERROR "expected DATA or CONNECTDONE"
return $ RelayDone $ ExitFailure 1
diff --git a/Remote/GCrypt.hs b/Remote/GCrypt.hs
index 52ae5e1..15ddfdb 100644
--- a/Remote/GCrypt.hs
+++ b/Remote/GCrypt.hs
@@ -50,6 +50,7 @@ import Utility.Tmp
import Logs.Remote
import Utility.Gpg
import Utility.SshHost
+import Messages.Progress
remote :: RemoteType
remote = RemoteType
@@ -338,9 +339,11 @@ store r rsyncopts
Remote.Directory.finalizeStoreGeneric tmpdir destdir
return True
| Git.repoIsSsh (repo r) = if accessShell r
- then fileStorer $ \k f p -> Ssh.rsyncHelper (Just p)
- =<< Ssh.rsyncParamsRemote False r Upload k f
- (AssociatedFile Nothing)
+ then fileStorer $ \k f p -> do
+ oh <- mkOutputHandler
+ Ssh.rsyncHelper oh (Just p)
+ =<< Ssh.rsyncParamsRemote False r Upload k f
+ (AssociatedFile Nothing)
else fileStorer $ Remote.Rsync.store rsyncopts
| otherwise = unsupportedUrl
@@ -353,7 +356,8 @@ retrieve r rsyncopts
then fileRetriever $ \f k p -> do
ps <- Ssh.rsyncParamsRemote False r Download k f
(AssociatedFile Nothing)
- unlessM (Ssh.rsyncHelper (Just p) ps) $
+ oh <- mkOutputHandler
+ unlessM (Ssh.rsyncHelper oh (Just p) ps) $
giveup "rsync failed"
else fileRetriever $ Remote.Rsync.retrieve rsyncopts
| otherwise = unsupportedUrl
diff --git a/Remote/Git.hs b/Remote/Git.hs
index 2cebcce..526a0f4 100644
--- a/Remote/Git.hs
+++ b/Remote/Git.hs
@@ -2,7 +2,7 @@
-
- Copyright 2011-2018 Joey Hess <id@joeyh.name>
-
- - Licensed under the GNU GPL version 3 or higher.
+ - Licensed under the GNU AGPL version 3 or higher.
-}
{-# LANGUAGE CPP #-}
@@ -54,12 +54,13 @@ import Remote.Helper.Export
import qualified Remote.Helper.Ssh as Ssh
import qualified Remote.GCrypt
import qualified Remote.P2P
+import qualified Remote.Helper.P2P as P2PHelper
import P2P.Address
import Annex.Path
import Creds
-import Messages.Progress
import Types.NumCopies
import Annex.Action
+import Messages.Progress
import Control.Concurrent
import Control.Concurrent.MSampleVar
@@ -146,23 +147,23 @@ gen r u c gc
| Git.GCrypt.isEncrypted r = Remote.GCrypt.chainGen r u c gc
| otherwise = case repoP2PAddress r of
Nothing -> do
- duc <- mkDeferredUUIDCheck r u gc
- go duc <$> remoteCost gc defcst
+ st <- mkState r u gc
+ go st <$> remoteCost gc defcst
Just addr -> Remote.P2P.chainGen addr r u c gc
where
defcst = if repoCheap r then cheapRemoteCost else expensiveRemoteCost
- go duc cst = Just new
+ go st cst = Just new
where
new = Remote
{ uuid = u
, cost = cst
, name = Git.repoDescribe r
- , storeKey = copyToRemote new duc
- , retrieveKeyFile = copyFromRemote new
- , retrieveKeyFileCheap = copyFromRemoteCheap new
- , removeKey = dropKey new duc
- , lockContent = Just (lockKey new duc)
- , checkPresent = inAnnex new duc
+ , storeKey = copyToRemote new st
+ , retrieveKeyFile = copyFromRemote new st
+ , retrieveKeyFileCheap = copyFromRemoteCheap new st
+ , removeKey = dropKey new st
+ , lockContent = Just (lockKey new st)
+ , checkPresent = inAnnex new st
, checkPresentCheap = repoCheap r
, exportActions = exportUnsupported
, whereisKey = Nothing
@@ -327,8 +328,8 @@ tryGitConfigRead autoinit r
else []
{- Checks if a given remote has the content for a key in its annex. -}
-inAnnex :: Remote -> DeferredUUIDCheck -> Key -> Annex Bool
-inAnnex rmt duc key
+inAnnex :: Remote -> State -> Key -> Annex Bool
+inAnnex rmt (State connpool duc) key
| Git.repoIsHttp r = checkhttp
| Git.repoIsUrl r = checkremote
| otherwise = checklocal
@@ -340,7 +341,9 @@ inAnnex rmt duc key
( return True
, giveup "not found"
)
- checkremote = Ssh.inAnnex r key
+ checkremote =
+ let fallback = Ssh.inAnnex r key
+ in P2PHelper.checkpresent (Ssh.runProto rmt connpool (cantCheck rmt) fallback) key
checklocal = ifM duc
( guardUsable r (cantCheck r) $
maybe (cantCheck r) return
@@ -365,8 +368,8 @@ keyUrls r key = map tourl locs'
remoteconfig = gitconfig r
cfg = remoteGitConfig remoteconfig
-dropKey :: Remote -> DeferredUUIDCheck -> Key -> Annex Bool
-dropKey r duc key
+dropKey :: Remote -> State -> Key -> Annex Bool
+dropKey r (State connpool duc) key
| not $ Git.repoIsUrl (repo r) = ifM duc
( guardUsable (repo r) (return False) $
commitOnCleanup r $ onLocalFast r $ do
@@ -380,10 +383,12 @@ dropKey r duc key
, return False
)
| Git.repoIsHttp (repo r) = giveup "dropping from http remote not supported"
- | otherwise = commitOnCleanup r $ Ssh.dropKey (repo r) key
+ | otherwise = commitOnCleanup r $ do
+ let fallback = Ssh.dropKey (repo r) key
+ P2PHelper.remove (Ssh.runProto r connpool False fallback) key
-lockKey :: Remote -> DeferredUUIDCheck -> Key -> (VerifiedCopy -> Annex r) -> Annex r
-lockKey r duc key callback
+lockKey :: Remote -> State -> Key -> (VerifiedCopy -> Annex r) -> Annex r
+lockKey r (State connpool duc) key callback
| not $ Git.repoIsUrl (repo r) = ifM duc
( guardUsable (repo r) failedlock $ do
inorigrepo <- Annex.makeRunner
@@ -391,15 +396,17 @@ lockKey r duc key callback
-- and then run the callback in the original
-- annex monad, not the remote's.
onLocalFast r $
- Annex.Content.lockContentShared key $ \vc ->
- ifM (Annex.Content.inAnnex key)
- ( liftIO $ inorigrepo $ callback vc
- , failedlock
- )
+ Annex.Content.lockContentShared key $
+ liftIO . inorigrepo . callback
, failedlock
)
| Git.repoIsSsh (repo r) = do
showLocking r
+ let withconn = Ssh.withP2PSshConnection r connpool fallback
+ P2PHelper.lock withconn Ssh.runProtoConn (uuid r) key callback
+ | otherwise = failedlock
+ where
+ fallback = do
Just (cmd, params) <- Ssh.git_annex_shell ConsumeStdin
(repo r) "lockcontent"
[Param $ key2file key] []
@@ -437,20 +444,16 @@ lockKey r duc key callback
showNote "lockcontent failed"
signaldone
failedlock
- | otherwise = failedlock
- where
failedlock = giveup "can't lock content"
{- Tries to copy a key's content from a remote's annex to a file. -}
-copyFromRemote :: Remote -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex (Bool, Verification)
-copyFromRemote r key file dest p
- | Git.repoIsHttp (repo r) = unVerified $
- Annex.Content.downloadUrl key p (keyUrls r key) dest
- | otherwise = commandMetered (Just p) key (return Nothing) $
- copyFromRemote' r key file dest
+copyFromRemote :: Remote -> State -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex (Bool, Verification)
+copyFromRemote = copyFromRemote' False
-copyFromRemote' :: Remote -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex (Bool, Verification)
-copyFromRemote' r key file dest meterupdate
+copyFromRemote' :: Bool -> Remote -> State -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex (Bool, Verification)
+copyFromRemote' forcersync r (State connpool _) key file dest meterupdate
+ | Git.repoIsHttp (repo r) = unVerified $
+ Annex.Content.downloadUrl key meterupdate (keyUrls r key) dest
| not $ Git.repoIsUrl (repo r) = guardUsable (repo r) (unVerified (return False)) $ do
params <- Ssh.rsyncParams r Download
u <- getUUID
@@ -466,11 +469,17 @@ copyFromRemote' r key file dest meterupdate
runTransfer (Transfer Download u key)
file forwardRetry
(\p -> copier object dest (combineMeterUpdate p meterupdate) checksuccess)
- | Git.repoIsSsh (repo r) = unVerified $ feedprogressback $ \p -> do
- Ssh.rsyncHelper (Just (combineMeterUpdate meterupdate p))
- =<< Ssh.rsyncParamsRemote False r Download key dest file
+ | Git.repoIsSsh (repo r) = if forcersync
+ then fallback meterupdate
+ else P2PHelper.retrieve
+ (\p -> Ssh.runProto r connpool (False, UnVerified) (fallback p))
+ key file dest meterupdate
| otherwise = giveup "copying from non-ssh, non-http remote not supported"
where
+ fallback p = unVerified $ feedprogressback $ \p' -> do
+ oh <- mkOutputHandlerQuiet
+ Ssh.rsyncHelper oh (Just (combineMeterUpdate p' p))
+ =<< Ssh.rsyncParamsRemote False r Download key dest file
{- Feed local rsync's progress info back to the remote,
- by forking a feeder thread that runs
- git-annex-shell transferinfo at the same time
@@ -531,9 +540,9 @@ copyFromRemote' r key file dest meterupdate
=<< tryTakeMVar pidv
bracketIO noop (const cleanup) (const $ a feeder)
-copyFromRemoteCheap :: Remote -> Key -> AssociatedFile -> FilePath -> Annex Bool
+copyFromRemoteCheap :: Remote -> State -> Key -> AssociatedFile -> FilePath -> Annex Bool
#ifndef mingw32_HOST_OS
-copyFromRemoteCheap r key af file
+copyFromRemoteCheap r st key af file
| not $ Git.repoIsUrl (repo r) = guardUsable (repo r) (return False) $ liftIO $ do
loc <- gitAnnexLocation key (repo r) $
remoteGitConfig $ gitconfig r
@@ -547,36 +556,31 @@ copyFromRemoteCheap r key af file
)
| Git.repoIsSsh (repo r) =
ifM (Annex.Content.preseedTmp key file)
- ( fst <$> copyFromRemote r key af file nullMeterUpdate
+ ( fst <$> copyFromRemote' True r st key af file nullMeterUpdate
, return False
)
| otherwise = return False
#else
-copyFromRemoteCheap _ _ _ _ = return False
+copyFromRemoteCheap _ _ _ _ _ = return False
#endif
{- Tries to copy a key's content to a remote's annex. -}
-copyToRemote :: Remote -> DeferredUUIDCheck -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
-copyToRemote r duc key file meterupdate
+copyToRemote :: Remote -> State -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
+copyToRemote r (State connpool duc) key file meterupdate
| not $ Git.repoIsUrl (repo r) = ifM duc
( guardUsable (repo r) (return False) $ commitOnCleanup r $
copylocal =<< Annex.Content.prepSendAnnex key
, return False
)
| Git.repoIsSsh (repo r) = commitOnCleanup r $
- Annex.Content.sendAnnex key noop $ \object ->
- withmeter object $ \p -> do
- -- This is too broad really, but recvkey normally
- -- verifies content anyway, so avoid complicating
- -- it with a local sendAnnex check and rollback.
- unlocked <- isDirect <||> versionSupportsUnlockedPointers
- Ssh.rsyncHelper (Just p)
- =<< Ssh.rsyncParamsRemote unlocked r Upload key object file
+ P2PHelper.store
+ (\p -> Ssh.runProto r connpool False (copyremotefallback p))
+ key file meterupdate
+
| otherwise = giveup "copying to non-ssh repo not supported"
where
- withmeter object = commandMetered (Just meterupdate) key (return $ Just object)
copylocal Nothing = return False
- copylocal (Just (object, checksuccess)) = withmeter object $ \p -> do
+ copylocal (Just (object, checksuccess)) = do
-- The checksuccess action is going to be run in
-- the remote's Annex, but it needs access to the local
-- Annex monad's state.
@@ -591,12 +595,20 @@ copyToRemote r duc key file meterupdate
ensureInitialized
copier <- mkCopier hardlink params
let verify = Annex.Content.RemoteVerify r
- runTransfer (Transfer Download u key) file forwardRetry $ \p' ->
- let p'' = combineMeterUpdate p p'
+ runTransfer (Transfer Download u key) file forwardRetry $ \p ->
+ let p' = combineMeterUpdate meterupdate p
in Annex.Content.saveState True `after`
Annex.Content.getViaTmp verify key
- (\dest -> copier object dest p'' (liftIO checksuccessio))
+ (\dest -> copier object dest p' (liftIO checksuccessio))
)
+ copyremotefallback p = Annex.Content.sendAnnex key noop $ \object -> do
+ -- This is too broad really, but recvkey normally
+ -- verifies content anyway, so avoid complicating
+ -- it with a local sendAnnex check and rollback.
+ unlocked <- isDirect <||> versionSupportsUnlockedPointers
+ oh <- mkOutputHandlerQuiet
+ Ssh.rsyncHelper oh (Just p)
+ =<< Ssh.rsyncParamsRemote unlocked r Upload key object file
fsckOnRemote :: Git.Repo -> [CommandParam] -> Annex (IO Bool)
fsckOnRemote r params
@@ -672,8 +684,10 @@ rsyncOrCopyFile rsyncparams src dest p =
docopy = liftIO $ watchFileSize dest p $
copyFileExternal CopyTimeStamps src dest
#endif
- dorsync = Ssh.rsyncHelper (Just p) $
- rsyncparams ++ [File src, File dest]
+ dorsync = do
+ oh <- mkOutputHandler
+ Ssh.rsyncHelper oh (Just p) $
+ rsyncparams ++ [File src, File dest]
commitOnCleanup :: Remote -> Annex a -> Annex a
commitOnCleanup r a = go `after` a
@@ -732,10 +746,18 @@ mkCopier remotewanthardlink rsyncparams = do
, return copier
)
-{- Normally the UUID is checked at startup, but annex-checkuuid config
- - can prevent that. To avoid getting confused, a deferred
- - check is done just before the repository is used. This returns False
- - when the repository UUID is not as expected. -}
+data State = State Ssh.P2PSshConnectionPool DeferredUUIDCheck
+
+mkState :: Git.Repo -> UUID -> RemoteGitConfig -> Annex State
+mkState r u gc = State
+ <$> Ssh.mkP2PSshConnectionPool
+ <*> mkDeferredUUIDCheck r u gc
+
+{- Normally the UUID of a local repository is checked at startup,
+ - but annex-checkuuid config can prevent that. To avoid getting
+ - confused, a deferred check is done just before the repository
+ - is used.
+ - This returns False when the repository UUID is not as expected. -}
type DeferredUUIDCheck = Annex Bool
mkDeferredUUIDCheck :: Git.Repo -> UUID -> RemoteGitConfig -> Annex DeferredUUIDCheck
diff --git a/Remote/Helper/P2P.hs b/Remote/Helper/P2P.hs
new file mode 100644
index 0000000..a953df1
--- /dev/null
+++ b/Remote/Helper/P2P.hs
@@ -0,0 +1,67 @@
+{- Helpers for remotes using the git-annex P2P protocol.
+ -
+ - Copyright 2016-2018 Joey Hess <id@joeyh.name>
+ -
+ - Licensed under the GNU GPL version 3 or higher.
+ -}
+
+{-# LANGUAGE RankNTypes #-}
+
+module Remote.Helper.P2P where
+
+import Annex.Common
+import qualified P2P.Protocol as P2P
+import P2P.IO
+import Types.Remote
+import Annex.Content
+import Messages.Progress
+import Utility.Metered
+import Types.NumCopies
+
+import Control.Concurrent
+
+-- Runs a Proto action using a connection it sets up.
+type ProtoRunner a = P2P.Proto a -> Annex (Maybe a)
+
+-- Runs a Proto action using a ClosableConnection.
+type ProtoConnRunner c = forall a. P2P.Proto a -> ClosableConnection c -> Annex (ClosableConnection c, Maybe a)
+
+-- Runs an Annex action with a connection from the pool, adding it back to
+-- the pool when done.
+type WithConn a c = (ClosableConnection c -> Annex (ClosableConnection c, a)) -> Annex a
+
+store :: (MeterUpdate -> ProtoRunner Bool) -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
+store runner k af p = do
+ let getsrcfile = fmap fst <$> prepSendAnnex k
+ metered (Just p) k getsrcfile $ \_ p' ->
+ fromMaybe False
+ <$> runner p' (P2P.put k af p')
+
+retrieve :: (MeterUpdate -> ProtoRunner (Bool, Verification)) -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex (Bool, Verification)
+retrieve runner k af dest p =
+ metered (Just p) k (return Nothing) $ \m p' ->
+ fromMaybe (False, UnVerified)
+ <$> runner p' (P2P.get dest k af m p')
+
+remove :: ProtoRunner Bool -> Key -> Annex Bool
+remove runner k = fromMaybe False <$> runner (P2P.remove k)
+
+checkpresent :: ProtoRunner Bool -> Key -> Annex Bool
+checkpresent runner k = maybe unavail return =<< runner (P2P.checkPresent k)
+ where
+ unavail = giveup "can't connect to remote"
+
+lock :: WithConn a c -> ProtoConnRunner c -> UUID -> Key -> (VerifiedCopy -> Annex a) -> Annex a
+lock withconn connrunner u k callback = withconn $ \conn -> do
+ connv <- liftIO $ newMVar conn
+ let runproto d p = do
+ c <- liftIO $ takeMVar connv
+ (c', mr) <- connrunner p c
+ liftIO $ putMVar connv c'
+ return (fromMaybe d mr)
+ r <- P2P.lockContentWhile runproto k go
+ conn' <- liftIO $ takeMVar connv
+ return (conn', r)
+ where
+ go False = giveup "can't lock content"
+ go True = withVerifiedCopy LockedCopy u (return True) callback
diff --git a/Remote/Helper/Special.hs b/Remote/Helper/Special.hs
index 83e08c5..446bd36 100644
--- a/Remote/Helper/Special.hs
+++ b/Remote/Helper/Special.hs
@@ -228,7 +228,7 @@ specialRemote' cfg c preparestorer prepareretriever prepareremover preparecheckp
chunkconfig = chunkConfig cfg
displayprogress p k srcfile a
- | displayProgress cfg = metered (Just p) k (return srcfile) a
+ | displayProgress cfg = metered (Just p) k (return srcfile) (const a)
| otherwise = a p
{- Sink callback for retrieveChunks. Stores the file content into the
diff --git a/Remote/Helper/Ssh.hs b/Remote/Helper/Ssh.hs
index a4d91ab..3ceb41e 100644
--- a/Remote/Helper/Ssh.hs
+++ b/Remote/Helper/Ssh.hs
@@ -1,8 +1,8 @@
{- git-annex remote access with ssh and git-annex-shell
-
- - Copyright 2011-2013 Joey Hess <id@joeyh.name>
+ - Copyright 2011-2018 Joey Hess <id@joeyh.name>
-
- - Licensed under the GNU GPL version 3 or higher.
+ - Licensed under the GNU AGPL version 3 or higher.
-}
module Remote.Helper.Ssh where
@@ -16,13 +16,17 @@ import Annex.Ssh
import CmdLine.GitAnnexShell.Fields (Field, fieldName)
import qualified CmdLine.GitAnnexShell.Fields as Fields
import Remote.Helper.Messages
-import Messages.Progress
import Utility.Metered
import Utility.Rsync
import Utility.SshHost
import Types.Remote
import Types.Transfer
import Config
+import qualified P2P.Protocol as P2P
+import qualified P2P.IO as P2P
+import qualified P2P.Annex as P2P
+
+import Control.Concurrent.STM
toRepo :: ConsumeStdin -> Git.Repo -> RemoteGitConfig -> SshCommand -> Annex (FilePath, [CommandParam])
toRepo cs r gc remotecmd = do
@@ -91,9 +95,9 @@ onRemote cs r (with, errorval) command params fields = do
inAnnex :: Git.Repo -> Key -> Annex Bool
inAnnex r k = do
showChecking r
- onRemote NoConsumeStdin r (check, cantCheck r) "inannex" [Param $ key2file k] []
+ onRemote NoConsumeStdin r (runcheck, cantCheck r) "inannex" [Param $ key2file k] []
where
- check c p = dispatch =<< safeSystem c p
+ runcheck c p = dispatch =<< safeSystem c p
dispatch ExitSuccess = return True
dispatch (ExitFailure 1) = return False
dispatch _ = cantCheck r
@@ -106,14 +110,13 @@ dropKey r key = onRemote NoConsumeStdin r (boolSystem, return False) "dropkey"
]
[]
-rsyncHelper :: Maybe MeterUpdate -> [CommandParam] -> Annex Bool
-rsyncHelper m params = do
- showOutput -- make way for progress bar
+rsyncHelper :: OutputHandler -> Maybe MeterUpdate -> [CommandParam] -> Annex Bool
+rsyncHelper oh m params = do
+ unless (quietMode oh) $
+ showOutput -- make way for progress bar
a <- case m of
Nothing -> return $ rsync params
- Just meter -> do
- oh <- mkOutputHandler
- return $ rsyncProgress oh meter params
+ Just meter -> return $ rsyncProgress oh meter params
ifM (liftIO a)
( return True
, do
@@ -179,3 +182,154 @@ rsyncParams r direction = do
-- successfully locked.
contentLockedMarker :: String
contentLockedMarker = "OK"
+
+-- A connection over ssh to git-annex shell speaking the P2P protocol.
+type P2PSshConnection = P2P.ClosableConnection
+ (P2P.RunState, P2P.P2PConnection, ProcessHandle)
+
+closeP2PSshConnection :: P2PSshConnection -> IO (P2PSshConnection, Maybe ExitCode)
+closeP2PSshConnection P2P.ClosedConnection = return (P2P.ClosedConnection, Nothing)
+closeP2PSshConnection (P2P.OpenConnection (_st, conn, pid)) = do
+ P2P.closeConnection conn
+ exitcode <- waitForProcess pid
+ return (P2P.ClosedConnection, Just exitcode)
+
+-- Pool of connections over ssh to git-annex-shell p2pstdio.
+type P2PSshConnectionPool = TVar (Maybe P2PSshConnectionPoolState)
+
+data P2PSshConnectionPoolState
+ = P2PSshConnections [P2PSshConnection]
+ -- Remotes using an old version of git-annex-shell don't support P2P
+ | P2PSshUnsupported
+
+mkP2PSshConnectionPool :: Annex P2PSshConnectionPool
+mkP2PSshConnectionPool = liftIO $ newTVarIO Nothing
+
+-- Takes a connection from the pool, if any are available, otherwise
+-- tries to open a new one.
+getP2PSshConnection :: Remote -> P2PSshConnectionPool -> Annex (Maybe P2PSshConnection)
+getP2PSshConnection r connpool = getexistingconn >>= \case
+ Nothing -> return Nothing
+ Just Nothing -> openP2PSshConnection r connpool
+ Just (Just c) -> return (Just c)
+ where
+ getexistingconn = liftIO $ atomically $ readTVar connpool >>= \case
+ Just P2PSshUnsupported -> return Nothing
+ Just (P2PSshConnections (c:cs)) -> do
+ writeTVar connpool (Just (P2PSshConnections cs))
+ return (Just (Just c))
+ Just (P2PSshConnections []) -> return (Just Nothing)
+ Nothing -> return (Just Nothing)
+
+-- Add a connection to the pool, unless it's closed.
+storeP2PSshConnection :: P2PSshConnectionPool -> P2PSshConnection -> IO ()
+storeP2PSshConnection _ P2P.ClosedConnection = return ()
+storeP2PSshConnection connpool conn = atomically $ modifyTVar' connpool $ \case
+ Just (P2PSshConnections cs) -> Just (P2PSshConnections (conn:cs))
+ _ -> Just (P2PSshConnections [conn])
+
+-- Try to open a P2PSshConnection.
+-- The new connection is not added to the pool, so it's available
+-- for the caller to use.
+-- If the remote does not support the P2P protocol, that's remembered in
+-- the connection pool.
+openP2PSshConnection :: Remote -> P2PSshConnectionPool -> Annex (Maybe P2PSshConnection)
+openP2PSshConnection r connpool = do
+ u <- getUUID
+ let ps = [Param (fromUUID u)]
+ git_annex_shell ConsumeStdin (repo r) "p2pstdio" ps [] >>= \case
+ Nothing -> do
+ liftIO $ rememberunsupported
+ return Nothing
+ Just (cmd, params) -> start cmd params
+ where
+ start cmd params = liftIO $ withNullHandle $ \nullh -> do
+ -- stderr is discarded because old versions of git-annex
+ -- shell always error
+ (Just from, Just to, Nothing, pid) <- createProcess $
+ (proc cmd (toCommand params))
+ { std_in = CreatePipe
+ , std_out = CreatePipe
+ , std_err = UseHandle nullh
+ }
+ let conn = P2P.P2PConnection
+ { P2P.connRepo = repo r
+ , P2P.connCheckAuth = const False
+ , P2P.connIhdl = to
+ , P2P.connOhdl = from
+ }
+ runst <- P2P.mkRunState P2P.Client
+ let c = P2P.OpenConnection (runst, conn, pid)
+ -- When the connection is successful, the remote
+ -- will send an AUTH_SUCCESS with its uuid.
+ let proto = P2P.postAuth $
+ P2P.negotiateProtocolVersion P2P.maxProtocolVersion
+ tryNonAsync (P2P.runNetProto runst conn proto) >>= \case
+ Right (Right (Just theiruuid)) | theiruuid == uuid r ->
+ return $ Just c
+ _ -> do
+ (cclosed, exitcode) <- closeP2PSshConnection c
+ -- ssh exits 255 when unable to connect to
+ -- server. Return a closed connection in
+ -- this case, to avoid the fallback action
+ -- being run instead, which would mean a
+ -- second connection attempt to this server
+ -- that is down.
+ if exitcode == Just (ExitFailure 255)
+ then return (Just cclosed)
+ else do
+ rememberunsupported
+ return Nothing
+ rememberunsupported = atomically $
+ modifyTVar' connpool $
+ maybe (Just P2PSshUnsupported) Just
+
+-- Runs a P2P Proto action on a remote when it supports that,
+-- otherwise the fallback action.
+runProto :: Remote -> P2PSshConnectionPool -> a -> Annex a -> P2P.Proto a -> Annex (Maybe a)
+runProto r connpool bad fallback proto = Just <$>
+ (getP2PSshConnection r connpool >>= maybe fallback go)
+ where
+ go c = do
+ (c', v) <- runProtoConn proto c
+ case v of
+ Just res -> do
+ liftIO $ storeP2PSshConnection connpool c'
+ return res
+ -- Running the proto failed, either due to a protocol
+ -- error or a network error.
+ Nothing -> return bad
+
+runProtoConn :: P2P.Proto a -> P2PSshConnection -> Annex (P2PSshConnection, Maybe a)
+runProtoConn _ P2P.ClosedConnection = return (P2P.ClosedConnection, Nothing)
+runProtoConn a conn@(P2P.OpenConnection (runst, c, _pid)) = do
+ P2P.runFullProto runst c a >>= \case
+ Right r -> return (conn, Just r)
+ -- When runFullProto fails, the connection is no longer
+ -- usable, so close it.
+ Left e -> do
+ warning $ "Lost connection (" ++ e ++ ")"
+ conn' <- fst <$> liftIO (closeP2PSshConnection conn)
+ return (conn', Nothing)
+
+-- Allocates a P2P ssh connection from the pool, and runs the action with it,
+-- returning the connection to the pool once the action is done.
+--
+-- If the remote does not support the P2P protocol, runs the fallback
+-- action instead.
+withP2PSshConnection
+ :: Remote
+ -> P2PSshConnectionPool
+ -> Annex a
+ -> (P2PSshConnection -> Annex (P2PSshConnection, a))
+ -> Annex a
+withP2PSshConnection r connpool fallback a = bracketOnError get cache go
+ where
+ get = getP2PSshConnection r connpool
+ cache (Just conn) = liftIO $ storeP2PSshConnection connpool conn
+ cache Nothing = return ()
+ go (Just conn) = do
+ (conn', res) <- a conn
+ cache (Just conn')
+ return res
+ go Nothing = fallback
diff --git a/Remote/P2P.hs b/Remote/P2P.hs
index 83ce258..41b6b21 100644
--- a/Remote/P2P.hs
+++ b/Remote/P2P.hs
@@ -1,6 +1,6 @@
{- git remotes using the git-annex P2P protocol
-
- - Copyright 2016 Joey Hess <id@joeyh.name>
+ - Copyright 2016-2018 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@@ -21,17 +21,13 @@ import Types.Remote
import Types.GitConfig
import qualified Git
import Annex.UUID
-import Annex.Content
import Config
import Config.Cost
import Remote.Helper.Git
import Remote.Helper.Export
-import Messages.Progress
-import Utility.Metered
+import Remote.Helper.P2P
import Utility.AuthToken
-import Types.NumCopies
-import Control.Concurrent
import Control.Concurrent.STM
remote :: RemoteType
@@ -49,16 +45,18 @@ chainGen :: P2PAddress -> Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig ->
chainGen addr r u c gc = do
connpool <- mkConnectionPool
cst <- remoteCost gc veryExpensiveRemoteCost
+ let protorunner = runProto u addr connpool
+ let withconn = withConnection u addr connpool
let this = Remote
{ uuid = u
, cost = cst
, name = Git.repoDescribe r
- , storeKey = store u addr connpool
- , retrieveKeyFile = retrieve u addr connpool
+ , storeKey = store (const protorunner)
+ , retrieveKeyFile = retrieve (const protorunner)
, retrieveKeyFileCheap = \_ _ _ -> return False
- , removeKey = remove u addr connpool
- , lockContent = Just (lock u addr connpool)
- , checkPresent = checkpresent u addr connpool
+ , removeKey = remove protorunner
+ , lockContent = Just $ lock withconn runProtoConn u
+ , checkPresent = checkpresent protorunner
, checkPresentCheap = False
, exportActions = exportUnsupported
, whereisKey = Nothing
@@ -78,48 +76,8 @@ chainGen addr r u c gc = do
}
return (Just this)
-store :: UUID -> P2PAddress -> ConnectionPool -> Key -> AssociatedFile -> MeterUpdate -> Annex Bool
-store u addr connpool k af p = do
- let getsrcfile = fmap fst <$> prepSendAnnex k
- metered (Just p) k getsrcfile $ \p' ->
- fromMaybe False
- <$> runProto u addr connpool (P2P.put k af p')
-
-retrieve :: UUID -> P2PAddress -> ConnectionPool -> Key -> AssociatedFile -> FilePath -> MeterUpdate -> Annex (Bool, Verification)
-retrieve u addr connpool k af dest p = unVerified $
- metered (Just p) k (return Nothing) $ \p' -> fromMaybe False
- <$> runProto u addr connpool (P2P.get dest k af p')
-
-remove :: UUID -> P2PAddress -> ConnectionPool -> Key -> Annex Bool
-remove u addr connpool k = fromMaybe False
- <$> runProto u addr connpool (P2P.remove k)
-
-checkpresent :: UUID -> P2PAddress -> ConnectionPool -> Key -> Annex Bool
-checkpresent u addr connpool k = maybe unavail return
- =<< runProto u addr connpool (P2P.checkPresent k)
- where
- unavail = giveup "can't connect to peer"
-
-lock :: UUID -> P2PAddress -> ConnectionPool -> Key -> (VerifiedCopy -> Annex r) -> Annex r
-lock u addr connpool k callback =
- withConnection u addr connpool $ \conn -> do
- connv <- liftIO $ newMVar conn
- let runproto d p = do
- c <- liftIO $ takeMVar connv
- (c', mr) <- runProto' p c
- liftIO $ putMVar connv c'
- return (fromMaybe d mr)
- r <- P2P.lockContentWhile runproto k go
- conn' <- liftIO $ takeMVar connv
- return (conn', r)
- where
- go False = giveup "can't lock content"
- go True = withVerifiedCopy LockedCopy u (return True) callback
-
--- | A connection to the peer.
-data Connection
- = OpenConnection P2PConnection
- | ClosedConnection
+-- | A connection to the peer, which can be closed.
+type Connection = ClosableConnection (RunState, P2PConnection)
type ConnectionPool = TVar [Connection]
@@ -128,12 +86,12 @@ mkConnectionPool = liftIO $ newTVarIO []
-- Runs the Proto action.
runProto :: UUID -> P2PAddress -> ConnectionPool -> P2P.Proto a -> Annex (Maybe a)
-runProto u addr connpool a = withConnection u addr connpool (runProto' a)
+runProto u addr connpool a = withConnection u addr connpool (runProtoConn a)
-runProto' :: P2P.Proto a -> Connection -> Annex (Connection, Maybe a)
-runProto' _ ClosedConnection = return (ClosedConnection, Nothing)
-runProto' a (OpenConnection conn) = do
- v <- runFullProto Client conn a
+runProtoConn :: P2P.Proto a -> Connection -> Annex (Connection, Maybe a)
+runProtoConn _ ClosedConnection = return (ClosedConnection, Nothing)
+runProtoConn a c@(OpenConnection (runst, conn)) = do
+ v <- runFullProto runst conn a
-- When runFullProto fails, the connection is no longer usable,
-- so close it.
case v of
@@ -141,7 +99,7 @@ runProto' a (OpenConnection conn) = do
warning $ "Lost connection to peer (" ++ e ++ ")"
liftIO $ closeConnection conn
return (ClosedConnection, Nothing)
- Right r -> return (OpenConnection conn, Just r)
+ Right r -> return (c, Just r)
-- Uses an open connection if one is available in the ConnectionPool;
-- otherwise opens a new connection.
@@ -180,11 +138,20 @@ openConnection u addr = do
myuuid <- getUUID
authtoken <- fromMaybe nullAuthToken
<$> loadP2PRemoteAuthToken addr
- res <- liftIO $ runNetProto conn $
- P2P.auth myuuid authtoken
+ let proto = P2P.auth myuuid authtoken $
+ -- Before 6.20180312, the protocol server
+ -- had a bug that made negotiating the
+ -- protocol version terminate the
+ -- connection. So, this must stay disabled
+ -- until the old version is not in use
+ -- anywhere.
+ --P2P.negotiateProtocolVersion P2P.maxProtocolVersion
+ return ()
+ runst <- liftIO $ mkRunState Client
+ res <- liftIO $ runNetProto runst conn proto
case res of
Right (Just theiruuid)
- | u == theiruuid -> return (OpenConnection conn)
+ | u == theiruuid -> return (OpenConnection (runst, conn))
| otherwise -> do
liftIO $ closeConnection conn
warning "Remote peer uuid seems to have changed."
diff --git a/Remote/Rsync.hs b/Remote/Rsync.hs
index dfac615..7f687a7 100644
--- a/Remote/Rsync.hs
+++ b/Remote/Rsync.hs
@@ -1,6 +1,6 @@
{- A remote that is only accessible by rsync.
-
- - Copyright 2011 Joey Hess <id@joeyh.name>
+ - Copyright 2011-2018 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@@ -29,6 +29,7 @@ import Annex.Ssh
import Remote.Helper.Special
import Remote.Helper.Messages
import Remote.Helper.Export
+import Types.Export
import Remote.Rsync.RsyncUrl
import Crypto
import Utility.Rsync
@@ -49,7 +50,7 @@ remote = RemoteType
, enumerate = const (findSpecialRemotes "rsyncurl")
, generate = gen
, setup = rsyncSetup
- , exportSupported = exportUnsupported
+ , exportSupported = exportIsSupported
}
gen :: Git.Repo -> UUID -> RemoteConfig -> RemoteGitConfig -> Annex (Maybe Remote)
@@ -75,7 +76,14 @@ gen r u c gc = do
, lockContent = Nothing
, checkPresent = checkPresentDummy
, checkPresentCheap = False
- , exportActions = exportUnsupported
+ , exportActions = return $ ExportActions
+ { storeExport = storeExportM o
+ , retrieveExport = retrieveExportM o
+ , removeExport = removeExportM o
+ , checkPresentExport = checkPresentExportM o
+ , removeExportDirectory = Just (removeExportDirectoryM o)
+ , renameExport = renameExportM o
+ }
, whereisKey = Nothing
, remoteFsck = Nothing
, repairRepo = Nothing
@@ -165,14 +173,25 @@ rsyncSetup _ mu _ c gc = do
- pass --include=X --include=X/Y --include=X/Y/file --exclude=*)
-}
store :: RsyncOpts -> Key -> FilePath -> MeterUpdate -> Annex Bool
-store o k src meterupdate = withRsyncScratchDir $ \tmp -> do
- let dest = tmp </> Prelude.head (keyPaths k)
- liftIO $ createDirectoryIfMissing True $ parentDir dest
- ok <- liftIO $ if canrename
+store o k src meterupdate = storeGeneric o meterupdate basedest populatedest
+ where
+ basedest = Prelude.head (keyPaths k)
+ populatedest dest = liftIO $ if canrename
then do
rename src dest
return True
else createLinkOrCopy src dest
+ {- If the key being sent is encrypted or chunked, the file
+ - containing its content is a temp file, and so can be
+ - renamed into place. Otherwise, the file is the annexed
+ - object file, and has to be copied or hard linked into place. -}
+ canrename = isEncKey k || isChunkKey k
+
+storeGeneric :: RsyncOpts -> MeterUpdate -> FilePath -> (FilePath -> Annex Bool) -> Annex Bool
+storeGeneric o meterupdate basedest populatedest = withRsyncScratchDir $ \tmp -> do
+ let dest = tmp </> basedest
+ liftIO $ createDirectoryIfMissing True $ parentDir dest
+ ok <- populatedest dest
ps <- sendParams
if ok
then showResumable $ rsyncRemote Upload o (Just meterupdate) $ ps ++
@@ -182,61 +201,97 @@ store o k src meterupdate = withRsyncScratchDir $ \tmp -> do
, Param $ rsyncUrl o
]
else return False
- where
- {- If the key being sent is encrypted or chunked, the file
- - containing its content is a temp file, and so can be
- - renamed into place. Otherwise, the file is the annexed
- - object file, and has to be copied or hard linked into place. -}
- canrename = isEncKey k || isChunkKey k
retrieve :: RsyncOpts -> FilePath -> Key -> MeterUpdate -> Annex ()
retrieve o f k p =
- unlessM (rsyncRetrieve o k f (Just p)) $
+ unlessM (rsyncRetrieveKey o k f (Just p)) $
giveup "rsync failed"
retrieveCheap :: RsyncOpts -> Key -> AssociatedFile -> FilePath -> Annex Bool
-retrieveCheap o k _af f = ifM (preseedTmp k f) ( rsyncRetrieve o k f Nothing , return False )
+retrieveCheap o k _af f = ifM (preseedTmp k f) ( rsyncRetrieveKey o k f Nothing , return False )
remove :: RsyncOpts -> Remover
-remove o k = do
+remove o k = removeGeneric o includes
+ where
+ includes = concatMap use dirHashes
+ use h = let dir = h def k in
+ [ parentDir dir
+ , dir
+ -- match content directory and anything in it
+ , dir </> keyFile k </> "***"
+ ]
+
+{- An empty directory is rsynced to make it delete. Everything is excluded,
+ - except for the specified includes. Due to the way rsync traverses
+ - directories, the includes must match both the file to be deleted, and
+ - its parent directories, but not their other contents. -}
+removeGeneric :: RsyncOpts -> [String] -> Annex Bool
+removeGeneric o includes = do
ps <- sendParams
withRsyncScratchDir $ \tmp -> liftIO $ do
{- Send an empty directory to rysnc to make it delete. -}
- let dummy = tmp </> keyFile k
- createDirectoryIfMissing True dummy
rsync $ rsyncOptions o ++ ps ++
map (\s -> Param $ "--include=" ++ s) includes ++
[ Param "--exclude=*" -- exclude everything else
, Param "--quiet", Param "--delete", Param "--recursive"
] ++ partialParams ++
- [ Param $ addTrailingPathSeparator dummy
+ [ Param $ addTrailingPathSeparator tmp
, Param $ rsyncUrl o
]
- where
- {- Specify include rules to match the directories where the
- - content could be. Note that the parent directories have
- - to also be explicitly included, due to how rsync
- - traverses directories. -}
- includes = concatMap use dirHashes
- use h = let dir = h def k in
- [ parentDir dir
- , dir
- -- match content directory and anything in it
- , dir </> keyFile k </> "***"
- ]
checkKey :: Git.Repo -> RsyncOpts -> CheckPresent
checkKey r o k = do
showChecking r
+ checkPresentGeneric o (rsyncUrls o k)
+
+checkPresentGeneric :: RsyncOpts -> [RsyncUrl] -> Annex Bool
+checkPresentGeneric o rsyncurls =
-- note: Does not currently differentiate between rsync failing
-- to connect, and the file not being present.
- untilTrue (rsyncUrls o k) $ \u ->
+ untilTrue rsyncurls $ \u ->
liftIO $ catchBoolIO $ do
withQuietOutput createProcessSuccess $
proc "rsync" $ toCommand $
rsyncOptions o ++ [Param u]
return True
+storeExportM :: RsyncOpts -> FilePath -> Key -> ExportLocation -> MeterUpdate -> Annex Bool
+storeExportM o src _k loc meterupdate =
+ storeGeneric o meterupdate basedest populatedest
+ where
+ basedest = fromExportLocation loc
+ populatedest = liftIO . createLinkOrCopy src
+
+retrieveExportM :: RsyncOpts -> Key -> ExportLocation -> FilePath -> MeterUpdate -> Annex Bool
+retrieveExportM o _k loc dest p = rsyncRetrieve o [rsyncurl] dest (Just p)
+ where
+ rsyncurl = mkRsyncUrl o (fromExportLocation loc)
+
+checkPresentExportM :: RsyncOpts -> Key -> ExportLocation -> Annex Bool
+checkPresentExportM o _k loc = checkPresentGeneric o [rsyncurl]
+ where
+ rsyncurl = mkRsyncUrl o (fromExportLocation loc)
+
+removeExportM :: RsyncOpts -> Key -> ExportLocation -> Annex Bool
+removeExportM o _k loc =
+ removeGeneric o (includes (fromExportLocation loc))
+ where
+ includes f = f : case upFrom f of
+ Nothing -> []
+ Just f' -> includes f'
+
+removeExportDirectoryM :: RsyncOpts -> ExportDirectory -> Annex Bool
+removeExportDirectoryM o ed = removeGeneric o (allbelow d : includes d)
+ where
+ d = fromExportDirectory ed
+ allbelow f = f </> "***"
+ includes f = f : case upFrom f of
+ Nothing -> []
+ Just f' -> includes f'
+
+renameExportM :: RsyncOpts -> Key -> ExportLocation -> ExportLocation -> Annex Bool
+renameExportM _ _ _ _ = return False
+
{- Rsync params to enable resumes of sending files safely,
- ensure that files are only moved into place once complete
-}
@@ -259,15 +314,18 @@ withRsyncScratchDir a = do
t <- fromRepo gitAnnexTmpObjectDir
withTmpDirIn t "rsynctmp" a
-rsyncRetrieve :: RsyncOpts -> Key -> FilePath -> Maybe MeterUpdate -> Annex Bool
-rsyncRetrieve o k dest meterupdate =
- showResumable $ untilTrue (rsyncUrls o k) $ \u -> rsyncRemote Download o meterupdate
+rsyncRetrieve :: RsyncOpts -> [RsyncUrl] -> FilePath -> Maybe MeterUpdate -> Annex Bool
+rsyncRetrieve o rsyncurls dest meterupdate =
+ showResumable $ untilTrue rsyncurls $ \u -> rsyncRemote Download o meterupdate
-- use inplace when retrieving to support resuming
[ Param "--inplace"
, Param u
, File dest
]
+rsyncRetrieveKey :: RsyncOpts -> Key -> FilePath -> Maybe MeterUpdate -> Annex Bool
+rsyncRetrieveKey o k dest meterupdate = rsyncRetrieve o (rsyncUrls o k) dest meterupdate
+
showResumable :: Annex Bool -> Annex Bool
showResumable a = ifM a
( return True
diff --git a/Remote/Rsync/RsyncUrl.hs b/Remote/Rsync/RsyncUrl.hs
index c0f30c1..67ce794 100644
--- a/Remote/Rsync/RsyncUrl.hs
+++ b/Remote/Rsync/RsyncUrl.hs
@@ -1,6 +1,6 @@
{- Rsync urls.
-
- - Copyright 2014 Joey Hess <id@joeyh.name>
+ - Copyright 2014-2018 Joey Hess <id@joeyh.name>
-
- Licensed under the GNU GPL version 3 or higher.
-}
@@ -36,6 +36,9 @@ rsyncEscape o u
| rsyncShellEscape o && rsyncUrlIsShell (rsyncUrl o) = shellEscape u
| otherwise = u
+mkRsyncUrl :: RsyncOpts -> FilePath -> RsyncUrl
+mkRsyncUrl o f = rsyncUrl o </> rsyncEscape o f
+
rsyncUrls :: RsyncOpts -> Key -> [RsyncUrl]
rsyncUrls o k = map use dirHashes
where
diff --git a/RemoteDaemon/Transport/Tor.hs b/RemoteDaemon/Transport/Tor.hs
index b0fa3c1..6689461 100644
--- a/RemoteDaemon/Transport/Tor.hs
+++ b/RemoteDaemon/Transport/Tor.hs
@@ -5,7 +5,9 @@
- Licensed under the GNU GPL version 3 or higher.
-}
-module RemoteDaemon.Transport.Tor (server, transport) where
+{-# LANGUAGE CPP #-}
+
+module RemoteDaemon.Transport.Tor (server, transport, torSocketFile) where
import Common
import qualified Annex
@@ -14,6 +16,7 @@ import Annex.ChangedRefs
import RemoteDaemon.Types
import RemoteDaemon.Common
import Utility.AuthToken
+import Utility.Tor
import P2P.Protocol as P2P
import P2P.IO
import P2P.Annex
@@ -30,6 +33,9 @@ import System.Log.Logger (debugM)
import Control.Concurrent.STM
import Control.Concurrent.STM.TBMQueue
import Control.Concurrent.Async
+#ifndef mingw32_HOST_OS
+import System.Posix.User
+#endif
-- Run tor hidden service.
server :: Server
@@ -109,7 +115,9 @@ serveClient th u r q = bracket setup cleanup start
, connIhdl = h
, connOhdl = h
}
- v <- liftIO $ runNetProto conn $ P2P.serveAuth u
+ -- not really Client, but we don't know their uuid yet
+ runstauth <- liftIO $ mkRunState Client
+ v <- liftIO $ runNetProto runstauth conn $ P2P.serveAuth u
case v of
Right (Just theiruuid) -> authed conn theiruuid
Right Nothing -> liftIO $
@@ -121,8 +129,9 @@ serveClient th u r q = bracket setup cleanup start
authed conn theiruuid =
bracket watchChangedRefs (liftIO . maybe noop stopWatchingChangedRefs) $ \crh -> do
- v' <- runFullProto (Serving theiruuid crh) conn $
- P2P.serveAuthed u
+ runst <- liftIO $ mkRunState (Serving theiruuid crh)
+ v' <- runFullProto runst conn $
+ P2P.serveAuthed P2P.ServeReadWrite u
case v' of
Right () -> return ()
Left e -> liftIO $ debugM "remotedaemon" ("Tor connection error: " ++ e)
@@ -140,8 +149,8 @@ transport (RemoteRepo r gc) url@(RemoteURI uri) th ichan ochan =
myuuid <- liftAnnex th getUUID
authtoken <- fromMaybe nullAuthToken
<$> liftAnnex th (loadP2PRemoteAuthToken addr)
- res <- runNetProto conn $
- P2P.auth myuuid authtoken
+ runst <- mkRunState Client
+ res <- runNetProto runst conn $ P2P.auth myuuid authtoken noop
case res of
Right (Just theiruuid) -> do
expecteduuid <- liftAnnex th $ getRepoUUID r
@@ -149,7 +158,7 @@ transport (RemoteRepo r gc) url@(RemoteURI uri) th ichan ochan =
then do
send (CONNECTED url)
status <- handlecontrol
- `race` handlepeer conn
+ `race` handlepeer runst conn
send (DISCONNECTED url)
return $ either id id status
else return ConnectionStopping
@@ -164,13 +173,13 @@ transport (RemoteRepo r gc) url@(RemoteURI uri) th ichan ochan =
LOSTNET -> return ConnectionStopping
_ -> handlecontrol
- handlepeer conn = do
- v <- runNetProto conn P2P.notifyChange
+ handlepeer runst conn = do
+ v <- runNetProto runst conn P2P.notifyChange
case v of
Right (Just (ChangedRefs shas)) -> do
whenM (checkShouldFetch gc th shas) $
fetch
- handlepeer conn
+ handlepeer runst conn
_ -> return ConnectionClosed
fetch = do
@@ -178,3 +187,14 @@ transport (RemoteRepo r gc) url@(RemoteURI uri) th ichan ochan =
ok <- inLocalRepo th $
runBool [Param "fetch", Param $ Git.repoDescribe r]
send (DONESYNCING url ok)
+
+torSocketFile :: Annex.Annex (Maybe FilePath)
+torSocketFile = do
+ u <- getUUID
+ let ident = fromUUID u
+#ifndef mingw32_HOST_OS
+ uid <- liftIO getRealUserID
+#else
+ let uid = 0
+#endif
+ liftIO $ getHiddenServiceSocketFile torAppName uid ident
diff --git a/Test.hs b/Test.hs
index 12999b4..b0f4186 100644
--- a/Test.hs
+++ b/Test.hs
@@ -6,8 +6,6 @@
-}
{-# LANGUAGE CPP #-}
-{- Avoid optimising this file much, since it's large and does not need it._-}
-{-# OPTIONS_GHC -O1 -optlo-O2 #-}
module Test where
diff --git a/Types/Key.hs b/Types/Key.hs
index 44ebe3c..b3efc04 100644
--- a/Types/Key.hs
+++ b/Types/Key.hs
@@ -5,6 +5,8 @@
- Licensed under the GNU GPL version 3 or higher.
-}
+{-# LANGUAGE CPP #-}
+
module Types.Key where
import Utility.PartialPrelude
@@ -31,6 +33,9 @@ data KeyVariety
= SHA2Key HashSize HasExt
| SHA3Key HashSize HasExt
| SKEINKey HashSize HasExt
+ | Blake2bKey HashSize HasExt
+ | Blake2sKey HashSize HasExt
+ | Blake2spKey HashSize HasExt
| SHA1Key HasExt
| MD5Key HasExt
| WORMKey
@@ -52,6 +57,9 @@ hasExt :: KeyVariety -> Bool
hasExt (SHA2Key _ (HasExt b)) = b
hasExt (SHA3Key _ (HasExt b)) = b
hasExt (SKEINKey _ (HasExt b)) = b
+hasExt (Blake2bKey _ (HasExt b)) = b
+hasExt (Blake2sKey _ (HasExt b)) = b
+hasExt (Blake2spKey _ (HasExt b)) = b
hasExt (SHA1Key (HasExt b)) = b
hasExt (MD5Key (HasExt b)) = b
hasExt WORMKey = False
@@ -62,6 +70,9 @@ sameExceptExt :: KeyVariety -> KeyVariety -> Bool
sameExceptExt (SHA2Key sz1 _) (SHA2Key sz2 _) = sz1 == sz2
sameExceptExt (SHA3Key sz1 _) (SHA3Key sz2 _) = sz1 == sz2
sameExceptExt (SKEINKey sz1 _) (SKEINKey sz2 _) = sz1 == sz2
+sameExceptExt (Blake2bKey sz1 _) (Blake2bKey sz2 _) = sz1 == sz2
+sameExceptExt (Blake2sKey sz1 _) (Blake2sKey sz2 _) = sz1 == sz2
+sameExceptExt (Blake2spKey sz1 _) (Blake2spKey sz2 _) = sz1 == sz2
sameExceptExt (SHA1Key _) (SHA1Key _) = True
sameExceptExt (MD5Key _) (MD5Key _) = True
sameExceptExt _ _ = False
@@ -72,6 +83,9 @@ cryptographicallySecure :: KeyVariety -> Bool
cryptographicallySecure (SHA2Key _ _) = True
cryptographicallySecure (SHA3Key _ _) = True
cryptographicallySecure (SKEINKey _ _) = True
+cryptographicallySecure (Blake2bKey _ _) = True
+cryptographicallySecure (Blake2sKey _ _) = True
+cryptographicallySecure (Blake2spKey _ _) = True
cryptographicallySecure _ = False
formatKeyVariety :: KeyVariety -> String
@@ -79,6 +93,9 @@ formatKeyVariety v = case v of
SHA2Key sz e -> adde e (addsz sz "SHA")
SHA3Key sz e -> adde e (addsz sz "SHA3_")
SKEINKey sz e -> adde e (addsz sz "SKEIN")
+ Blake2bKey sz e -> adde e (addsz sz "BLAKE2B")
+ Blake2sKey sz e -> adde e (addsz sz "BLAKE2S")
+ Blake2spKey sz e -> adde e (addsz sz "BLAKE2SP")
SHA1Key e -> adde e "SHA1"
MD5Key e -> adde e "MD5"
WORMKey -> "WORM"
@@ -90,30 +107,52 @@ formatKeyVariety v = case v of
addsz (HashSize n) s = s ++ show n
parseKeyVariety :: String -> KeyVariety
-parseKeyVariety "SHA256" = SHA2Key (HashSize 256) (HasExt False)
-parseKeyVariety "SHA256E" = SHA2Key (HashSize 256) (HasExt True)
-parseKeyVariety "SHA512" = SHA2Key (HashSize 512) (HasExt False)
-parseKeyVariety "SHA512E" = SHA2Key (HashSize 512) (HasExt True)
-parseKeyVariety "SHA224" = SHA2Key (HashSize 224) (HasExt False)
-parseKeyVariety "SHA224E" = SHA2Key (HashSize 224) (HasExt True)
-parseKeyVariety "SHA384" = SHA2Key (HashSize 384) (HasExt False)
-parseKeyVariety "SHA384E" = SHA2Key (HashSize 384) (HasExt True)
-parseKeyVariety "SHA3_512" = SHA3Key (HashSize 512) (HasExt False)
-parseKeyVariety "SHA3_512E" = SHA3Key (HashSize 512) (HasExt True)
-parseKeyVariety "SHA3_384" = SHA3Key (HashSize 384) (HasExt False)
-parseKeyVariety "SHA3_384E" = SHA3Key (HashSize 384) (HasExt True)
-parseKeyVariety "SHA3_256" = SHA3Key (HashSize 256) (HasExt False)
-parseKeyVariety "SHA3_256E" = SHA3Key (HashSize 256) (HasExt True)
-parseKeyVariety "SHA3_224" = SHA3Key (HashSize 224) (HasExt False)
-parseKeyVariety "SHA3_224E" = SHA3Key (HashSize 224) (HasExt True)
-parseKeyVariety "SKEIN512" = SKEINKey (HashSize 512) (HasExt False)
-parseKeyVariety "SKEIN512E" = SKEINKey (HashSize 512) (HasExt True)
-parseKeyVariety "SKEIN256" = SKEINKey (HashSize 256) (HasExt False)
-parseKeyVariety "SKEIN256E" = SKEINKey (HashSize 256) (HasExt True)
-parseKeyVariety "SHA1" = SHA1Key (HasExt False)
-parseKeyVariety "SHA1E" = SHA1Key (HasExt True)
-parseKeyVariety "MD5" = MD5Key (HasExt False)
-parseKeyVariety "MD5E" = MD5Key (HasExt True)
-parseKeyVariety "WORM" = WORMKey
-parseKeyVariety "URL" = URLKey
-parseKeyVariety s = OtherKey s
+parseKeyVariety "SHA256" = SHA2Key (HashSize 256) (HasExt False)
+parseKeyVariety "SHA256E" = SHA2Key (HashSize 256) (HasExt True)
+parseKeyVariety "SHA512" = SHA2Key (HashSize 512) (HasExt False)
+parseKeyVariety "SHA512E" = SHA2Key (HashSize 512) (HasExt True)
+parseKeyVariety "SHA224" = SHA2Key (HashSize 224) (HasExt False)
+parseKeyVariety "SHA224E" = SHA2Key (HashSize 224) (HasExt True)
+parseKeyVariety "SHA384" = SHA2Key (HashSize 384) (HasExt False)
+parseKeyVariety "SHA384E" = SHA2Key (HashSize 384) (HasExt True)
+parseKeyVariety "SHA3_512" = SHA3Key (HashSize 512) (HasExt False)
+parseKeyVariety "SHA3_512E" = SHA3Key (HashSize 512) (HasExt True)
+parseKeyVariety "SHA3_384" = SHA3Key (HashSize 384) (HasExt False)
+parseKeyVariety "SHA3_384E" = SHA3Key (HashSize 384) (HasExt True)
+parseKeyVariety "SHA3_256" = SHA3Key (HashSize 256) (HasExt False)
+parseKeyVariety "SHA3_256E" = SHA3Key (HashSize 256) (HasExt True)
+parseKeyVariety "SHA3_224" = SHA3Key (HashSize 224) (HasExt False)
+parseKeyVariety "SHA3_224E" = SHA3Key (HashSize 224) (HasExt True)
+parseKeyVariety "SKEIN512" = SKEINKey (HashSize 512) (HasExt False)
+parseKeyVariety "SKEIN512E" = SKEINKey (HashSize 512) (HasExt True)
+parseKeyVariety "SKEIN256" = SKEINKey (HashSize 256) (HasExt False)
+parseKeyVariety "SKEIN256E" = SKEINKey (HashSize 256) (HasExt True)
+#if MIN_VERSION_cryptonite(0,23,0)
+parseKeyVariety "BLAKE2B160" = Blake2bKey (HashSize 160) (HasExt False)
+parseKeyVariety "BLAKE2B160E" = Blake2bKey (HashSize 160) (HasExt True)
+parseKeyVariety "BLAKE2B224" = Blake2bKey (HashSize 224) (HasExt False)
+parseKeyVariety "BLAKE2B224E" = Blake2bKey (HashSize 224) (HasExt True)
+parseKeyVariety "BLAKE2B256" = Blake2bKey (HashSize 256) (HasExt False)
+parseKeyVariety "BLAKE2B256E" = Blake2bKey (HashSize 256) (HasExt True)
+parseKeyVariety "BLAKE2B384" = Blake2bKey (HashSize 384) (HasExt False)
+parseKeyVariety "BLAKE2B384E" = Blake2bKey (HashSize 384) (HasExt True)
+parseKeyVariety "BLAKE2B512" = Blake2bKey (HashSize 512) (HasExt False)
+parseKeyVariety "BLAKE2B512E" = Blake2bKey (HashSize 512) (HasExt True)
+parseKeyVariety "BLAKE2S160" = Blake2sKey (HashSize 160) (HasExt False)
+parseKeyVariety "BLAKE2S160E" = Blake2sKey (HashSize 160) (HasExt True)
+parseKeyVariety "BLAKE2S224" = Blake2sKey (HashSize 224) (HasExt False)
+parseKeyVariety "BLAKE2S224E" = Blake2sKey (HashSize 224) (HasExt True)
+parseKeyVariety "BLAKE2S256" = Blake2sKey (HashSize 256) (HasExt False)
+parseKeyVariety "BLAKE2S256E" = Blake2sKey (HashSize 256) (HasExt True)
+parseKeyVariety "BLAKE2SP224" = Blake2spKey (HashSize 224) (HasExt False)
+parseKeyVariety "BLAKE2SP224E" = Blake2spKey (HashSize 224) (HasExt True)
+parseKeyVariety "BLAKE2SP256" = Blake2spKey (HashSize 256) (HasExt False)
+parseKeyVariety "BLAKE2SP256E" = Blake2spKey (HashSize 256) (HasExt True)
+#endif
+parseKeyVariety "SHA1" = SHA1Key (HasExt False)
+parseKeyVariety "SHA1E" = SHA1Key (HasExt True)
+parseKeyVariety "MD5" = MD5Key (HasExt False)
+parseKeyVariety "MD5E" = MD5Key (HasExt True)
+parseKeyVariety "WORM" = WORMKey
+parseKeyVariety "URL" = URLKey
+parseKeyVariety s = OtherKey s
diff --git a/Types/Remote.hs b/Types/Remote.hs
index a734be9..78ec416 100644
--- a/Types/Remote.hs
+++ b/Types/Remote.hs
@@ -149,9 +149,16 @@ instance Ord (RemoteA a) where
instance ToUUID (RemoteA a) where
toUUID = uuid
--- Use Verified when the content of a key is verified as part of a
--- transfer, and so a separate verification step is not needed.
-data Verification = UnVerified | Verified
+data Verification
+ = UnVerified
+ -- ^ Content was not verified during transfer, but is probably
+ -- ok, so if verification is disabled, don't verify it
+ | Verified
+ -- ^ Content was verified during transfer, so don't verify it
+ -- again.
+ | MustVerify
+ -- ^ Content likely to have been altered during transfer,
+ -- verify even if verification is normally disabled
unVerified :: Monad m => m Bool -> m (Bool, Verification)
unVerified a = do
diff --git a/Utility/Hash.hs b/Utility/Hash.hs
index 70f826b..3bbb062 100644
--- a/Utility/Hash.hs
+++ b/Utility/Hash.hs
@@ -1,5 +1,7 @@
{- Convenience wrapper around cryptonite's hashing. -}
+{-# LANGUAGE CPP #-}
+
module Utility.Hash (
sha1,
sha2_224,
@@ -12,6 +14,18 @@ module Utility.Hash (
sha3_512,
skein256,
skein512,
+#if MIN_VERSION_cryptonite(0,23,0)
+ blake2s_160,
+ blake2s_224,
+ blake2s_256,
+ blake2sp_224,
+ blake2sp_256,
+ blake2b_160,
+ blake2b_224,
+ blake2b_256,
+ blake2b_384,
+ blake2b_512,
+#endif
md5,
prop_hashes_stable,
Mac(..),
@@ -59,6 +73,42 @@ skein256 = hashlazy
skein512 :: L.ByteString -> Digest Skein512_512
skein512 = hashlazy
+#if MIN_VERSION_cryptonite(0,23,0)
+blake2s_160 :: L.ByteString -> Digest Blake2s_160
+blake2s_160 = hashlazy
+
+blake2s_224 :: L.ByteString -> Digest Blake2s_224
+blake2s_224 = hashlazy
+
+blake2s_256 :: L.ByteString -> Digest Blake2s_256
+blake2s_256 = hashlazy
+
+blake2sp_224 :: L.ByteString -> Digest Blake2sp_224
+blake2sp_224 = hashlazy
+
+blake2sp_256 :: L.ByteString -> Digest Blake2sp_256
+blake2sp_256 = hashlazy
+
+blake2b_160 :: L.ByteString -> Digest Blake2b_160
+blake2b_160 = hashlazy
+
+blake2b_224 :: L.ByteString -> Digest Blake2b_224
+blake2b_224 = hashlazy
+
+blake2b_256 :: L.ByteString -> Digest Blake2b_256
+blake2b_256 = hashlazy
+
+blake2b_384 :: L.ByteString -> Digest Blake2b_384
+blake2b_384 = hashlazy
+
+blake2b_512 :: L.ByteString -> Digest Blake2b_512
+blake2b_512 = hashlazy
+#endif
+
+-- Disabled because it's buggy with some versions of cryptonite.
+--blake2bp_512 :: L.ByteString -> Digest Blake2bp_512
+--blake2bp_512 = hashlazy
+
md5 :: L.ByteString -> Digest MD5
md5 = hashlazy
@@ -76,6 +126,19 @@ prop_hashes_stable = all (\(hasher, result) -> hasher foo == result)
, (show . sha3_256, "76d3bc41c9f588f7fcd0d5bf4718f8f84b1c41b20882703100b9eb9413807c01")
, (show . sha3_384, "665551928d13b7d84ee02734502b018d896a0fb87eed5adb4c87ba91bbd6489410e11b0fbcc06ed7d0ebad559e5d3bb5")
, (show . sha3_512, "4bca2b137edc580fe50a88983ef860ebaca36c857b1f492839d6d7392452a63c82cbebc68e3b70a2a1480b4bb5d437a7cba6ecf9d89f9ff3ccd14cd6146ea7e7")
+#if MIN_VERSION_cryptonite(0,23,0)
+ , (show . blake2s_160, "52fb63154f958a5c56864597273ea759e52c6f00")
+ , (show . blake2s_224, "9466668503ac415d87b8e1dfd7f348ab273ac1d5e4f774fced5fdb55")
+ , (show . blake2s_256, "08d6cad88075de8f192db097573d0e829411cd91eb6ec65e8fc16c017edfdb74")
+ , (show . blake2sp_224, "8492d356fbac99f046f55e114301f7596649cb590e5b083d1a19dcdb")
+ , (show . blake2sp_256, "050dc5786037ea72cb9ed9d0324afcab03c97ec02e8c47368fc5dfb4cf49d8c9")
+ , (show . blake2b_160, "983ceba2afea8694cc933336b27b907f90c53a88")
+ , (show . blake2b_224, "853986b3fe231d795261b4fb530e1a9188db41e460ec4ca59aafef78")
+ , (show . blake2b_256, "b8fe9f7f6255a6fa08f668ab632a8d081ad87983c77cd274e48ce450f0b349fd")
+ , (show . blake2b_384, "e629ee880953d32c8877e479e3b4cb0a4c9d5805e2b34c675b5a5863c4ad7d64bb2a9b8257fac9d82d289b3d39eb9cc2")
+ , (show . blake2b_512, "ca002330e69d3e6b84a46a56a6533fd79d51d97a3bb7cad6c2ff43b354185d6dc1e723fb3db4ae0737e120378424c714bb982d9dc5bbd7a0ab318240ddd18f8d")
+ --, (show . blake2bp_512, "")
+#endif
, (show . md5, "acbd18db4cc2f85cedef654fccc4a4d8")
]
where
diff --git a/Utility/Metered.hs b/Utility/Metered.hs
index a5dda54..2eb665d 100644
--- a/Utility/Metered.hs
+++ b/Utility/Metered.hs
@@ -1,6 +1,6 @@
{- Metered IO and actions
-
- - Copyright 2012-2016 Joey Hess <id@joeyh.name>
+ - Copyright 2012-2018 Joey Hess <id@joeyh.name>
-
- License: BSD-2-clause
-}
@@ -288,14 +288,14 @@ outputFilter cmd params environ outfilter errfilter = catchBoolIO $ do
-- | Limit a meter to only update once per unit of time.
--
-- It's nice to display the final update to 100%, even if it comes soon
--- after a previous update. To make that happen, a total size has to be
--- provided.
-rateLimitMeterUpdate :: NominalDiffTime -> Maybe Integer -> MeterUpdate -> IO MeterUpdate
-rateLimitMeterUpdate delta totalsize meterupdate = do
+-- after a previous update. To make that happen, the Meter has to know
+-- its total size.
+rateLimitMeterUpdate :: NominalDiffTime -> Meter -> MeterUpdate -> IO MeterUpdate
+rateLimitMeterUpdate delta (Meter totalsizev _ _ _) meterupdate = do
lastupdate <- newMVar (toEnum 0 :: POSIXTime)
return $ mu lastupdate
where
- mu lastupdate n@(BytesProcessed i) = case totalsize of
+ mu lastupdate n@(BytesProcessed i) = readMVar totalsizev >>= \case
Just t | i >= t -> meterupdate n
_ -> do
now <- getPOSIXTime
@@ -306,35 +306,38 @@ rateLimitMeterUpdate delta totalsize meterupdate = do
meterupdate n
else putMVar lastupdate prev
-data Meter = Meter (Maybe Integer) (MVar MeterState) (MVar String) RenderMeter DisplayMeter
+data Meter = Meter (MVar (Maybe Integer)) (MVar MeterState) (MVar String) DisplayMeter
type MeterState = (BytesProcessed, POSIXTime)
-type DisplayMeter = MVar String -> String -> IO ()
+type DisplayMeter = MVar String -> Maybe Integer -> (BytesProcessed, POSIXTime) -> (BytesProcessed, POSIXTime) -> IO ()
type RenderMeter = Maybe Integer -> (BytesProcessed, POSIXTime) -> (BytesProcessed, POSIXTime) -> String
-- | Make a meter. Pass the total size, if it's known.
-mkMeter :: Maybe Integer -> RenderMeter -> DisplayMeter -> IO Meter
-mkMeter totalsize rendermeter displaymeter = Meter
- <$> pure totalsize
+mkMeter :: Maybe Integer -> DisplayMeter -> IO Meter
+mkMeter totalsize displaymeter = Meter
+ <$> newMVar totalsize
<*> ((\t -> newMVar (zeroBytesProcessed, t)) =<< getPOSIXTime)
<*> newMVar ""
- <*> pure rendermeter
<*> pure displaymeter
+setMeterTotalSize :: Meter -> Integer -> IO ()
+setMeterTotalSize (Meter totalsizev _ _ _) = void . swapMVar totalsizev . Just
+
-- | Updates the meter, displaying it if necessary.
updateMeter :: Meter -> BytesProcessed -> IO ()
-updateMeter (Meter totalsize sv bv rendermeter displaymeter) new = do
+updateMeter (Meter totalsizev sv bv displaymeter) new = do
now <- getPOSIXTime
(old, before) <- swapMVar sv (new, now)
- when (old /= new) $
- displaymeter bv $
- rendermeter totalsize (old, before) (new, now)
+ when (old /= new) $ do
+ totalsize <- readMVar totalsizev
+ displaymeter bv totalsize (old, before) (new, now)
-- | Display meter to a Handle.
-displayMeterHandle :: Handle -> DisplayMeter
-displayMeterHandle h v s = do
+displayMeterHandle :: Handle -> RenderMeter -> DisplayMeter
+displayMeterHandle h rendermeter v msize old new = do
+ let s = rendermeter msize old new
olds <- swapMVar v s
-- Avoid writing when the rendered meter has not changed.
when (olds /= s) $ do
@@ -344,29 +347,32 @@ displayMeterHandle h v s = do
-- | Clear meter displayed by displayMeterHandle.
clearMeterHandle :: Meter -> Handle -> IO ()
-clearMeterHandle (Meter _ _ v _ _) h = do
+clearMeterHandle (Meter _ _ v _) h = do
olds <- readMVar v
hPutStr h $ '\r' : replicate (length olds) ' ' ++ "\r"
hFlush h
-- | Display meter in the form:
--- 10% 300 KiB/s 16m40s
+-- 10% 1.3MiB 300 KiB/s 16m40s
-- or when total size is not known:
--- 1.3 MiB 300 KiB/s
+-- 1.3 MiB 300 KiB/s
bandwidthMeter :: RenderMeter
bandwidthMeter mtotalsize (BytesProcessed old, before) (BytesProcessed new, now) =
unwords $ catMaybes
- [ Just percentoramount
- -- Pad enough for max width: "xxxx.xx KiB xxxx KiB/s"
- , Just $ replicate (23 - length percentoramount - length rate) ' '
+ [ Just percentamount
+ -- Pad enough for max width: "100% xxxx.xx KiB xxxx KiB/s"
+ , Just $ replicate (29 - length percentamount - length rate) ' '
, Just rate
, estimatedcompletion
]
where
- percentoramount = case mtotalsize of
- Just totalsize -> showPercentage 0 $
- percentage totalsize (min new totalsize)
- Nothing -> roughSize' memoryUnits True 2 new
+ amount = roughSize' memoryUnits True 2 new
+ percentamount = case mtotalsize of
+ Just totalsize ->
+ let p = showPercentage 0 $
+ percentage totalsize (min new totalsize)
+ in p ++ replicate (6 - length p) ' ' ++ amount
+ Nothing -> amount
rate = roughSize' memoryUnits True 0 bytespersecond ++ "/s"
bytespersecond
| duration == 0 = fromIntegral transferred
diff --git a/Utility/Process.hs b/Utility/Process.hs
index ff454f7..1807a13 100644
--- a/Utility/Process.hs
+++ b/Utility/Process.hs
@@ -27,6 +27,7 @@ module Utility.Process (
withHandle,
withIOHandles,
withOEHandles,
+ withNullHandle,
withQuietOutput,
feedWithQuietOutput,
createProcess,
@@ -213,13 +214,16 @@ withOEHandles creator p a = creator p' $ a . oeHandles
, std_err = CreatePipe
}
+withNullHandle :: (Handle -> IO a) -> IO a
+withNullHandle = withFile devNull WriteMode
+
-- | Forces the CreateProcessRunner to run quietly;
-- both stdout and stderr are discarded.
withQuietOutput
:: CreateProcessRunner
-> CreateProcess
-> IO ()
-withQuietOutput creator p = withFile devNull WriteMode $ \nullh -> do
+withQuietOutput creator p = withNullHandle $ \nullh -> do
let p' = p
{ std_out = UseHandle nullh
, std_err = UseHandle nullh
diff --git a/Utility/Process/Transcript.hs b/Utility/Process/Transcript.hs
index 0dbe428..68fb222 100644
--- a/Utility/Process/Transcript.hs
+++ b/Utility/Process/Transcript.hs
@@ -1,6 +1,6 @@
{- Process transcript
-
- - Copyright 2012-2015 Joey Hess <id@joeyh.name>
+ - Copyright 2012-2018 Joey Hess <id@joeyh.name>
-
- License: BSD-2-clause
-}
@@ -11,10 +11,11 @@
module Utility.Process.Transcript where
import Utility.Process
+import Utility.Misc
import System.IO
-import Control.Concurrent
-import qualified Control.Exception as E
+import System.Exit
+import Control.Concurrent.Async
import Control.Monad
#ifndef mingw32_HOST_OS
import qualified System.Posix.IO
@@ -24,18 +25,25 @@ import Control.Applicative
import Data.Maybe
import Prelude
--- | Runs a process, optionally feeding it some input, and
--- returns a transcript combining its stdout and stderr, and
--- whether it succeeded or failed.
+-- | Runs a process and returns a transcript combining its stdout and
+-- stderr, and whether it succeeded or failed.
processTranscript :: String -> [String] -> (Maybe String) -> IO (String, Bool)
processTranscript cmd opts = processTranscript' (proc cmd opts)
+-- | Also feeds the process some input.
processTranscript' :: CreateProcess -> Maybe String -> IO (String, Bool)
processTranscript' cp input = do
+ (t, c) <- processTranscript'' cp input
+ return (t, c == ExitSuccess)
+
+processTranscript'' :: CreateProcess -> Maybe String -> IO (String, ExitCode)
+processTranscript'' cp input = do
#ifndef mingw32_HOST_OS
{- This implementation interleves stdout and stderr in exactly the order
- the process writes them. -}
(readf, writef) <- System.Posix.IO.createPipe
+ System.Posix.IO.setFdOption readf System.Posix.IO.CloseOnExec True
+ System.Posix.IO.setFdOption writef System.Posix.IO.CloseOnExec True
readh <- System.Posix.IO.fdToHandle readf
writeh <- System.Posix.IO.fdToHandle writef
p@(_, _, _, pid) <- createProcess $ cp
@@ -45,12 +53,9 @@ processTranscript' cp input = do
}
hClose writeh
- get <- mkreader readh
+ get <- asyncreader readh
writeinput input p
- transcript <- get
-
- ok <- checkSuccessProcess pid
- return (transcript, ok)
+ transcript <- wait get
#else
{- This implementation for Windows puts stderr after stdout. -}
p@(_, _, _, pid) <- createProcess $ cp
@@ -59,24 +64,15 @@ processTranscript' cp input = do
, std_err = CreatePipe
}
- getout <- mkreader (stdoutHandle p)
- geterr <- mkreader (stderrHandle p)
+ getout <- asyncreader (stdoutHandle p)
+ geterr <- asyncreader (stderrHandle p)
writeinput input p
- transcript <- (++) <$> getout <*> geterr
-
- ok <- checkSuccessProcess pid
- return (transcript, ok)
+ transcript <- (++) <$> wait getout <*> wait geterr
#endif
+ code <- waitForProcess pid
+ return (transcript, code)
where
- mkreader h = do
- s <- hGetContents h
- v <- newEmptyMVar
- void $ forkIO $ do
- void $ E.evaluate (length s)
- putMVar v ()
- return $ do
- takeMVar v
- return s
+ asyncreader = async . hGetContentsStrict
writeinput (Just s) p = do
let inh = stdinHandle p
diff --git a/doc/git-annex-export.mdwn b/doc/git-annex-export.mdwn
index a8f9f5c..1d7170c 100644
--- a/doc/git-annex-export.mdwn
+++ b/doc/git-annex-export.mdwn
@@ -29,7 +29,7 @@ Repeated exports are done efficiently, by diffing the old and new tree,
and transferring only the changed files, and renaming files as necessary.
Exports can be interrupted and resumed. However, partially uploaded files
-will be re-started from the beginning.
+will be re-started from the beginning in most cases.
Once content has been exported to a remote, commands like `git annex get`
can download content from there the same as from other remotes. However,
diff --git a/doc/git-annex-shell.mdwn b/doc/git-annex-shell.mdwn
index 167f540..fc536e4 100644
--- a/doc/git-annex-shell.mdwn
+++ b/doc/git-annex-shell.mdwn
@@ -90,6 +90,15 @@ first "/~/" or "/~user/" is expanded to the specified home directory.
Sets up a repository as a gcrypt repository.
+* p2pstdio directory uuid
+
+ This causes git-annex-shell to communicate using the git-annex p2p
+ protocol over stdio. When supported by git-annex-shell, this allows
+ multiple actions to be run over a single connection, improving speed.
+
+ The uuid is the one belonging to the repository that will be
+ communicating with git-annex-shell.
+
# OPTIONS
Most options are the same as in git-annex. The ones specific
diff --git a/doc/git-annex.mdwn b/doc/git-annex.mdwn
index db8cfca..93ecb15 100644
--- a/doc/git-annex.mdwn
+++ b/doc/git-annex.mdwn
@@ -1226,6 +1226,10 @@ Here are all the supported configuration settings.
from remotes. If you trust a remote and don't want the overhead
of these checksums, you can set this to `false`.
+ Note that even when this is set to `false`, git-annex does verification
+ in some edge cases, where it's likely the case that an
+ object was downloaded incorrectly.
+
* `remote.<name>.annex-export-tracking`
When set to a branch name or other treeish, this makes what's exported
@@ -1244,7 +1248,7 @@ Here are all the supported configuration settings.
git-annex caches UUIDs of remote repositories here.
-- `remote.<name>.annex-checkuuid`
+* `remote.<name>.annex-checkuuid`
This only affects remotes that have their url pointing to a directory on
the same system. git-annex normally checks the uuid of such
diff --git a/git-annex.cabal b/git-annex.cabal
index 2b17f67..9e273d9 100644
--- a/git-annex.cabal
+++ b/git-annex.cabal
@@ -1,5 +1,5 @@
Name: git-annex
-Version: 6.20180227
+Version: 6.20180316
Cabal-Version: >= 1.8
License: GPL-3
Maintainer: Joey Hess <id@joeyh.name>
@@ -377,7 +377,15 @@ Executable git-annex
-- Fully optimize for production.
if flag(Production)
- GHC-Options: -O2
+ -- Lower memory systems can run out of memory with -O2, so
+ -- optimise slightly less.
+ -- This needs -O1 before the -optlo, due to this bug:
+ -- https://ghc.haskell.org/trac/ghc/ticket/14821
+ -- But unfortunately, hackage currently refuses to accept -O1
+ if arch(arm)
+ GHC-Options: -optlo-O2
+ else
+ GHC-Options: -O2
-- Avoid linking with unused dynamic libaries.
-- (Only tested on Linux).
@@ -749,6 +757,7 @@ Executable git-annex
Command.NotifyChanges
Command.NumCopies
Command.P2P
+ Command.P2PStdIO
Command.PostReceive
Command.PreCommit
Command.Proxy
@@ -925,6 +934,7 @@ Executable git-annex
Remote.Helper.Hooks
Remote.Helper.Http
Remote.Helper.Messages
+ Remote.Helper.P2P
Remote.Helper.ReadOnly
Remote.Helper.Special
Remote.Helper.Ssh